Change behavior before and after pInfer->Bind() 47/263047/1
author Tae-Young Chung <ty83.chung@samsung.com>
Wed, 25 Aug 2021 04:57:13 +0000 (13:57 +0900)
committer Tae-Young Chung <ty83.chung@samsung.com>
Wed, 25 Aug 2021 04:57:16 +0000 (13:57 +0900)
In Inference::Bind() (pInfer->Bind()), mConfig.mTargetTypes is used.
However, mConfig.mTargetTypes is set by pInfer->ConfigureTargetTypes() and
pInfer->ConfigureTargetDevices(), which are only called after pInfer->Bind().
So this change reorders the behavior as follows:
Before pInfer->Bind(), mConfig.mTargetTypes is set by
pInfer->ConfigureTargetTypes().
After pInfer->Bind(), pInfer->CheckSupportedTargetDevice() only checks
whether mConfig.mTargetTypes is supported by the backend.
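
A minimal sketch of the reordered flow, assuming the names used in this diff
(Inference, ConfigureTargetTypes(), Bind(), CheckSupportedTargetDevice(),
MEDIA_VISION_ERROR_NONE); the wrapper function itself is hypothetical, error
paths are condensed, and the exact Bind() signature is simplified:

    // Hypothetical helper mirroring the reworked flow in
    // mv_inference_configure_engine_open().
    static int configure_engine_flow(Inference *pInfer, int targetTypes,
                                     bool is_new_version)
    {
            // Before Bind(): record the target types in mConfig.mTargetTypes.
            // When is_new_version is false, legacy MV_INFERENCE_TARGET_* values
            // are converted to MV_INFERENCE_TARGET_DEVICE_* values first.
            int ret = pInfer->ConfigureTargetTypes(targetTypes, is_new_version);
            if (ret != MEDIA_VISION_ERROR_NONE)
                    return ret;

            // Bind() loads the backend engine and fills mBackendCapacity, so
            // capability information is only available from this point on.
            ret = pInfer->Bind();
            if (ret != MEDIA_VISION_ERROR_NONE)
                    return ret;

            // After Bind(): only verify that the backend supports the devices
            // that were configured above.
            return pInfer->CheckSupportedTargetDevice(targetTypes);
    }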

Change-Id: I79d2dc6372aac0c503783fb9fef2b4eccecbb018
Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
mv_machine_learning/mv_inference/inference/include/Inference.h
mv_machine_learning/mv_inference/inference/src/Inference.cpp
mv_machine_learning/mv_inference/inference/src/mv_inference_open.cpp
packaging/capi-media-vision.spec

mv_machine_learning/mv_inference/inference/include/Inference.h
index 030f9ec..8f27975 100644
@@ -182,7 +182,7 @@ namespace inference
                 *
                 * @since_tizen 6.0 (Deprecated)
                 */
-               int ConfigureTargetTypes(const int targetType);
+               int ConfigureTargetTypes(int targetType, bool isNewVersion);
 
                /**
                 * @brief   Configure inference target devices such as CPU, GPU or NPU. (one more types can be combined)
@@ -192,6 +192,13 @@ namespace inference
                int ConfigureTargetDevices(const int targetDevices);
 
                /**
+                * @brief   Check supported target devices
+                *
+                * @since_tizen 6.5
+                */
+               int CheckSupportedTargetDevice(const int targetDevices);
+
+               /**
                 * @brief   Configure the maximum number of inference results
                 *
                 * @since_tizen 5.5
mv_machine_learning/mv_inference/inference/src/Inference.cpp
index 2af0f6d..756176f 100755
@@ -528,47 +528,41 @@ namespace inference
                return MEDIA_VISION_ERROR_NONE;
        }
 
-       int Inference::ConfigureTargetTypes(const int targetType)
+       int Inference::ConfigureTargetTypes(int targetType, bool isNewVersion)
        {
-               // Check if given target types are valid or not.
-               if (MV_INFERENCE_TARGET_NONE >= targetType ||
-                       MV_INFERENCE_TARGET_MAX <= targetType) {
-                       LOGE("Invalid target device.");
-                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
-               }
+               if (isNewVersion) {
+                       if (MV_INFERENCE_TARGET_DEVICE_NONE >= targetType ||
+                               MV_INFERENCE_TARGET_DEVICE_MAX <= targetType) {
+                               LOGE("Invalid target device.");
+                               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+                       }
+               } else {
+                       if (MV_INFERENCE_TARGET_NONE >= targetType ||
+                               MV_INFERENCE_TARGET_MAX <= targetType) {
+                               LOGE("Invalid target device.");
+                               return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+                       }
 
-               LOGI("Before converting target types : %d", targetType);
+                       LOGI("Before converting target types : %d", targetType);
 
-               unsigned int new_type = MV_INFERENCE_TARGET_DEVICE_NONE;
+                       // Convert old type to new one.
+                       switch (targetType) {
+                       case MV_INFERENCE_TARGET_CPU:
+                               targetType = MV_INFERENCE_TARGET_DEVICE_CPU;
+                               break;
+                       case MV_INFERENCE_TARGET_GPU:
 
-               // Convert old type to new one.
-               switch (targetType) {
-               case MV_INFERENCE_TARGET_CPU:
-                       if (!(mBackendCapacity.supported_accel_devices & INFERENCE_TARGET_CPU)) {
-                               LOGE("Backend doesn't support CPU acceleration.");
-                               return MEDIA_VISION_ERROR_NOT_SUPPORTED;
-                       }
-                       new_type = MV_INFERENCE_TARGET_DEVICE_CPU;
-                       break;
-               case MV_INFERENCE_TARGET_GPU:
-                       if (!(mBackendCapacity.supported_accel_devices & INFERENCE_TARGET_GPU)) {
-                               LOGE("Backend doesn't support GPU acceleration.");
-                               return MEDIA_VISION_ERROR_NOT_SUPPORTED;
-                       }
-                       new_type = MV_INFERENCE_TARGET_DEVICE_GPU;
-                       break;
-               case MV_INFERENCE_TARGET_CUSTOM:
-                       if (!(mBackendCapacity.supported_accel_devices & INFERENCE_TARGET_CUSTOM)) {
-                               LOGE("Backend doesn't support custom device acceleration.");
-                               return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+                               targetType = MV_INFERENCE_TARGET_DEVICE_GPU;
+                               break;
+                       case MV_INFERENCE_TARGET_CUSTOM:
+                               targetType = MV_INFERENCE_TARGET_DEVICE_CUSTOM;
+                               break;
                        }
-                       new_type = MV_INFERENCE_TARGET_DEVICE_CUSTOM;
-                       break;
-               }
 
-               LOGI("After converting target types : %d", new_type);
+                       LOGI("After converting target types : %d", targetType);
+               }
 
-               mConfig.mTargetTypes = new_type;
+               mConfig.mTargetTypes = targetType;
 
                return MEDIA_VISION_ERROR_NONE;
        }
@@ -594,6 +588,16 @@ namespace inference
                return MEDIA_VISION_ERROR_NONE;
        }
 
+       int Inference::CheckSupportedTargetDevice(const int targetDevices)
+       {
+               if (!(mBackendCapacity.supported_accel_devices & targetDevices)) {
+                       LOGE("Backend doesn't support a given %d device acceleration.", targetDevices);
+                       return MEDIA_VISION_ERROR_NOT_SUPPORTED;
+               }
+
+               return MEDIA_VISION_ERROR_NONE;
+       }
+
        void Inference::ConfigureOutput(const int maxOutputNumbers)
        {
                mConfig.mMaxOutputNumbers = std::max(
mv_machine_learning/mv_inference/inference/src/mv_inference_open.cpp
index c6bb99a..eeca357 100644
@@ -409,6 +409,13 @@ int mv_inference_configure_engine_open(mv_inference_h infer,
        if (ret != MEDIA_VISION_ERROR_NONE)
                goto _ERROR_;
 
+       // Convert old type to new one and then use it if is_new_version is false
+       if (pInfer->ConfigureTargetTypes(targetTypes, is_new_version) !=
+               MEDIA_VISION_ERROR_NONE) {
+               LOGE("Tried to configure invalid target types.");
+               goto _ERROR_;
+       }
+
        // Create a inference-engine-common class object and load its corresponding library.
        // Ps. Inference engine gets a capability from a given backend by Bind call
        // so access to mBackendCapacity should be done after Bind.
@@ -417,20 +424,10 @@ int mv_inference_configure_engine_open(mv_inference_h infer,
                LOGE("Fail to bind a backend engine.");
        }
 
-       if (is_new_version) {
-               // Use new type.
-               if (pInfer->ConfigureTargetDevices(targetTypes) !=
-                       MEDIA_VISION_ERROR_NONE) {
-                       LOGE("Tried to configure invalid target types.");
-                       goto _ERROR_;
-               }
-       } else {
-               // Convert old type to new one and then use it.
-               if (pInfer->ConfigureTargetTypes(targetTypes) !=
-                       MEDIA_VISION_ERROR_NONE) {
-                       LOGE("Tried to configure invalid target types.");
-                       goto _ERROR_;
-               }
+       if (pInfer->CheckSupportedTargetDevice(targetTypes) !=
+               MEDIA_VISION_ERROR_NONE) {
+               LOGE("Tried to configure invalid target types.");
+               goto _ERROR_;
        }
 
        LOGI("LEAVE");
packaging/capi-media-vision.spec
index 81598bc..0109d12 100644
@@ -1,6 +1,6 @@
 Name:        capi-media-vision
 Summary:     Media Vision library for Tizen Native API
-Version:     0.8.11
+Version:     0.8.12
 Release:     0
 Group:       Multimedia/Framework
 License:     Apache-2.0 and BSD-3-Clause