Implement GetBackendCapacity callback
author Inki Dae <inki.dae@samsung.com>
Mon, 10 Feb 2020 06:28:23 +0000 (15:28 +0900)
committer Inki Dae <inki.dae@samsung.com>
Tue, 14 Apr 2020 00:42:53 +0000 (09:42 +0900)
Change-Id: I4f30d077f85a54d09bd97b22a353f564d34a9a33
Signed-off-by: Inki Dae <inki.dae@samsung.com>
common/inference_engine_common_impl.cpp
include/inference_engine_common.h
include/inference_engine_type.h

index 7286efb63b02ec93b81ee619c6f1fa653a4c4fc6..0501c6c47006967a45fa5c766c80ebf2d1ecf117 100755 (executable)
@@ -233,10 +233,7 @@ int InferenceEngineCommon::SetOutputTensorProperty(inference_engine_layer_proper
 
 int InferenceEngineCommon::GetBackendCapacity(inference_engine_capacity *capacity)
 {
-    LOGI("ENTER");
-    LOGI("LEAVE");
-
-    return INFERENCE_ENGINE_ERROR_NONE;
+    return engine->GetBackendCapacity(capacity);
 }
 
 int InferenceEngineCommon::Run()
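
With this change the common layer simply forwards the capacity query to the bound backend. The following is a minimal caller-side sketch; the header name and the assumption that a backend has already been bound (and any namespace qualification) are illustrative and not part of this commit, while the GetBackendCapacity() signature, the capacity fields, and INFERENCE_ENGINE_ERROR_NONE come from the code above.

    // Minimal sketch: querying backend capacity through InferenceEngineCommon.
    // Header path and prior backend setup are assumptions; only the
    // GetBackendCapacity() signature and INFERENCE_ENGINE_ERROR_NONE are taken
    // from this commit. Namespace qualification is omitted for brevity.
    #include <iostream>
    #include "inference_engine_common_impl.h"   // assumed header declaring InferenceEngineCommon
    #include "inference_engine_type.h"

    int PrintBackendCapacity(InferenceEngineCommon &engine)
    {
        inference_engine_capacity capacity = {};

        // As of this commit, this call is delegated to the bound backend.
        int ret = engine.GetBackendCapacity(&capacity);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
            std::cerr << "GetBackendCapacity failed: " << ret << std::endl;
            return ret;
        }

        // supported_accel_devices is now a plain int carrying accelerator flags.
        std::cout << "supported accel devices: " << capacity.supported_accel_devices << std::endl;
        return INFERENCE_ENGINE_ERROR_NONE;
    }
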
index e446b4fe7ee11eae362f86048d643a77f217a19b..168acf1f75d7c07d5ce8d78c115e5c383a674bd0 100755 (executable)
@@ -120,7 +120,7 @@ public:
      *
      * @since_tizen 6.0
      */
-    virtual int GetBackendCapacity(inference_engine_capacity *capacity) { return 0; }
+    virtual int GetBackendCapacity(inference_engine_capacity *capacity) = 0;
 
     /**
      * @brief Run an inference. Deprecated.
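
Making GetBackendCapacity() pure virtual means every backend plugin must now provide its own implementation. The sketch below shows a hypothetical backend override; the interface class name, the INFERENCE_ENGINE_ERROR_INVALID_PARAMETER constant, and the device flag values are assumptions, only the method signature comes from the header change above.

    // Hypothetical backend override, required now that GetBackendCapacity() is
    // pure virtual. The class name, INFERENCE_ENGINE_ERROR_INVALID_PARAMETER and
    // the INFERENCE_TARGET_* flag values are assumptions for illustration.
    #include "inference_engine_common.h"
    #include "inference_engine_type.h"

    class ExampleBackendEngine : public IInferenceEngineCommon   // assumed interface name
    {
    public:
        int GetBackendCapacity(inference_engine_capacity *capacity) override
        {
            if (capacity == nullptr)
                return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;   // assumed error code

            // Report the accelerator devices this backend can target as bit flags.
            capacity->supported_accel_devices = INFERENCE_TARGET_CPU | INFERENCE_TARGET_GPU;   // assumed flags
            return INFERENCE_ENGINE_ERROR_NONE;
        }

        // Remaining pure-virtual members of the interface are omitted here;
        // a real backend must implement all of them.
    };
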
index c2af1c70cd419ede422cc2d75be1a6657276c372..c2bd4b281f7fbf2e60f7924f7b5ececa45e5ff8d 100644 (file)
@@ -172,7 +172,7 @@ typedef struct _inference_engine_layer_property {
  * @since_tizen 6.0
  */
 typedef struct _inference_engine_capacity {
-    std::vector<inference_target_type_e> supported_accel_devices;
+    int supported_accel_devices;
     inference_tensor_shape_e supported_tensor_shape;
     std::vector<std::string> supported_nn_models;
     // TODO.
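
Since supported_accel_devices changes from a std::vector of inference_target_type_e values to a plain int, the field presumably carries those target types as bit flags. A small helper sketch under that assumption (the bitmask usage itself is not confirmed by this commit):

    // Sketch assuming supported_accel_devices is a bitmask of
    // inference_target_type_e values.
    #include "inference_engine_type.h"

    static bool SupportsTarget(const inference_engine_capacity &capacity,
                               inference_target_type_e target)
    {
        return (capacity.supported_accel_devices & static_cast<int>(target)) != 0;
    }
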