vision: add initial code of new adaptation layer API
author Inki Dae <inki.dae@samsung.com>
Mon, 3 Feb 2020 08:08:32 +0000 (17:08 +0900)
committer Inki Dae <inki.dae@samsung.com>
Tue, 14 Apr 2020 00:42:53 +0000 (09:42 +0900)
Change-Id: Id3b708ca882354eef56ef58e77b155938c12ec72
Signed-off-by: Inki Dae <inki.dae@samsung.com>
vision/inference_engine_vision_impl.cpp

index 7263d894447841832e5445645b492b89c397a51a..60ced24c6cfe5ea21b1eefc151f276bb3a28d9bf 100644 (file)
@@ -105,6 +105,20 @@ int InferenceEngineVision::Init(std::string configFile,
     return INFERENCE_ENGINE_ERROR_NONE;
 }
 
+int InferenceEngineVision::Init(inference_engine_config *config)
+{
+    LOGI("ENTER");
+
+    int ret = mCommonEngine->Init(config);
+    if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+        LOGE("Fail to init");
+        return ret;
+    }
+
+    LOGI("LEAVE");
+    return ret;
+}
+
 int InferenceEngineVision::SetUserFile(std::string filename)
 {
     std::ifstream fp(filename.c_str());
@@ -165,6 +179,46 @@ int InferenceEngineVision::SetInputTensorParamNode(std::string node)
     return ret;
 }
 
+int InferenceEngineVision::GetInputTensorProperty(inference_engine_layer_property *property)
+{
+    LOGI("ENTER");
+    int ret = mCommonEngine->GetInputTensorProperty(property);
+    if (ret != INFERENCE_ENGINE_ERROR_NONE)
+        LOGE("Fail to GetInputTensorProperty");
+    LOGI("LEAVE");
+    return ret;
+}
+
+int InferenceEngineVision::GetOutputTensorProperty(inference_engine_layer_property *property)
+{
+    LOGI("ENTER");
+    int ret = mCommonEngine->GetOutputTensorProperty(property);
+    if (ret != INFERENCE_ENGINE_ERROR_NONE)
+        LOGE("Fail to GetOutputTensorProperty");
+    LOGI("LEAVE");
+    return ret;
+}
+
+int InferenceEngineVision::SetInputTensorProperty(inference_engine_layer_property &property)
+{
+    LOGI("ENTER");
+    int ret = mCommonEngine->SetInputTensorProperty(property);
+    if (ret != INFERENCE_ENGINE_ERROR_NONE)
+        LOGE("Fail to SetInputTensorProperty");
+    LOGI("LEAVE");
+    return ret;
+}
+
+int InferenceEngineVision::SetOutputTensorProperty(inference_engine_layer_property &property)
+{
+    LOGI("ENTER");
+    int ret = mCommonEngine->SetOutputTensorProperty(property);
+    if (ret != INFERENCE_ENGINE_ERROR_NONE)
+        LOGE("Fail to SetOutputTensorProperty");
+    LOGI("LEAVE");
+    return ret;
+}
+
 int InferenceEngineVision::SetTargetDevice(inference_target_type_e type)
 {
     int ret = mCommonEngine->SetTargetDevice(type);
@@ -266,6 +320,36 @@ int InferenceEngineVision::Load()
     return ret;
 }
 
+int InferenceEngineVision::Load(const char **models, const unsigned int num_of_models)
+{
+    LOGI("ENTER");
+
+    int ret = mCommonEngine->Load(models, num_of_models);
+    if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+        LOGE("Fail to load InferenceEngineVision");
+        return ret;
+    }
+
+    LOGI("LEAVE");
+
+    return ret;
+}
+
+int InferenceEngineVision::GetBackendCapacity(inference_engine_capacity *capacity)
+{
+    LOGI("ENTER");
+
+    int ret = mCommonEngine->GetBackendCapacity(capacity);
+    if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+        LOGE("Fail to get backend capacity");
+        return ret;
+    }
+
+    LOGI("LEAVE");
+
+    return ret;
+}
+
 int InferenceEngineVision::SetInput(cv::Mat cvImg)
 {
     mSourceSize = cvImg.size();
@@ -325,6 +409,18 @@ int InferenceEngineVision::Run(cv::Mat tensor)
     return ret;
 }
 
+int InferenceEngineVision::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+                               std::vector<inference_engine_tensor_buffer> &output_buffers)
+{
+    LOGI("ENTER");
+    int ret = mCommonEngine->Run(input_buffers, output_buffers);
+    if (ret != INFERENCE_ENGINE_ERROR_NONE)
+        LOGE("Fail to run InferenceEngineVision");
+
+    LOGI("LEAVE");
+    return ret;
+}
+
 int InferenceEngineVision::GetInferenceResult(ImageClassificationResults& results)
 {
     // Will contain top N results in ascending order.