return INFERENCE_ENGINE_ERROR_NONE;
}
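+// Initialize the vision engine. All backend setup is delegated to the
+// underlying common engine, which consumes the given |config|.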
+int InferenceEngineVision::Init(inference_engine_config *config)
+{
+ LOGI("ENTER");
+
+ int ret = mCommonEngine->Init(config);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Fail to init");
+ return ret;
+ }
+
+ LOGI("LEAVE");
+ return ret;
+}
+
int InferenceEngineVision::SetUserFile(std::string filename)
{
std::ifstream fp(filename.c_str());
return ret;
}
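+// Retrieve the input layer property from the common engine and return it
+// through |property|.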
+int InferenceEngineVision::GetInputTensorProperty(inference_engine_layer_property *property)
+{
+ LOGE("ENTER");
+ int ret = mCommonEngine->GetInputTensorProperty(property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE)
+ LOGE("Fail to GetInputTensorProperty");
+ LOGE("LEAVE");
+ return ret;
+}
+
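+// Retrieve the output layer property from the common engine and return it
+// through |property|.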
+int InferenceEngineVision::GetOutputTensorProperty(inference_engine_layer_property *property)
+{
+ LOGE("ENTER");
+ int ret = mCommonEngine->GetOutputTensorProperty(property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE)
+ LOGE("Fail to GetOutputTensorProperty");
+ LOGE("LEAVE");
+ return ret;
+}
+
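+// Hand a caller-provided input layer property down to the common engine.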
+int InferenceEngineVision::SetInputTensorProperty(inference_engine_layer_property &property)
+{
+ LOGE("ENTER");
+ int ret = mCommonEngine->SetInputTensorProperty(property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE)
+ LOGE("Fail to SetInputTensorProperty");
+ LOGE("LEAVE");
+ return ret;
+}
+
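+// Hand a caller-provided output layer property down to the common engine.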
+int InferenceEngineVision::SetOutputTensorProperty(inference_engine_layer_property &property)
+{
+ LOGE("ENTER");
+ int ret = mCommonEngine->SetOutputTensorProperty(property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE)
+ LOGE("Fail to SetOutputTensorProperty");
+ LOGE("LEAVE");
+ return ret;
+}
+
int InferenceEngineVision::SetTargetDevice(inference_target_type_e type)
{
int ret = mCommonEngine->SetTargetDevice(type);
return ret;
}
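+// Load the model files given in |models| (|num_of_models| entries) into
+// the backend via the common engine.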
+int InferenceEngineVision::Load(const char **models, const unsigned int num_of_models)
+{
+ LOGI("ENTER");
+
+ int ret = mCommonEngine->Load(models, num_of_models);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Fail to load InferenceEngineVision");
+ return ret;
+ }
+
+ LOGI("LEAVE");
+
+ return ret;
+}
+
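+// Query the current backend's capability information from the common
+// engine and return it through |capacity|.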
+int InferenceEngineVision::GetBackendCapacity(inference_engine_capacity *capacity)
+{
+ LOGI("ENTER");
+
+ int ret = mCommonEngine->GetBackendCapacity(capacity);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Fail to get backend capacity");
+ return ret;
+ }
+
+ LOGI("LEAVE");
+
+ return ret;
+}
+
int InferenceEngineVision::SetInput(cv::Mat cvImg)
{
mSourceSize = cvImg.size();
return ret;
}
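+// Run a single inference with the given input/output tensor buffers by
+// delegating to the common engine; callers are presumably expected to
+// have initialized the engine and loaded a model beforehand.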
+int InferenceEngineVision::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+ std::vector<inference_engine_tensor_buffer> &output_buffers)
+{
+ LOGI("ENTER");
+ int ret = mCommonEngine->Run(input_buffers, output_buffers);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE)
+ LOGE("Fail to run InferenceEngineVision");
+
+ LOGI("LEAVE");
+ return ret;
+}
+
int InferenceEngineVision::GetInferenceResult(ImageClassificationResults& results)
{
// Will contain top N results in ascending order.