.backend_name = backendName,
.target_devices = INFERENCE_TARGET_CPU,
};
+
+ // Create backend class object.
// Use std::nothrow so allocation failure yields nullptr and the check below can fire.
mBackend = new (std::nothrow) InferenceEngineVision(&config);
if (!mBackend) {
return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
}
- ret = mBackend->Init(mConfig.mConfigFilePath,
- mConfig.mWeightFilePath,
- mConfig.mUserFilePath);
-
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- return ConvertEngineErrorToVisionError(ret);
- }
+ // Bind backend library.
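+ // (Assumption: BindBackend() loads the engine plugin selected by
+ // config.backend_name; its implementation is outside this patch.)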
+ ret = mBackend->BindBackend(&config);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Fail to bind backend library.(%d)", mConfig.mBackedType);
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
// Input Tensor Param
// (height, dim and ch are assumed fields of mTensorInfo, by analogy with width.)
mBackend->SetInputTensorParamInput(mConfig.mTensorInfo.width,
                                   mConfig.mTensorInfo.height,
                                   mConfig.mTensorInfo.dim,
                                   mConfig.mTensorInfo.ch);

// Output Tensor Param
mBackend->SetOutputTensorParamNodes(mConfig.mOutputNodeNames);
- // load model
- ret = mBackend->Load();
+ // Add model files to load.
+ // TODO. The model file and its corresponding label file should be
+ // added at the user's request.
+ std::vector<std::string> models;
+ models.push_back(mConfig.mWeightFilePath);
+ models.push_back(mConfig.mUserFilePath);
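+ // The push order mirrors the old Init() arguments: weight file first,
+ // then the user (label) file.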
+
+ // Request model loading to backend engine.
+ ret = mBackend->Load(models, 1);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
    delete mBackend;
    LOGE("Fail to load model");
    return ConvertEngineErrorToVisionError(ret);
}

// target type
// TODO. should every supported target device be handled here?
- mBackend->SetTargetDevice(mConfig.mTargetType);
+ mBackend->SetTargetDevice(config.target_devices);

LOGE("LEAVE");

return MEDIA_VISION_ERROR_NONE;
}
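
// --- Illustrative sketch (not part of the patch) ---
// ConvertEngineErrorToVisionError() is referenced above but not shown; a
// minimal version, assuming it only maps engine status codes onto
// media-vision ones, could look like this. The real mapping in mv_inference
// likely covers more error codes.
static int ConvertEngineErrorToVisionError(int ret)
{
    if (ret == INFERENCE_ENGINE_ERROR_NONE)
        return MEDIA_VISION_ERROR_NONE;

    // Hypothetical catch-all; the actual code may distinguish more cases.
    return MEDIA_VISION_ERROR_INVALID_OPERATION;
}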