int EngineConfig::getStringAttribute(const std::string& key, std::string *value) const
{
DictStrConstIter dictIter = m_strDict.find(key);
+
if (dictIter == m_strDict.end()) {
LOGE("Attempt to access to the unsupported string attribute [%s] "
"of the engine config %p", key.c_str(), this);
void ConfigureOutputNodeNames(const std::vector<std::string> nodeNames);
/**
- * @brief Prepares inference
- * @details Use this function to create the instance based on
- * the configured backend, to set tensor information,
- * and load the models.
+ * @brief Binds a backend engine
+ * @details Use this function to bind a backend engine for the inference.
+ * This creates an inference-engine-common class object and loads a backend
+ * library which interfaces with a neural network runtime such as TF Lite,
+ * OpenCV, ARMNN, and so on.
*
- * @since_tizen 5.5
+ * Note that the created inference-engine-common object is released and its
+ * corresponding backend library is unbound when the destructor of the
+ * Inference class is called.
+ *
+ * @since_tizen 6.0
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ */
+ int Bind();
+
+ /**
+ * @brief Sets the default configuration for the inference
+ * @details Use this function to set the default configuration given by the user in a JSON file.
+ *
+ * Note that this function should be called after Bind().
+ *
+ * @since_tizen 6.0
*
* @return @c 0 on success, otherwise a negative error value
* @retval #MEDIA_VISION_ERROR_NONE Successful
*/
int Prepare();
+ /**
+ * @brief Loads model files
+ * @details Use this function to load the given model files for the inference.
+ *
+ * Note that this function should be called after Prepare().
+ *
+ * @since_tizen 6.0
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ */
+ int Load();
+
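+ /*
+  * Illustrative usage sketch (an assumption, not part of this patch; the
+  * owning object name "inference" is hypothetical): once the backend and
+  * model information have been configured, the expected call order is
+  *
+  *     int ret = inference.Bind();      // bind a backend library
+  *     if (ret == MEDIA_VISION_ERROR_NONE)
+  *         ret = inference.Prepare();   // pass the default configuration to the backend
+  *     if (ret == MEDIA_VISION_ERROR_NONE)
+  *         ret = inference.Load();      // load the model files
+  */
+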
/**
* @brief Runs inference with the roi of a given image
* @details Use this function to run forward pass with the given image.
int GetResults(std::vector<std::vector<int>>* dimInfo, std::vector<float*> *results);
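+
+ // The engine configuration handle is cached on this object by
+ // mv_inference_configure_engine_open() so that mv_inference_prepare() can
+ // retrieve it later through mv_inference_get_engine_config().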
+ mv_engine_config_h GetEngineConfig(void) { return engine_config; }
+
+ void SetEngineConfig(mv_engine_config_h config) { engine_config = config; }
+
private:
bool mCanRun; /**< Indicates whether the inference is ready to run */
InferenceConfig mConfig;
+ mv_engine_config_h engine_config;
InferenceEngineVision * mBackend;
/*************/
/* Inference */
/*************/
+
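+/**
+ * @brief Gets the engine configuration handle that was set on the given inference handle
+ *        by mv_inference_configure_engine_open().
+ */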
+mv_engine_config_h mv_inference_get_engine_config(mv_inference_h infer);
+
/**
* @brief Creates an inference handle.
* @details Use this function to create an inference handle. After creation
auto iter = mSupportedInferenceBackend.find(i);
LOGE("%d: %s: %s", i, (iter->second).first.c_str(), (iter->second).second ? "TRUE" : "FALSE");
}
+
LOGI("LEAVE");
}
mConfig.mOutputNodeNames = nodeNames;
}
-int Inference::Prepare()
+int Inference::Bind(void)
{
- int ret = INFERENCE_ENGINE_ERROR_NONE;
- LOGE("ENTER");
+ LOGI("ENTER");
+
if (mConfig.mBackedType <= MV_INFERENCE_BACKEND_NONE ||
mConfig.mBackedType >= MV_INFERENCE_BACKEND_MAX) {
LOGE("NOT SUPPORTED BACKEND %d", mConfig.mBackedType);
auto iter = mSupportedInferenceBackend.find(mConfig.mBackedType);
std::string backendName = (iter->second).first;
- LOGE("backend string name: %s", backendName.c_str());
+ LOGI("backend string name: %s", backendName.c_str());
- inference_engine_config config = {
- .backend_name = backendName,
- .target_devices = mConfig.mTargetType,
- };
+ inference_engine_config config = {
+ .backend_name = backendName,
+ // By default, the target device is CPU. If the user defined a desired device type
+ // in the JSON file, it will be applied later through SetTargetDevice() (see Prepare()).
+ .target_devices = mConfig.mTargetType,
+ };
- // Create backend class object.
+ // Create a backend class object.
mBackend = new InferenceEngineVision(&config);
-
if (!mBackend) {
LOGE("Fail to create backend");
return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
}
- // Bind backend library.
- ret = mBackend->BindBackend(&config);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Fail to bind backend library.(%d)", mConfig.mBackedType);
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
+ // Bind a backend library.
+ int ret = mBackend->BindBackend(&config);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Fail to bind backend library.(%d)", mConfig.mBackedType);
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ LOGI("LEAVE");
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int Inference::Prepare(void)
+{
+ LOGI("ENTER");
// Input Tensor Param
mBackend->SetInputTensorParamInput(mConfig.mTensorInfo.width,
mBackend->SetOutputTensorParamNodes(mConfig.mOutputNodeNames);
+ mBackend->SetTargetDevice(mConfig.mTargetType);
+
+ LOGI("LEAVE");
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int Inference::Load(void)
+{
+ LOGI("ENTER");
+
// Add model files to load.
// TODO. model file and its corresponding label file should be added by
// user request.
models.push_back(mConfig.mUserFilePath);
// Request model loading to backend engine.
- ret = mBackend->Load(models, 1);
+ int ret = mBackend->Load(models, 1);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
delete mBackend;
LOGE("Fail to load model");
mCanRun = true;
- // target type
- // foreach supported??
- mBackend->SetTargetDevice(config.target_devices);
- LOGE("LEAVE");
+ LOGI("LEAVE");
+
return MEDIA_VISION_ERROR_NONE;
}
#else
- ret = mv_inference_configure_model_open(infer, engine_config);
+ ret = mv_inference_configure_engine_open(infer, engine_config);
if (ret != MEDIA_VISION_ERROR_NONE){
- LOGE("Fail to configure model");
+ LOGE("Fail to configure engine and target");
return ret;
}
- ret = mv_inference_configure_tensor_info_open(infer, engine_config);
+#endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return ret;
+}
+
+
+int mv_inference_prepare(mv_inference_h infer)
+{
+ MEDIA_VISION_SUPPORT_CHECK(__mv_inference_check_system_info_feature_supported());
+ MEDIA_VISION_INSTANCE_CHECK(infer);
+
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+ mv_engine_config_h engine_config = mv_inference_get_engine_config(infer);
+
+#ifdef MEDIA_VISION_INFERENCE_LICENSE_PORT
+
+ //ret = mv_inference_prepare_lic(infer);
+
+#else
+
+ ret = mv_inference_configure_model_open(infer, engine_config);
if (ret != MEDIA_VISION_ERROR_NONE){
- LOGE("Fail to configure tensor information");
+ LOGE("Fail to configure model");
return ret;
}
- ret = mv_inference_configure_engine_open(infer, engine_config);
+ ret = mv_inference_configure_tensor_info_open(infer, engine_config);
if (ret != MEDIA_VISION_ERROR_NONE){
- LOGE("Fail to configure engine and target");
+ LOGE("Fail to configure tensor information");
return ret;
}
return ret;
}
-#endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
-
- MEDIA_VISION_FUNCTION_LEAVE();
- return ret;
-}
-
-
-int mv_inference_prepare(mv_inference_h infer)
-{
- MEDIA_VISION_SUPPORT_CHECK(__mv_inference_check_system_info_feature_supported());
- MEDIA_VISION_INSTANCE_CHECK(infer);
-
- MEDIA_VISION_FUNCTION_ENTER();
-
- int ret = MEDIA_VISION_ERROR_NONE;
-
-#ifdef MEDIA_VISION_INFERENCE_LICENSE_PORT
-
- //ret = mv_inference_prepare_lic(infer);
-
-#else
-
ret = mv_inference_prepare_open(infer);
#endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
using namespace mediavision::inference;
+mv_engine_config_h mv_inference_get_engine_config(mv_inference_h infer)
+{
+ Inference *pInfer = static_cast<Inference *>(infer);
+ return pInfer->GetEngineConfig();
+}
+
int mv_inference_create_open(mv_inference_h *infer)
{
if (infer == NULL ) {
int mv_inference_configure_model_open(mv_inference_h infer, mv_engine_config_h engine_config)
{
+ LOGI("ENTER");
+
Inference *pInfer = static_cast<Inference *>(infer);
int ret = MEDIA_VISION_ERROR_NONE;
double modelMeanValue = 0.0;
int backendType= 0;
size_t userFileLength = 0;
+
ret = mv_engine_config_get_string_attribute(engine_config,
MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
&modelConfigFilePath);
if (modelUserFilePath)
free(modelUserFilePath);
+ LOGI("LEAVE");
+
return ret;
}
int mv_inference_configure_engine_open(mv_inference_h infer, mv_engine_config_h engine_config)
{
Inference *pInfer = static_cast<Inference *>(infer);
-
int backendType = 0;
int targetType = 0;
int ret = MEDIA_VISION_ERROR_NONE;
+ pInfer->SetEngineConfig(engine_config);
+
ret = mv_engine_config_get_int_attribute(engine_config,
MV_INFERENCE_BACKEND_TYPE,
&backendType);
ret = pInfer->ConfigureEngine((mv_inference_backend_type_e)backendType,
(mv_inference_target_type_e)targetType);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to configure a backend engine.");
+ goto _ERROR_;
+ }
+
+ // Create an inference-engine-common class object and load its corresponding library.
+ ret = pInfer->Bind();
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to bind a backend engine.");
+ }
_ERROR_:
return ret;
int ret = MEDIA_VISION_ERROR_NONE;
+ // Pass the parameters needed for loading model files to the backend engine.
ret = pInfer->Prepare();
- if (ret != MEDIA_VISION_ERROR_NONE)
+ if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to prepare inference");
+ return ret;
+ }
+
+ // Request to load model files to a backend engine.
+ ret = pInfer->Load();
+ if (ret != MEDIA_VISION_ERROR_NONE)
+ LOGE("Fail to load model files.");
return ret;
}