// Initialize inference engine object for backbone model.
_backbone = make_unique<Inference>();
- _backbone->ConfigureBackendType(_config.backbone_engine_backend_type);
- _backbone->ConfigureTargetTypes(static_cast<int>(_config.backbone_target_device_type), true);
- _backbone->Bind();
+
+ int ret = _backbone->Bind(_config.backbone_engine_backend_type, _config.backbone_target_device_type);
+ if (ret != MEDIA_VISION_ERROR_NONE)
+ return ret;
// Tensor order is NCHW.
vector<model_layer_info>& input_layer_info = GetBackboneInputLayerInfo();
_backbone->ConfigureInputInfo(width, height, 1, ch, 127.5f, 127.5f, MV_INFERENCE_DATA_FLOAT32, input_layer_names);
_backbone->ConfigureOutputInfo(output_layer_names, output_tensor_info);
_backbone->ConfigureModelFiles("", _face_net_info->GetModelFilePath(), "");
- _backbone->Load();
+
+ ret = _backbone->Load();
+ if (ret != MEDIA_VISION_ERROR_NONE)
+ return ret;
_training_model = make_unique<SimpleShot>(_config.training_engine_backend_type,
_config.training_target_device_type,
_config.internal_model_file_path);
_internal = make_unique<Inference>();
- _internal->ConfigureBackendType(_config.inference_engine_backend_type);
- _internal->ConfigureTargetTypes(_config.inference_target_device_type, true);
- _internal->ConfigureModelFiles("", _config.internal_model_file_path, "");
- _internal->Bind();
+
+ ret = _internal->Bind(_config.inference_engine_backend_type, _config.inference_target_device_type);
+ if (ret != MEDIA_VISION_ERROR_NONE)
+ return ret;
+
_initialized = true;
output_tensor_info[0].shape[0] = _label_manager->GetMaxLabel();
_internal->ConfigureOutputInfo(output_layers, output_tensor_info);
+ _internal->ConfigureModelFiles("", _config.internal_model_file_path, "");
+
// Load the trained internal model.
ret = _internal->Load();
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
mv_inference_data_type_e mDataType; /**< Data type of an input tensor */
- mv_inference_backend_type_e mBackedType; /**< Backed type of model files */
-
int mTargetTypes; /**< Target type to run inference */
double mConfidenceThresHold; /**< Confidence threshold value */
void ConfigureOutputInfo(std::vector<std::string> names,
std::vector<inference_engine_tensor_info>& tensors_info);
- /**
- * @brief Configure inference backend type.
- *
- * @since_tizen 6.0
- */
- int ConfigureBackendType(const mv_inference_backend_type_e backendType);
-
/**
* @brief Configure an inference target device type such as CPU, GPU or NPU. (only one type can be set)
* @details Internally, a given device type will be converted to a new type.
* @retval #MEDIA_VISION_ERROR_NONE Successful
* @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
*/
- int Bind();
+ int Bind(int backend_type, int device_type);
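/*
 * A minimal call-sequence sketch for the new Bind() signature (illustrative only;
 * the backend and device enumerators below are assumed examples, not part of this patch):
 *
 *   Inference infer;
 *   int ret = infer.Bind(MV_INFERENCE_BACKEND_TFLITE, MV_INFERENCE_TARGET_DEVICE_CPU);
 *   if (ret != MEDIA_VISION_ERROR_NONE)
 *       return ret;
 *   // ... ConfigureInputInfo()/ConfigureOutputInfo()/ConfigureModelFiles(), then Load() ...
 */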
/**
* @brief Load model files
private:
void CheckSupportedInferenceBackend();
+ int CheckBackendType(const mv_inference_backend_type_e backendType);
bool IsTargetDeviceSupported(const int targetDevices);
int ConvertEngineErrorToVisionError(int error);
int ConvertTargetTypes(int given_types);
mWeightFilePath(),
mUserFilePath(),
mDataType(MV_INFERENCE_DATA_FLOAT32),
- mBackedType(MV_INFERENCE_BACKEND_NONE),
mTargetTypes(MV_INFERENCE_TARGET_DEVICE_CPU),
mConfidenceThresHold(),
mMeanValue(),
LOGI("LEAVE");
}
- int Inference::ConfigureBackendType(
- const mv_inference_backend_type_e backendType)
+ int Inference::CheckBackendType(const mv_inference_backend_type_e backendType)
{
// Check if a given backend type is valid or not.
if (backendType <= MV_INFERENCE_BACKEND_NONE ||
LOGI("backend engine : %d", backendType);
- mConfig.mBackedType = backendType;
-
return MEDIA_VISION_ERROR_NONE;
}
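/*
 * Illustrative note (an assumed example, not part of this patch): valid arguments are
 * the concrete backend enumerators strictly between MV_INFERENCE_BACKEND_NONE and
 * MV_INFERENCE_BACKEND_MAX, e.g.
 *
 *   CheckBackendType(MV_INFERENCE_BACKEND_TFLITE);  // expected: MEDIA_VISION_ERROR_NONE
 *   CheckBackendType(MV_INFERENCE_BACKEND_MAX);     // expected: rejected by the range check
 */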
return MEDIA_VISION_ERROR_NONE;
}
- int Inference::Bind(void)
+ int Inference::Bind(int backend_type, int device_type)
{
LOGI("ENTER");
- if (mConfig.mBackedType <= MV_INFERENCE_BACKEND_NONE ||
- mConfig.mBackedType >= MV_INFERENCE_BACKEND_MAX) {
- LOGE("NOT SUPPORTED BACKEND %d", mConfig.mBackedType);
- return MEDIA_VISION_ERROR_INVALID_OPERATION;
- }
+ int ret = CheckBackendType(static_cast<mv_inference_backend_type_e>(backend_type));
+ if (ret != MEDIA_VISION_ERROR_NONE)
+ return ret;
- std::string backendName = mSupportedInferenceBackend[mConfig.mBackedType].first;
+ std::string backendName = mSupportedInferenceBackend[backend_type].first;
LOGI("backend string name: %s", backendName.c_str());
inference_engine_config config = {
.backend_name = backendName,
- .backend_type = mConfig.mBackedType,
+ .backend_type = backend_type,
// By default, the target device is CPU. If the user defined a desired device type in the json file
// then the device type will be set by the Load callback.
- .target_devices = mConfig.mTargetTypes,
+ .target_devices = device_type,
};
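/*
 * For illustration only (assumed values, not from this patch): requesting the TFLITE
 * backend on the CPU would leave the structure above roughly as
 *
 *   config.backend_name   == "tflite"  // as registered in mSupportedInferenceBackend (assumed)
 *   config.backend_type   == MV_INFERENCE_BACKEND_TFLITE
 *   config.target_devices == MV_INFERENCE_TARGET_DEVICE_CPU
 */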
// Create a backend class object.
return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
}
- int ret = MEDIA_VISION_ERROR_NONE;
+ ret = MEDIA_VISION_ERROR_NONE;
// Load configuration file if a given backend type is mlapi.
if (config.backend_type == MV_INFERENCE_BACKEND_MLAPI) {
// Bind a backend library.
ret = mBackend->BindBackend(&config);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Fail to bind backend library.(%d)", mConfig.mBackedType);
+ LOGE("Fail to bind backend library.(%d)", ret);
return MEDIA_VISION_ERROR_INVALID_OPERATION;
}
goto out_of_function;
}
- ret = pInfer->ConfigureBackendType(
- (mv_inference_backend_type_e) backendType);
- if (ret != MEDIA_VISION_ERROR_NONE) {
- LOGE("Fail to configure a backend type.");
- goto out_of_function;
- }
-
ret = mv_engine_config_get_int_attribute(
engine_config, MV_INFERENCE_TARGET_DEVICE_TYPE, &targetTypes);
if (ret != MEDIA_VISION_ERROR_NONE) {
// Create an inference-engine-common class object and load its corresponding library.
// Ps. The inference engine gets a capability from a given backend by the Bind call,
// so access to mBackendCapacity should be done after Bind.
- ret = pInfer->Bind();
+ ret = pInfer->Bind(backendType, targetTypes);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to bind a backend engine.");
goto out_of_function;
Name: capi-media-vision
Summary: Media Vision library for Tizen Native API
-Version: 0.23.1
+Version: 0.23.2
Release: 0
Group: Multimedia/Framework
License: Apache-2.0 and BSD-3-Clause