{
LOGI("ENTER");
- // ML Single API of MLAPI requires model_paths rule like below,
- // "so library file path,nb model file path" or vise versa.
- std::string model_str(model_paths[0] + "," + model_paths[1]);
-
- LOGI("Model name = %s", model_str.c_str());
+ std::string model_str("");
// TODO. Set NNFW backend type and HW type properly.
LOGE("Invalid target device type.");
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
}
+
+ // The ML Single API of MLAPI requires model_paths to be combined as below:
+ // "so library file path,nb model file path" or vice versa.
+ model_str += model_paths[0] + "," + model_paths[1];
break;
case INFERENCE_BACKEND_NNFW:
nnfw_type = ML_NNFW_TYPE_NNFW;
+
if (mTargetDevice == INFERENCE_TARGET_CPU) {
nnfw_hw = ML_NNFW_HW_CPU_NEON;
LOGI("Target device is NEON.");
LOGE("Invalid inference target device type.");
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
+
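+ // Unlike the MLAPI backend above, the NNFW backend loads a single model
+ // file, so only the first model path is used.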
+ model_str += model_paths[0];
LOGI("NNFW tensor filter will be used.");
break;
// TODO.
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
+ LOGI("Model name = %s", model_str.c_str());
+
+ // TODO. Create ml_tensors_info for the input and output tensors and pass
+ // them as parameters of ml_single_open().
+
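+ // Until then, NULL is passed for the input/output tensor info so that
+ // the underlying framework derives them from the model file itself.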
int ret = ml_single_open(&mSingle, model_str.c_str(), NULL, NULL,
nnfw_type, nnfw_hw);
if (ret != ML_ERROR_NONE) {