}
}
- const char* InferenceMLAPI::GetCustomProp()
+ std::string InferenceMLAPI::GetCustomProp()
{
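+ // Custom properties are consumed only by the SNPE NPU backend;
+ // every other backend gets an empty string.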
if (mPluginType != INFERENCE_BACKEND_NPU_SNPE)
    return "";
- return mTargetDevice == INFERENCE_TARGET_CPU ? "RUNTIME:CPU" :
- mTargetDevice == INFERENCE_TARGET_GPU ? "RUNTIME:GPU" : "RUNTIME:DSP";
+ std::string custom;
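+ // Map the requested inference target to the matching SNPE runtime;
+ // anything other than CPU or GPU falls back to the DSP runtime.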
+ if (mTargetDevice == INFERENCE_TARGET_CPU)
+     custom = "Runtime:CPU";
+ else if (mTargetDevice == INFERENCE_TARGET_GPU)
+     custom = "Runtime:GPU";
+ else
+     custom = "Runtime:DSP";
+
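+ // If specific output layers were requested, append them as a
+ // ';'-separated list (e.g. "OutputLayer:layer1;layer2").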
+ if (!mOutputProperty.layers.empty()) {
+     size_t layerSize = mOutputProperty.layers.size();
+     custom += ",OutputLayer:";
+
+     for (auto& layer : mOutputProperty.layers) {
+         LOGI("output layer name = %s", layer.first.c_str());
+         custom += layer.first;
+         if (--layerSize > 0) {
+             custom += ";";
+         }
+     }
+ }
+
+ return custom;
}
int InferenceMLAPI::Load(std::vector<std::string> model_paths,
ml_tensors_info_h in_info = NULL, out_info = NULL;
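+ // Build the custom option string once so it can be logged and its
+ // c_str() buffer handed to ml_single_open_full() below.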
+ auto customOp = GetCustomProp();
+ LOGI("customOp: %s", customOp.c_str());
+
int err = ml_single_open_full(&mSingle, model_str.c_str(), in_info, out_info,
- nnfw_type, nnfw_hw, GetCustomProp());
+ nnfw_type, nnfw_hw, customOp.c_str());
if (err != ML_ERROR_NONE) {
    LOGE("Failed to request ml_single_open_full(%d).", err);
    return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
bool IsFileReadable(const std::string& path);
std::tuple<ml_nnfw_type_e, ml_nnfw_hw_e> GetNNFWInfo();
std::string GetModelPath(const std::vector<std::string>& model_paths);
- const char* GetCustomProp();
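+ // Returns the SNPE custom property string
+ // (e.g. "Runtime:DSP,OutputLayer:a;b"); empty for other backends.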
+ std::string GetCustomProp();
#else
int ConvertTensorType(int tensor_type);
#endif