}
}
- const char *InferenceMLAPI::GetCustomProp()
+ // Builds the backend-specific custom-property string handed to
+ // ml_single_open_full(). Empty for every backend except SNPE.
+ // Returned by value (std::string) so the buffer outlives the call site,
+ // unlike the previous const char* which could not carry a built string.
+ std::string InferenceMLAPI::GetCustomProp()
{
	if (mPluginType != INFERENCE_BACKEND_SNPE)
		return "";
+ // Map the inference target device to the SNPE runtime token.
+ // NOTE(review): casing changed from "RUNTIME:*" to "Runtime:*" by this
+ // patch — presumably to match the key the SNPE tensor filter parses;
+ // confirm against the nnstreamer SNPE subplugin.
+ std::string custom;
+ if (mTargetDevice == INFERENCE_TARGET_CPU)
+ custom = "Runtime:CPU";
+ else if (mTargetDevice == INFERENCE_TARGET_GPU)
+ custom = "Runtime:GPU";
+ else
+ custom = "Runtime:DSP";
+
+ // When output layers were designated by the caller, append them as a
+ // ';'-separated "OutputTensor" list and request user buffers.
+ if (!mOutputProperty.layers.empty())
+ {
+ size_t layerSize = mOutputProperty.layers.size();
+ custom += ",OutputTensor:";
+
+ for (auto &layer : mOutputProperty.layers)
+ {
+ LOGI("output layer name = %s", layer.first.c_str());
+ custom += layer.first;
+ // Separator goes between names only — no trailing ';' after the last.
+ if (--layerSize > 0)
+ {
+ custom += ";";
+ }
+ }
+ custom += ",UserBuffer:true";
+ }
+
+ return custom;
}
int InferenceMLAPI::Load(std::vector<std::string> model_paths,
return ret;
}
}
+ auto customOp = GetCustomProp();
+ LOGI("customOp: %s", customOp.c_str());
int err = ml_single_open_full(&mSingle, model_str.c_str(), in_info, out_info,
- nnfw_type, nnfw_hw, GetCustomProp());
+ nnfw_type, nnfw_hw, customOp.c_str());
if (err != ML_ERROR_NONE) {
LOGE("Failed to request ml_single_open_full(%d).", err);
ml_tensors_info_destroy(in_info);
bool IsFileReadable(const std::string& path);
std::tuple<ml_nnfw_type_e, ml_nnfw_hw_e> GetNNFWInfo();
std::string GetModelPath(const std::vector<std::string>& model_paths);
- const char *GetCustomProp();
+ // Returns the custom-property string for ml_single_open_full();
+ // empty for non-SNPE backends.
+ std::string GetCustomProp();
int GetTensorInfo(std::map<std::string, int>& designated_layers,
std::map<std::string, inference_engine_tensor_buffer> &buffers,
ml_tensors_data_h& dataHandle, ml_tensors_info_h& infoHandle);