#include "inference_engine_mlapi_private.h"
#include <fstream>
+#include <sstream>
#include <iostream>
#include <unistd.h>
#include <time.h>
throw std::runtime_error("shouldn't reach here");
}
}
+ std::string InferenceMLAPI::GetFileCustomProp(const std::string &path)
+ {
+ 	std::string custom;
+ 	// Derive the optional sidecar property file from the model path by
+ 	// replacing its extension with ".custom" (e.g. "net.tflite" -> "net.custom").
+ 	std::string custom_file = path.substr(0, path.find_last_of(".")) + ".custom";
+ 
+ 	if (IsFileReadable(custom_file)) {
+ 		// Slurp the whole property file into the returned string.
+ 		std::ifstream fp(custom_file);
+ 		std::stringstream buffer;
+ 
+ 		buffer << fp.rdbuf();
+ 		custom = buffer.str();
+ 	}
+ 
+ 	return custom;
+ }
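For illustration, a minimal standalone sketch of the sidecar lookup added above. IsFileReadable() is a stand-in re-implemented here with access(2) (the backend's own helper lives elsewhere in this file), and the model path is hypothetical:

#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <unistd.h>

// Stand-in for the backend's IsFileReadable() helper (assumption):
// true when 'path' exists and is readable by the current process.
static bool IsFileReadable(const std::string &path)
{
	return access(path.c_str(), R_OK) == 0;
}

// Mirrors GetFileCustomProp(): swap the model extension for ".custom"
// and return the sidecar file's contents, or "" if it is absent.
static std::string ReadSidecarProp(const std::string &model_path)
{
	std::string custom_file =
			model_path.substr(0, model_path.find_last_of(".")) + ".custom";

	if (!IsFileReadable(custom_file))
		return "";

	std::ifstream fp(custom_file);
	std::stringstream buffer;

	buffer << fp.rdbuf();
	return buffer.str();
}

int main()
{
	// Hypothetical model path; prints the contents of "net.custom" if present.
	std::cout << ReadSidecarProp("net.tflite") << std::endl;
	return 0;
}

Returning an empty string when no sidecar file exists lets the caller append the result unconditionally, which is exactly how the patch uses it below.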
std::string InferenceMLAPI::GetCustomProp()
{
if (mPluginType != INFERENCE_BACKEND_SNPE)
return "";
}
}
auto customOp = GetCustomProp();
+ customOp += GetFileCustomProp(model_str);
LOGI("customOp: %s", customOp.c_str());
int err = ml_single_open_full(&mSingle, model_str.c_str(), in_info, out_info,
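For context, ml_single_open_full() (nnstreamer-single.h) takes the combined property string as its final custom_option argument. Below is a hedged sketch of such a call, assuming the SNPE backend and a model that carries its own tensor info (the API permits null info handles in that case); every name other than the ML API calls is hypothetical:

#include <nnstreamer-single.h>
#include <string>

// Sketch: open a single-shot inference handle with an optional custom
// property string. Returns an ML API error code.
static int OpenWithCustomProp(const std::string &model_str,
                              const std::string &customOp)
{
	ml_single_h single = nullptr;
	int err = ml_single_open_full(
			&single, model_str.c_str(),
			nullptr, nullptr, /* model provides in/out tensor info */
			ML_NNFW_TYPE_SNPE, ML_NNFW_HW_ANY,
			customOp.empty() ? nullptr : customOp.c_str());
	if (err != ML_ERROR_NONE)
		return err;

	/* ... run inference via ml_single_invoke() ... */

	return ml_single_close(single);
}

In this sketch the option string is only forwarded when non-empty; the actual call site in the patch may pass it unconditionally.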
std::tuple<ml_nnfw_type_e, ml_nnfw_hw_e> GetNNFWInfo();
std::string GetModelPath(const std::vector<std::string>& model_paths);
std::string GetCustomProp();
+ std::string GetFileCustomProp(const std::string &path);
int GetTensorInfo(std::map<std::string, int>& designated_layers,
std::map<std::string, inference_engine_tensor_buffer> &buffers,
ml_tensors_data_h& dataHandle, ml_tensors_info_h& infoHandle);