InferenceMLAPI::~InferenceMLAPI()
{
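+ // NOTE: the swap-with-empty idiom mirrors the old std::vector code; for std::map, clear() alone already frees the nodes.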
mDesignated_inputs.clear();
- std::vector<std::string>().swap(mDesignated_inputs);
+ std::map<std::string, int>().swap(mDesignated_inputs);
mDesignated_outputs.clear();
- std::vector<std::string>().swap(mDesignated_outputs);
+ std::map<std::string, int>().swap(mDesignated_outputs);
ml_single_close(mSingle);
}

int InferenceMLAPI::SetPrivateData(void *data)
{
LOGI("ENTER");

// Assumed restored context: the backend type is extracted from the user-given data and validated before being cached.
inference_backend_type_e type =
*(static_cast<inference_backend_type_e *>(data));

mPluginType = type;
-
+ LOGI("backend type.(%d)", type);
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
}
int InferenceMLAPI::GetInputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers)
+ std::map<std::string, inference_engine_tensor_buffer> &buffers)
{
LOGI("ENTER");
buffers.clear();
- int ret;
- unsigned int cnt;
-
- ret = ml_tensors_info_get_count(mInputInfoHandle, &cnt);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- LOGI("input tensor count = %u", cnt);
+ int ret = INFERENCE_ENGINE_ERROR_NONE;
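+ // The ml_tensors_info_get_count() query was dropped; the loop below walks the designated-input map instead.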
// TODO. Below is test code; should we allocate a new buffer for every inference?
if (mInputDataHandle == NULL) {
// Assumed restored context: lazily create the input data handle once and reuse it across inferences.
ret = ml_tensors_data_create(mInputInfoHandle, &mInputDataHandle);
if (ret != ML_ERROR_NONE) {
LOGE("Failed to request ml_tensors_data_create(%d).", ret);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
}
// TODO. Cache tensor info and reduce function calls in UpdateTensorsInfo().
- for (unsigned int i = 0; i < cnt; ++i) {
+ for (auto& input : mDesignated_inputs) {
inference_engine_tensor_buffer in_buffer;
ml_tensor_type_e in_type;
- ret = ml_tensors_data_get_tensor_data(mInputDataHandle, i, &in_buffer.buffer, &in_buffer.size);
+ ret = ml_tensors_data_get_tensor_data(mInputDataHandle, input.second, &in_buffer.buffer, &in_buffer.size);
if (ret != ML_ERROR_NONE) {
LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", ret);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}

LOGE("buffer = %p, size = %zu\n", in_buffer.buffer, in_buffer.size);
- ret = ml_tensors_info_get_tensor_type(mInputInfoHandle, i, &in_type);
+ ret = ml_tensors_info_get_tensor_type(mInputInfoHandle, input.second, &in_type);
if (ret != ML_ERROR_NONE) {
LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).", ret);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}

// ConvertTensorType() is assumed from this plugin's sources: it maps ml_tensor_type_e to the engine's data type, or -1 when unsupported.
int type = ConvertTensorType(in_type);
if (type == -1)
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;

in_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
in_buffer.owner_is_backend = 1;
- buffers.push_back(in_buffer);
+ buffers.insert(std::make_pair(input.first, in_buffer));
}
LOGI("LEAVE");
}
int InferenceMLAPI::GetOutputTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &buffers)
+ std::map<std::string, inference_engine_tensor_buffer> &buffers)
{
LOGI("ENTER");
buffers.clear();
- int ret;
- unsigned int cnt;
-
- ret = ml_tensors_info_get_count(mOutputInfoHandle, &cnt);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- LOGI("output tensor count = %u", cnt);
+ int ret = INFERENCE_ENGINE_ERROR_NONE;
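+ // As on the input side, iteration is driven by mDesignated_outputs rather than a count query.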
// TODO. Below is test code; should we allocate a new buffer for every inference?
if (mOutputDataHandle == NULL) {
// Assumed restored context, mirroring the input path: create the output data handle once.
ret = ml_tensors_data_create(mOutputInfoHandle, &mOutputDataHandle);
if (ret != ML_ERROR_NONE) {
LOGE("Failed to request ml_tensors_data_create(%d).", ret);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
}
// TODO. Cache tensor info and reduce function calls in UpdateTensorsInfo().
- for (unsigned int i = 0; i < cnt; ++i) {
+ for (auto& output : mDesignated_outputs) {
inference_engine_tensor_buffer out_buffer;
ml_tensor_type_e out_type;
- ret = ml_tensors_data_get_tensor_data(mOutputDataHandle, i, &out_buffer.buffer, &out_buffer.size);
+ ret = ml_tensors_data_get_tensor_data(mOutputDataHandle, output.second, &out_buffer.buffer, &out_buffer.size);
if (ret != ML_ERROR_NONE) {
LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", ret);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}

LOGE("buffer = %p, size = %zu\n", out_buffer.buffer, out_buffer.size);
- ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, i, &out_type);
+ ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, output.second, &out_type);
if (ret != ML_ERROR_NONE) {
LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).", ret);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}

// Same assumed ConvertTensorType() helper as in GetInputTensorBuffers().
int type = ConvertTensorType(out_type);
if (type == -1)
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;

out_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
out_buffer.owner_is_backend = 1;
- buffers.push_back(out_buffer);
+ buffers.insert(std::make_pair(output.first, out_buffer));
}
LOGI("LEAVE");
LOGI("ENTER");
// TODO. Need to check if model file loading is done.
- int ret;
- unsigned int cnt;
+ int ret = INFERENCE_ENGINE_ERROR_NONE;
- ret = ml_tensors_info_get_count(mInputInfoHandle, &cnt);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- LOGI("input tensor count = %u", cnt);
-
- for (unsigned int i = 0; i < cnt; ++i) {
+ for (auto& input : mDesignated_inputs) {
inference_engine_tensor_info tensor_info;
ml_tensor_type_e in_type;
ml_tensor_dimension in_dim;
- char *in_name = NULL;
size_t in_size = 1;
- ret = ml_tensors_info_get_tensor_type(mInputInfoHandle, i, &in_type);
+ ret = ml_tensors_info_get_tensor_type(mInputInfoHandle, input.second, &in_type);
if (ret != ML_ERROR_NONE) {
LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).",
ret);
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
}
- ret = ml_tensors_info_get_tensor_dimension(mInputInfoHandle, i, in_dim);
+ ret = ml_tensors_info_get_tensor_dimension(mInputInfoHandle, input.second, in_dim);
if (ret != ML_ERROR_NONE) {
LOGE("Failed to request ml_tensors_info_get_tensor_dimension(%d).",
ret);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}

// Assumed restored context: fold the dimensions into the flat element count.
for (unsigned int idx = 0; idx < ML_TENSOR_RANK_LIMIT; ++idx)
in_size *= in_dim[idx];

LOGI("input tensor size = %zu", in_size);
- ret = ml_tensors_info_get_tensor_name(mInputInfoHandle, i, &in_name);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
- ret);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- LOGI("input tensor name = %s", in_name);
+ LOGI("input tensor name = %s", input.first.c_str());
// Same assumed ConvertTensorType() helper as above.
int type = ConvertTensorType(in_type);
if (type == -1)
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;

tensor_info.data_type = static_cast<inference_tensor_data_type_e>(type);
tensor_info.size = in_size;
- property.tensor_infos.push_back(tensor_info);
+ property.layers.insert(std::make_pair(input.first, tensor_info));
// TODO. Compare tensor info from engine to one from a given property.
}
- property.layer_names = mInputProperty.layer_names;
-
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
}

int InferenceMLAPI::GetOutputLayerProperty(
inference_engine_layer_property &property)
{
LOGI("ENTER");
// TODO. Need to check if model file loading is done.
- int ret;
- unsigned int cnt;
+ int ret = INFERENCE_ENGINE_ERROR_NONE;
- ret = ml_tensors_info_get_count(mOutputInfoHandle, &cnt);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- LOGI("output tensor count = %u", cnt);
-
- for (unsigned int i = 0; i < cnt; ++i) {
+ for (auto& output : mDesignated_outputs) {
inference_engine_tensor_info tensor_info;
ml_tensor_type_e out_type;
unsigned int out_dim[ML_TENSOR_RANK_LIMIT];
- char *out_name = NULL;
size_t out_size = 1;
- ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, i, &out_type);
+ ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, output.second, &out_type);
if (ret != ML_ERROR_NONE) {
LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).",
ret);
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
}
- ret = ml_tensors_info_get_tensor_dimension(mOutputInfoHandle, i, out_dim);
+ ret = ml_tensors_info_get_tensor_dimension(mOutputInfoHandle, output.second, out_dim);
if (ret != ML_ERROR_NONE) {
LOGE("Failed to request ml_tensors_info_get_tensor_dimension(%d).",
ret);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}

// Assumed restored context: fold the dimensions into the flat element count, as on the input side.
for (unsigned int idx = 0; idx < ML_TENSOR_RANK_LIMIT; ++idx)
out_size *= out_dim[idx];

LOGI("output tensor size = %zu", out_size);
- ret = ml_tensors_info_get_tensor_name(mOutputInfoHandle, i, &out_name);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
- ret);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- LOGI("output tensor name = %s", out_name);
+ LOGI("output tensor name = %s", output.first.c_str());
// Same assumed ConvertTensorType() helper as above.
int type = ConvertTensorType(out_type);
if (type == -1)
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;

tensor_info.data_type = static_cast<inference_tensor_data_type_e>(type);
tensor_info.size = out_size;
- property.tensor_infos.push_back(tensor_info);
+ property.layers.insert(std::make_pair(output.first, tensor_info));
// TODO. Compare tensor info from engine to one from a given property.
}
- property.layer_names = mOutputProperty.layer_names;
-
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
}

int InferenceMLAPI::SetInputLayerProperty(
inference_engine_layer_property &property)
{
LOGI("ENTER");
- std::vector<std::string>::iterator iter;
- for (iter = property.layer_names.begin();
- iter != property.layer_names.end(); iter++) {
- std::string name = *iter;
- LOGI("input layer name = %s", name.c_str());
+ for (auto& layer : property.layers) {
+ LOGI("input layer name = %s", layer.first.c_str());
}
mDesignated_inputs.clear();
- std::vector<std::string>().swap(mDesignated_inputs);
+ std::map<std::string, int>().swap(mDesignated_inputs);
// TODO. Request the input property information from the ML Single API of the NNStreamer backend
// and use it instead of the user-given one.
// Call UpdateTensorsInfo() after requesting the input info.
- mDesignated_inputs = property.layer_names;
+
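+ // No direct replacement: the input name-to-index map is now rebuilt from the model in UpdateTensorsInfo().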
mInputProperty = property;
LOGI("LEAVE");
{
LOGI("ENTER");
- std::vector<std::string>::iterator iter;
- for (iter = property.layer_names.begin();
- iter != property.layer_names.end(); iter++) {
- std::string name = *iter;
- LOGI("output layer name = %s", name.c_str());
+ for (auto& layer : property.layers) {
+ LOGI("output layer name = %s", layer.first.c_str());
}
mDesignated_outputs.clear();
- std::vector<std::string>().swap(mDesignated_outputs);
+ std::map<std::string, int>().swap(mDesignated_outputs);
// TODO. Request the output property information from the ML Single API of the NNStreamer backend
// and use it instead of the user-given one.
// Call UpdateTensorsInfo() after requesting the output info.
- mDesignated_outputs = property.layer_names;
+
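+ // Likewise, the output name-to-index map is rebuilt in UpdateTensorsInfo().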
mOutputProperty = property;
LOGI("LEAVE");
}
int InferenceMLAPI::CheckTensorBuffers(
- std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers)
+ std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+ std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
{
LOGI("ENTER");
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
+ unsigned int cnt = 0;
+ ret = ml_tensors_info_get_count(mInputInfoHandle, &cnt);
+ if (ret != ML_ERROR_NONE || cnt == 0) {
+ LOGE("Failed to request ml_tensors_info_get_count(%d) with cnt %u.", ret, cnt);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ LOGI("input tensor count = %u", cnt);
+ mDesignated_inputs.clear();
+ std::map<std::string, int>().swap(mDesignated_inputs);
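+ // Build the input name-to-index map that lets buffers and layer properties be keyed by tensor name.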
+ for (unsigned int index = 0; index < cnt; ++index) {
+ char *in_name = NULL;
+ ret = ml_tensors_info_get_tensor_name(mInputInfoHandle, index, &in_name);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
+ ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ if (in_name == NULL)
+ continue;
+
+ LOGI("index:%u with name %s", index, in_name);
+ mDesignated_inputs.insert(std::make_pair(std::string(in_name), index));
+ free(in_name);
+ }
+
ret = ml_single_get_output_info(mSingle, &mOutputInfoHandle);
if (ret != ML_ERROR_NONE) {
LOGE("Failed to request ml_single_get_output_info(%d).", ret);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
+ cnt = 0;
+ ret = ml_tensors_info_get_count(mOutputInfoHandle, &cnt);
+ if (ret != ML_ERROR_NONE || cnt == 0) {
+ LOGE("Failed to request ml_tensors_info_get_count(%d) with cnt %u.", ret, cnt);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ LOGI("output tensor count = %u", cnt);
+ mDesignated_outputs.clear();
+ std::map<std::string, int>().swap(mDesignated_outputs);
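+ // Build the matching output name-to-index map.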
+ for (unsigned int index = 0; index < cnt; ++index) {
+ char *out_name = NULL;
+ ret = ml_tensors_info_get_tensor_name(mOutputInfoHandle, index, &out_name);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
+ ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ if (out_name == NULL)
+ continue;
+
+ LOGI("index:%u with name %s", index, out_name);
+ mDesignated_outputs.insert(std::make_pair(std::string(out_name), index));
+ free(out_name);
+ }
+
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
}
int InferenceMLAPI::Run(
- std::vector<inference_engine_tensor_buffer> &input_buffers,
- std::vector<inference_engine_tensor_buffer> &output_buffers)
+ std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
+ std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
{
LOGI("ENTER");