return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
+ // Drop any tensors-info handle kept from a previous model load so the
+ // handles fetched below always describe the freshly opened model.
+ if (mInputInfoHandle) {
+ ml_tensors_info_destroy(mInputInfoHandle);
+ mInputInfoHandle = NULL;
+ }
+
+ // Cache the input tensors-info handle; the layer-name mapping below
+ // reads from it.
+ // NOTE(review): unlike the UpdateTensorsInfo() failure path below, this
+ // error return leaves mSingle open -- confirm whether ml_single_close()
+ // should be called here as well.
+ err = ml_single_get_input_info(mSingle, &mInputInfoHandle);
+ if (err != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_single_get_input_info(%d).", err);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ if (mOutputInfoHandle) {
+ ml_tensors_info_destroy(mOutputInfoHandle);
+ mOutputInfoHandle = NULL;
+ }
+
+ // Same caching for the output tensors-info handle.
+ // NOTE(review): same missing ml_single_close() question as above.
+ err = ml_single_get_output_info(mSingle, &mOutputInfoHandle);
+ if (err != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_single_get_output_info(%d).", err);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
err = UpdateTensorsInfo();
if (err != INFERENCE_ENGINE_ERROR_NONE) {
ml_single_close(mSingle);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
- if (mInputInfoHandle) {
- ml_tensors_info_destroy(mInputInfoHandle);
- mInputInfoHandle = NULL;
- }
-
- if (mOutputInfoHandle) {
- ml_tensors_info_destroy(mOutputInfoHandle);
- mOutputInfoHandle = NULL;
- }
+ unsigned int input_tensor_cnt = 0;
+
+ // Reset the input name-to-index map so re-loading a different model
+ // does not keep stale entries from the previous one.
+ mDesignated_inputs.clear();
- int ret = ml_single_get_input_info(mSingle, &mInputInfoHandle);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_single_get_input_info(%d).", ret);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- unsigned int cnt = 0;
- ret = ml_tensors_info_get_count(mInputInfoHandle, &cnt);
- if (ret != ML_ERROR_NONE || !cnt) {
- LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- LOGI("input tensor count = %u", cnt);
- mDesignated_inputs.clear();
- std::map<std::string, int>().swap(mDesignated_inputs);
- for(unsigned int index = 0; index < cnt; ++index) {
- char *in_name = NULL;
- ret = ml_tensors_info_get_tensor_name(mInputInfoHandle, index, &in_name);
- LOGI("index:%d with name %s", index, in_name);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
- ret);
+ // If user-given input layer information exists then use it.
+ if (!mInputProperty.layers.empty()) {
+ for (auto& iter : mInputProperty.layers) {
+ LOGI("index:%u with name %s", input_tensor_cnt, iter.first.c_str());
+ mDesignated_inputs.insert(std::make_pair(iter.first, input_tensor_cnt));
+ input_tensor_cnt++;
+ }
+ // Otherwise, request input layer information to tensor filter.
+ } else {
+ int ret = ml_tensors_info_get_count(mInputInfoHandle, &input_tensor_cnt);
+ if (ret != ML_ERROR_NONE || !input_tensor_cnt) {
+ LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
- if (in_name == NULL)
- continue;
+ for(unsigned int index = 0; index < input_tensor_cnt; ++index) {
+ char *in_name = NULL;
+ ret = ml_tensors_info_get_tensor_name(mInputInfoHandle, index, &in_name);
+ LOGI("index:%u with name %s", index, in_name);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).", ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
- mDesignated_inputs.insert(std::make_pair(std::string(in_name), index));
- free(in_name);
- }
+ if (in_name == NULL)
+ continue;
- ret = ml_single_get_output_info(mSingle, &mOutputInfoHandle);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_single_get_output_info(%d).", ret);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ mDesignated_inputs.insert(std::make_pair(std::string(in_name), index));
+ free(in_name);
+ }
}
- cnt = 0;
- ret = ml_tensors_info_get_count(mOutputInfoHandle, &cnt);
- if (ret != ML_ERROR_NONE || cnt == 0) {
- LOGE("Failed to request ml_tensors_info_get_count(%d) with cnt %u.", ret, cnt);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
+ LOGI("input tensor count = %u", input_tensor_cnt);
- LOGI("output tensor count = %u", cnt);
- mDesignated_outputs.clear();
- std::map<std::string, int>().swap(mDesignated_outputs);
- for (unsigned int index = 0; index < cnt; ++index) {
- char *out_name = NULL;
- ret = ml_tensors_info_get_tensor_name(mOutputInfoHandle, index, &out_name);
- LOGI("index:%u with name %s", index, out_name);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
- ret);
+ unsigned int output_tensor_cnt = 0;
+
+ // Reset the output name-to-index map so re-loading a different model
+ // does not keep stale entries from the previous one.
+ mDesignated_outputs.clear();
+
+ // If user-given output layer information exists then use it.
+ if (!mOutputProperty.layers.empty()) {
+ int index = 0;
+ for(auto& iter : mOutputProperty.layers){
+ LOGI("index:%d with name %s", index, iter.first.c_str());
+ mDesignated_outputs.insert(std::make_pair(iter.first, index));
+ index++;
+ }
+
+ output_tensor_cnt = index;
+ // Otherwise, request output layer information to tensor filter.
+ } else {
+ int ret = ml_tensors_info_get_count(mOutputInfoHandle, &output_tensor_cnt);
+ if (ret != ML_ERROR_NONE || output_tensor_cnt == 0) {
+ // Log the error code (ret), not the count, on failure.
+ LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
- if (out_name == NULL)
- continue;
+ for (unsigned int index = 0; index < output_tensor_cnt; ++index) {
+ char *out_name = NULL;
- mDesignated_outputs.insert(std::make_pair(std::string(out_name), index));
- if (out_name != NULL){
+ ret = ml_tensors_info_get_tensor_name(mOutputInfoHandle, index, &out_name);
+ LOGI("index:%u with name %s", index, out_name);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
+ ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ if (out_name == NULL)
+ continue;
+
+ mDesignated_outputs.insert(std::make_pair(std::string(out_name), index));
+ free(out_name);
+ }
}
+ LOGI("output tensor count = %u", output_tensor_cnt);
+
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
}