mDesignated_outputs.clear();
std::vector<std::string>().swap(mDesignated_outputs);
+
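+ // Close the ML Single API single-shot handle; ml_single_close() releases the
+ // resources acquired when the model was opened (presumably via ml_single_open()).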
+ ml_single_close(mSingle);
}
int InferenceMLAPI::SetPrivateData(void *data)
{
LOGI("ENTER");
- std::string model_str("");
+ std::string model_str(model_paths[0]);
// TODO. Set NNFW backend type and HW type properly.
// The ML Single API requires model_paths to follow the rule below:
// "so library file path,nb model file path" or vice versa.
- model_str += model_paths[0] + "," + model_paths[1];
+ model_str += "," + model_paths[1];
break;
case INFERENCE_BACKEND_NNFW:
nnfw_type = ML_NNFW_TYPE_NNFW;
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
- model_str += model_paths[0];
LOGI("NNFW tensor filter will be used.");
break;
// TODO.
{
LOGI("ENTER");
- // Output tensor buffers will be allocated by a backend plugin of ML Single API of nnstreamer
- // So add a null tensor buffer object. This buffer will be updated at Run callback.
-
- // Caution. this tensor buffer will be checked by upper framework to verity if
- // the tensor buffer object is valid or not so fill dummy data to the tensor buffer.
-
- // TODO. Consider multiple output tensors.
-
- inference_engine_tensor_buffer tensor_buf = {
- 0,
- };
- tensor_buf.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT16;
- tensor_buf.buffer = (void *) 1;
- tensor_buf.size = 1;
- tensor_buf.owner_is_backend = 1;
- buffers.push_back(tensor_buf);
+ // TODO. Implement this function properly according to the ML Single API backend in use.
LOGI("LEAVE");
LOGI("input tensor count = %u", cnt);
for (unsigned int i = 0; i < cnt; ++i) {
+ inference_engine_tensor_info tensor_info;
ml_tensor_type_e in_type;
- unsigned int in_dim;
+ unsigned int in_dim[ML_TENSOR_RANK_LIMIT];
char *in_name = NULL;
- size_t in_size;
+ size_t in_size = 1;
ret = ml_tensors_info_get_tensor_type(in_info, i, &in_type);
if (ret != ML_ERROR_NONE) {
LOGI("input tensor type = %d", in_type);
- ret = ml_tensors_info_get_tensor_dimension(in_info, i, &in_dim);
+ int type = ConvertTensorType(in_type);
+ if (type == -1) {
+ return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+ }
+
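+ // Note: ml_tensors_info_get_tensor_dimension() fills every entry of in_dim up
+ // to ML_TENSOR_RANK_LIMIT; the ML API reports unused entries as 1, so
+ // multiplying all of them below yields the tensor's element count (in_size).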
+ ret = ml_tensors_info_get_tensor_dimension(in_info, i, in_dim);
if (ret != ML_ERROR_NONE) {
LOGE("Failed to request ml_tensors_info_get_tensor_dimension(%d).",
ret);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
- LOGI("input tensor dimension = %u", in_dim);
+ LOGI("Input tensor dimension:");
+ for (unsigned int shape_idx = 0; shape_idx < ML_TENSOR_RANK_LIMIT; ++shape_idx) {
+ tensor_info.shape.push_back(in_dim[shape_idx]);
+ in_size *= static_cast<size_t>(in_dim[shape_idx]);
+ LOGI("%u", in_dim[shape_idx]);
+ }
+
+ LOGI("input tensor size = %u", in_size);
ret = ml_tensors_info_get_tensor_name(in_info, i, &in_name);
if (ret != ML_ERROR_NONE) {
LOGI("input tensor name = %s", in_name);
- ret = ml_tensors_info_get_tensor_size(in_info, i, &in_size);
- if (ret != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_info_get_tensor_size(%d).",
- ret);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
+ tensor_info.data_type = static_cast<inference_tensor_data_type_e>(type);
+ tensor_info.size = in_size;
- LOGI("input tensor size = %u", in_size);
+ property.tensor_infos.push_back(tensor_info);
// TODO. Compare tensor info from engine to one from a given property.
}
property.layer_names = mInputProperty.layer_names;
- std::vector<inference_engine_tensor_info>::iterator iter;
- for (iter = mInputProperty.tensor_infos.begin();
- iter != mInputProperty.tensor_infos.end(); iter++) {
- inference_engine_tensor_info tensor_info = *iter;
- property.tensor_infos.push_back(tensor_info);
- }
-
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
{
LOGI("ENTER");
- property.layer_names = mOutputProperty.layer_names;
+ ml_tensors_info_h out_info = NULL;
+
+ // TODO. Need to check if model file loading is done.
+
+ int ret = ml_single_get_output_info(mSingle, &out_info);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_single_get_output_info(%d).", ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ unsigned int cnt;
+ ret = ml_tensors_info_get_count(out_info, &cnt);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ LOGI("output tensor count = %u", cnt);
+
+ for (unsigned int i = 0; i < cnt; ++i) {
+ inference_engine_tensor_info tensor_info;
+ ml_tensor_type_e out_type;
+ unsigned int out_dim[ML_TENSOR_RANK_LIMIT];
+ char *out_name = NULL;
+ size_t out_size = 1;
+
+ ret = ml_tensors_info_get_tensor_type(out_info, i, &out_type);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).",
+ ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ LOGI("output tensor type = %d", out_type);
- inference_engine_tensor_info tensor_info;
+ int type = ConvertTensorType(out_type);
+ if (type == -1) {
+ return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+ }
+
+ ret = ml_tensors_info_get_tensor_dimension(out_info, i, out_dim);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_info_get_tensor_dimension(%d).",
+ ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ int shape_size = 0;
+
+ LOGI("Output tensor dimension:");
+
+ for (unsigned int shape_idx = 0; shape_idx < ML_TENSOR_RANK_LIMIT; ++shape_idx) {
+ out_size *= static_cast<size_t>(out_dim[shape_idx]);
- // TODO. Set tensor info from a given ML Single API of nnstreamer backend instead of fixed one.
+ if (out_dim[shape_idx] == 1 && shape_size == 0)
+ shape_size = shape_idx;
- tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT16;
- tensor_info.shape = { 1, 1001 };
- tensor_info.size = 1001;
- property.tensor_infos.push_back(tensor_info);
+ LOGI("%d", out_dim[shape_idx]);
+ }
+
+ LOGI("Shape size of output tensor : %d", shape_size);
+ LOGI("Reversed output tensor dimension:");
+
+ // Reverse shape order.
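+ // The ML Single API reports dimensions innermost-first, padded with 1 up to
+ // ML_TENSOR_RANK_LIMIT, while the inference engine expects the outermost
+ // dimension first. E.g. {1001, 1, 1, 1} from the API becomes {1, 1001} here.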
+ for (int idx = shape_size; idx >= 0; --idx) {
+ tensor_info.shape.push_back(out_dim[idx]);
+ LOGI("%u", out_dim[idx]);
+ }
+
+ LOGI("output tensor size = %zu", out_size);
+
+ ret = ml_tensors_info_get_tensor_name(out_info, i, &out_name);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
+ ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ LOGI("output tensor name = %s", out_name);
+
+ tensor_info.data_type = static_cast<inference_tensor_data_type_e>(type);
+ tensor_info.size = out_size;
+
+ property.tensor_infos.push_back(tensor_info);
+
+ // TODO. Compare tensor info from engine to one from a given property.
+ }
+
+ property.layer_names = mOutputProperty.layer_names;
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
}
+ int InferenceMLAPI::ConvertTensorType(int tensor_type)
+ {
+ LOGI("ENTER");
+
+ switch (tensor_type) {
+ case ML_TENSOR_TYPE_FLOAT32:
+ return INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ case ML_TENSOR_TYPE_UINT8:
+ return INFERENCE_TENSOR_DATA_TYPE_UINT8;
+ case ML_TENSOR_TYPE_UINT16:
+ return INFERENCE_TENSOR_DATA_TYPE_UINT16;
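+ // Other ml_tensor_type_e values (e.g. ML_TENSOR_TYPE_INT32) are not mapped yet
+ // and fall through to the default case; the callers treat the result as
+ // "not supported".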
+ default:
+ LOGE("Tensor type(%d) is invalid.", tensor_type);
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ LOGI("LEAVE");
+
+ return -1;
+ }
+
int InferenceMLAPI::Run(
std::vector<inference_engine_tensor_buffer> &input_buffers,
std::vector<inference_engine_tensor_buffer> &output_buffers)
}
LOGI("Output tensor = %u", output_buffers[0].size);
-
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;