mPluginType(),
mTargetDevice(),
mSingle(),
+ mInputDataHandle(),
+ mOutputDataHandle(),
mDesignated_inputs(),
mDesignated_outputs(),
mInputProperty(),
- mOutputProperty(),
- mInputTensorBuffer(),
- mOutputTensorBuffer(),
- mInputTensorInfo(),
- mOutputTensorInfo()
+ mOutputProperty()
{
LOGI("ENTER");
std::vector<std::string>().swap(mDesignated_outputs);
ml_single_close(mSingle);
+
+ if (mInputDataHandle)
+ ml_tensors_data_destroy(mInputDataHandle);
+
+ if (mOutputDataHandle)
+ ml_tensors_data_destroy(mOutputDataHandle);
+
+ mInputDataHandle = NULL;
+ mOutputDataHandle = NULL;
}
int InferenceMLAPI::SetPrivateData(void *data)
{
LOGI("ENTER");
+ buffers.clear();
+
// TODO. Implement this function according to a given ML Single API backend properly.
+ ml_tensors_info_h in_info = NULL;
+
+ int ret = ml_single_get_input_info(mSingle, &in_info);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_single_get_input_info(%d).", ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ // ML Single API will always provide internal tensor buffers so
+ // get the tensor buffers back to Mediavision framework so that
+ // Mediavision framework doesn't allocate the tensor buffers internally.
+
+ unsigned int cnt;
+
+ ret = ml_tensors_info_get_count(in_info, &cnt);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ LOGI("input tensor count = %u", cnt);
+
+ for (unsigned int i = 0; i < cnt; ++i) {
+ inference_engine_tensor_buffer in_buffer;
+ ml_tensor_type_e in_type;
+
+ ret = ml_tensors_data_create(in_info, &mInputDataHandle);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_data_create(%d).", ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ ret = ml_tensors_data_get_tensor_data(mInputDataHandle, i, &in_buffer.buffer, &in_buffer.size);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ LOGE("buffer = %p, size = %d\n", in_buffer.buffer, in_buffer.size);
+
+ int ret = ml_tensors_info_get_tensor_type(in_info, i, &in_type);
+ if (ret != ML_ERROR_NONE) {
+ LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).",
+ ret);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ LOGI("input tensor type = %d", in_type);
+
+ int type = ConvertTensorType(in_type);
+ if (type == -1) {
+ return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+ }
+
+ in_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
+ in_buffer.owner_is_backend = 1;
+
+ buffers.push_back(in_buffer);
+ }
+
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
{
	LOGI("ENTER");
-	// TODO. Implement this function according to a given ML Single API backend properly.
+	buffers.clear();
+
+	// TODO. Need to check if model file loading is done.
+
+	// Ask the ML Single API for the output tensor layout of the loaded model.
+	ml_tensors_info_h out_info = NULL;
+
+	int ret = ml_single_get_output_info(mSingle, &out_info);
+	if (ret != ML_ERROR_NONE) {
+		LOGE("Failed to request ml_single_get_output_info(%d).", ret);
+		return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+	}
+
+	// ML Single API will always provide internal tensor buffers so
+	// get the tensor buffers back to Mediavision framework so that
+	// Mediavision framework doesn't allocate the tensor buffers internally.
+
+	unsigned int cnt;
+
+	ret = ml_tensors_info_get_count(out_info, &cnt);
+	if (ret != ML_ERROR_NONE) {
+		LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
+		ml_tensors_info_destroy(out_info);
+		return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+	}
+
+	LOGI("output tensor count = %u", cnt);
+
+	// Create the output data handle ONCE, before the loop. Creating it
+	// per-iteration would overwrite mOutputDataHandle every pass and leak
+	// all but the last handle. Destroy a stale handle first so repeated
+	// calls do not leak either.
+	if (mOutputDataHandle) {
+		ml_tensors_data_destroy(mOutputDataHandle);
+		mOutputDataHandle = NULL;
+	}
+
+	ret = ml_tensors_data_create(out_info, &mOutputDataHandle);
+	if (ret != ML_ERROR_NONE) {
+		LOGE("Failed to request ml_tensors_data_create(%d).", ret);
+		ml_tensors_info_destroy(out_info);
+		return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+	}
+
+	for (unsigned int i = 0; i < cnt; ++i) {
+		inference_engine_tensor_buffer out_buffer;
+		ml_tensor_type_e out_type;
+
+		ret = ml_tensors_data_get_tensor_data(mOutputDataHandle, i, &out_buffer.buffer, &out_buffer.size);
+		if (ret != ML_ERROR_NONE) {
+			LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", ret);
+			ml_tensors_info_destroy(out_info);
+			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+		}
+
+		// Informational, not an error — use LOGI; size is presumably
+		// size_t so %zu, not %d (TODO confirm field type in
+		// inference_engine_tensor_buffer).
+		LOGI("buffer = %p, size = %zu\n", out_buffer.buffer, out_buffer.size);
+
+		ret = ml_tensors_info_get_tensor_type(out_info, i, &out_type);
+		if (ret != ML_ERROR_NONE) {
+			LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).",
+				 ret);
+			ml_tensors_info_destroy(out_info);
+			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+		}
+
+		LOGI("output tensor type = %d", out_type);
+
+		int type = ConvertTensorType(out_type);
+		if (type == -1) {
+			ml_tensors_info_destroy(out_info);
+			return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+		}
+
+		// Buffer memory belongs to the ML Single API data handle, so
+		// mark it backend-owned — the caller must not free it.
+		out_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
+		out_buffer.owner_is_backend = 1;
+
+		buffers.push_back(out_buffer);
+	}
+
+	// Release the info handle obtained from ml_single_get_output_info().
+	ml_tensors_info_destroy(out_info);
+
	LOGI("LEAVE");
-	return err;
+	// 'err' is not declared anywhere in this function — 'ret' is the
+	// local status variable — so returning it could not compile. All
+	// failure paths return above; reaching here means success.
+	return INFERENCE_ENGINE_ERROR_NONE;
}
- ml_tensors_info_h in_info = NULL;
-
- err = ml_single_get_input_info(mSingle, &in_info);
- if (err != ML_ERROR_NONE) {
- LOGE("Failed to request ml_single_get_input_info(%d).", err);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- ml_tensors_data_h input_data = NULL;
- err = ml_tensors_data_create(in_info, &input_data);
+ err = ml_single_invoke(mSingle, mInputDataHandle, &mOutputDataHandle);
if (err != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_data_create(%d).", err);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
- unsigned int in_cnt;
- err = ml_tensors_info_get_count(in_info, &in_cnt);
- if (err != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_info_get_count(%d).", err);
+ LOGE("Failed to request ml_single_invoke(%d).", err);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
}
unsigned int out_cnt;
+
err = ml_tensors_info_get_count(out_info, &out_cnt);
if (err != ML_ERROR_NONE) {
LOGE("Failed to request ml_tensors_info_get_count(%d).", err);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
- for (unsigned int i = 0; i < in_cnt; ++i) {
- LOGI("index(%d) : buffer = %p, size = %zu\n", i,
- input_buffers[i].buffer, input_buffers[i].size);
- err = ml_tensors_data_set_tensor_data(input_data, i,
- input_buffers[i].buffer,
- input_buffers[i].size);
- if (err != ML_ERROR_NONE) {
- LOGE("Failed to request ml_tensors_data_set_tensor_data(%d).",
- err);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
- }
-
- ml_tensors_data_h output_data = NULL;
- err = ml_single_invoke(mSingle, input_data, &output_data);
- if (err != ML_ERROR_NONE) {
- LOGE("Failed to request ml_single_invoke(%d).", err);
- return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
- }
-
+	// TODO. Why is the code below required?
+	// ML Single API provides an internal tensor buffer for the output tensor
+	// and the user already knows that buffer via GetOutputTensorBuffers.
+	//
+	// However, without the code below, the user cannot read the output result
+	// correctly. What happens inside the ML Single API framework?
for (unsigned int i = 0; i < out_cnt; ++i) {
err = ml_tensors_data_get_tensor_data(
- output_data, i, (void **) &output_buffers[i].buffer,
+ mOutputDataHandle, i, (void **) &output_buffers[i].buffer,
&output_buffers[i].size);
if (err != ML_ERROR_NONE) {
LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", err);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
- LOGI("Output tensor[%u] = %zu", i, output_buffers[0].size);
+
+ LOGI("Output tensor[%u] = %zu", i, output_buffers[i].size);
}
LOGI("LEAVE");