Use pre-defined tensor data handles
author Inki Dae <inki.dae@samsung.com>
Tue, 15 Sep 2020 08:30:27 +0000 (17:30 +0900)
committer Inki Dae <inki.dae@samsung.com>
Wed, 16 Sep 2020 05:09:46 +0000 (14:09 +0900)
At invoke time we no longer need to query the input tensor information,
because the tensor data handles are already created in GetInputTensorBuffers()
and GetOutputTensorBuffers(); Run() simply reuses them.
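
In outline, the reuse pattern looks like this (a minimal sketch with error
handling elided; all calls and member names appear in the diff below):

    // Created once, when the caller first requests the tensor buffers:
    ml_tensors_data_create(in_info, &mInputDataHandle);    // GetInputTensorBuffers()
    ml_tensors_data_create(out_info, &mOutputDataHandle);  // GetOutputTensorBuffers()

    // Every subsequent Run() reuses the cached handles instead of calling
    // ml_single_get_input_info() and ml_tensors_data_create() per invoke:
    ml_single_invoke(mSingle, mInputDataHandle, &mOutputDataHandle);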

Change-Id: I6d2ac7fcb8d4ed129deb54eca7739038571b230e
Signed-off-by: Inki Dae <inki.dae@samsung.com>
src/inference_engine_mlapi.cpp
src/inference_engine_mlapi_private.h

index 1a191b8..d683b0e 100644 (file)
@@ -31,14 +31,12 @@ namespace MLAPIImpl
                        mPluginType(),
                        mTargetDevice(),
                        mSingle(),
+                       mInputDataHandle(),
+                       mOutputDataHandle(),
                        mDesignated_inputs(),
                        mDesignated_outputs(),
                        mInputProperty(),
-                       mOutputProperty(),
-                       mInputTensorBuffer(),
-                       mOutputTensorBuffer(),
-                       mInputTensorInfo(),
-                       mOutputTensorInfo()
+                       mOutputProperty()
        {
                LOGI("ENTER");
 
@@ -54,6 +52,15 @@ namespace MLAPIImpl
                std::vector<std::string>().swap(mDesignated_outputs);
 
                ml_single_close(mSingle);
+
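+               // Destroy the tensor data handles pre-created by
+               // GetInputTensorBuffers() and GetOutputTensorBuffers().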
+               if (mInputDataHandle)
+                       ml_tensors_data_destroy(mInputDataHandle);
+
+               if (mOutputDataHandle)
+                       ml_tensors_data_destroy(mOutputDataHandle);
+
+               mInputDataHandle = NULL;
+               mOutputDataHandle = NULL;
        }
 
        int InferenceMLAPI::SetPrivateData(void *data)
@@ -194,8 +201,70 @@ namespace MLAPIImpl
        {
                LOGI("ENTER");
 
+               buffers.clear();
+
                // TODO. Implement this function according to a given ML Single API backend properly.
 
+               ml_tensors_info_h in_info = NULL;
+
+               int ret = ml_single_get_input_info(mSingle, &in_info);
+               if (ret != ML_ERROR_NONE) {
+                       LOGE("Failed to request ml_single_get_input_info(%d).", ret);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               // ML Single API always provides internal tensor buffers, so hand
+               // those buffers back to the Mediavision framework so that it does
+               // not allocate tensor buffers of its own.
+
+               unsigned int cnt;
+
+               ret = ml_tensors_info_get_count(in_info, &cnt);
+               if (ret != ML_ERROR_NONE) {
+                       LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               LOGI("input tensor count = %u", cnt);
+
+               // Create the input data handle once, outside the loop below;
+               // calling ml_tensors_data_create() per tensor would overwrite
+               // mInputDataHandle and leak the previously created handle.
+               ret = ml_tensors_data_create(in_info, &mInputDataHandle);
+               if (ret != ML_ERROR_NONE) {
+                       LOGE("Failed to request ml_tensors_data_create(%d).", ret);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               for (unsigned int i = 0; i < cnt; ++i) {
+                       inference_engine_tensor_buffer in_buffer;
+                       ml_tensor_type_e in_type;
+
+                       ret = ml_tensors_data_get_tensor_data(mInputDataHandle, i, &in_buffer.buffer, &in_buffer.size);
+                       if (ret != ML_ERROR_NONE) {
+                               LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", ret);
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
+
+                       LOGI("buffer = %p, size = %zu", in_buffer.buffer, in_buffer.size);
+
+                       ret = ml_tensors_info_get_tensor_type(in_info, i, &in_type);
+                       if (ret != ML_ERROR_NONE) {
+                               LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).",
+                                        ret);
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
+
+                       LOGI("input tensor type = %d", in_type);
+
+                       int type = ConvertTensorType(in_type);
+                       if (type == -1) {
+                               return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+                       }
+
+                       in_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
+                       in_buffer.owner_is_backend = 1;
+
+                       buffers.push_back(in_buffer);
+               }
+
+               // The input info handle is no longer needed once the data handle
+               // and tensor types have been extracted.
+               ml_tensors_info_destroy(in_info);
+
                LOGI("LEAVE");
 
                return INFERENCE_ENGINE_ERROR_NONE;
@@ -206,7 +275,69 @@ namespace MLAPIImpl
        {
                LOGI("ENTER");
 
-               // TODO. Implement this function according to a given ML Single API backend properly.
+               buffers.clear();
+
+               // TODO. Check whether the model file has finished loading.
+
+               ml_tensors_info_h out_info = NULL;
+
+               int ret = ml_single_get_output_info(mSingle, &out_info);
+               if (ret != ML_ERROR_NONE) {
+                       LOGE("Failed to request ml_single_get_output_info(%d).", ret);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               // ML Single API always provides internal tensor buffers, so hand
+               // those buffers back to the Mediavision framework so that it does
+               // not allocate tensor buffers of its own.
+
+               unsigned int cnt;
+
+               ret = ml_tensors_info_get_count(out_info, &cnt);
+               if (ret != ML_ERROR_NONE) {
+                       LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               LOGI("output tensor count = %u", cnt);
+
+               // As with the input path, create the output data handle once,
+               // outside the loop, to avoid leaking a handle per iteration.
+               ret = ml_tensors_data_create(out_info, &mOutputDataHandle);
+               if (ret != ML_ERROR_NONE) {
+                       LOGE("Failed to request ml_tensors_data_create(%d).", ret);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               for (unsigned int i = 0; i < cnt; ++i) {
+                       inference_engine_tensor_buffer out_buffer;
+                       ml_tensor_type_e out_type;
+
+                       ret = ml_tensors_data_get_tensor_data(mOutputDataHandle, i, &out_buffer.buffer, &out_buffer.size);
+                       if (ret != ML_ERROR_NONE) {
+                               LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", ret);
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
+
+                       LOGI("buffer = %p, size = %zu", out_buffer.buffer, out_buffer.size);
+
+                       ret = ml_tensors_info_get_tensor_type(out_info, i, &out_type);
+                       if (ret != ML_ERROR_NONE) {
+                               LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).",
+                                        ret);
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
+
+                       LOGI("output tensor type = %d", out_type);
+
+                       int type = ConvertTensorType(out_type);
+                       if (type == -1) {
+                               return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+                       }
+
+                       out_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
+                       out_buffer.owner_is_backend = 1;
+
+                       buffers.push_back(out_buffer);
+               }
+
+               // Release the output info handle now that the data handle and
+               // tensor types have been extracted.
+               ml_tensors_info_destroy(out_info);
 
                LOGI("LEAVE");
 
@@ -520,25 +651,9 @@ namespace MLAPIImpl
                        return err;
                }
 
-               ml_tensors_info_h in_info = NULL;
-
-               err = ml_single_get_input_info(mSingle, &in_info);
-               if (err != ML_ERROR_NONE) {
-                       LOGE("Failed to request ml_single_get_input_info(%d).", err);
-                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-               }
-
-               ml_tensors_data_h input_data = NULL;
-               err = ml_tensors_data_create(in_info, &input_data);
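+               // Reuse the data handles pre-created by GetInputTensorBuffers()
+               // and GetOutputTensorBuffers() instead of recreating them on
+               // every invoke.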
+               err = ml_single_invoke(mSingle, mInputDataHandle, &mOutputDataHandle);
                if (err != ML_ERROR_NONE) {
-                       LOGE("Failed to request ml_tensors_data_create(%d).", err);
-                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-               }
-
-               unsigned int in_cnt;
-               err = ml_tensors_info_get_count(in_info, &in_cnt);
-               if (err != ML_ERROR_NONE) {
-                       LOGE("Failed to request ml_tensors_info_get_count(%d).", err);
+                       LOGE("Failed to request ml_single_invoke(%d).", err);
                        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                }
 
@@ -551,41 +666,29 @@ namespace MLAPIImpl
                }
 
                unsigned int out_cnt;
+
                err = ml_tensors_info_get_count(out_info, &out_cnt);
                if (err != ML_ERROR_NONE) {
                        LOGE("Failed to request ml_tensors_info_get_count(%d).", err);
                        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                }
 
-               for (unsigned int i = 0; i < in_cnt; ++i) {
-                       LOGI("index(%d) : buffer = %p, size = %zu\n", i,
-                                input_buffers[i].buffer, input_buffers[i].size);
-                       err = ml_tensors_data_set_tensor_data(input_data, i,
-                                                                                                 input_buffers[i].buffer,
-                                                                                                 input_buffers[i].size);
-                       if (err != ML_ERROR_NONE) {
-                               LOGE("Failed to request ml_tensors_data_set_tensor_data(%d).",
-                                        err);
-                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-                       }
-               }
-
-               ml_tensors_data_h output_data = NULL;
-               err = ml_single_invoke(mSingle, input_data, &output_data);
-               if (err != ML_ERROR_NONE) {
-                       LOGE("Failed to request ml_single_invoke(%d).", err);
-                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
-               }
-
+               // TODO. Why is the code below required?
+               // ML Single API provides an internal tensor buffer for the output
+               // tensor, and the user already knows that buffer from
+               // GetOutputTensorBuffers().
+               //
+               // However, without the code below, the user cannot read the output
+               // result correctly. What happens inside the ML Single API framework?
+               // (Presumably ml_single_invoke() returns a freshly allocated output
+               // data handle rather than filling the one passed in, so the buffer
+               // pointers must be re-queried after every invoke.)
                for (unsigned int i = 0; i < out_cnt; ++i) {
                        err = ml_tensors_data_get_tensor_data(
-                               output_data, i, (void **) &output_buffers[i].buffer,
+                               mOutputDataHandle, i, (void **) &output_buffers[i].buffer,
                                &output_buffers[i].size);
                        if (err != ML_ERROR_NONE) {
                                LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", err);
                                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                        }
-                       LOGI("Output tensor[%u] = %zu", i, output_buffers[0].size);
+
+                       LOGI("Output tensor[%u] = %zu", i, output_buffers[i].size);
                }
 
                LOGI("LEAVE");
index 6612eb7..b6b4b1e 100644 (file)
@@ -81,14 +81,12 @@ namespace MLAPIImpl
                int mPluginType;
                int mTargetDevice;
                ml_single_h mSingle;
+               ml_tensors_data_h mInputDataHandle;
+               ml_tensors_data_h mOutputDataHandle;
                std::vector<std::string> mDesignated_inputs;
                std::vector<std::string> mDesignated_outputs;
                inference_engine_layer_property mInputProperty;
                inference_engine_layer_property mOutputProperty;
-               std::vector<inference_engine_tensor_buffer> mInputTensorBuffer;
-               std::vector<inference_engine_tensor_buffer> mOutputTensorBuffer;
-               std::vector<inference_engine_tensor_info> mInputTensorInfo;
-               std::vector<inference_engine_tensor_info> mOutputTensorInfo;
        };
 
 } /* InferenceEngineImpl */