Support multiple output tensor 98/242198/2
authorHyo Jong Kim <hue.kim@samsung.com>
Tue, 25 Aug 2020 02:20:46 +0000 (11:20 +0900)
committerHyo Jong Kim <hue.kim@samsung.com>
Tue, 25 Aug 2020 02:24:48 +0000 (11:24 +0900)
Get the information and the number of output tensor
Set the output tensor according to that number

Change-Id: Ie803aa0aee194091006db29bd86a3d24a4f922df
Signed-off-by: Hyo Jong Kim <hue.kim@samsung.com>
src/inference_engine_mlapi.cpp

index b4d7a048337d8feb0c14acb3a8e6db52856d27f2..7570359239a01abd2c2899b8121ce7dc4b62d1d6 100644 (file)
@@ -524,14 +524,29 @@ namespace MLAPIImpl
                        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                }
 
-               unsigned int cnt;
-               err = ml_tensors_info_get_count(in_info, &cnt);
+               unsigned int in_cnt;
+               err = ml_tensors_info_get_count(in_info, &in_cnt);
                if (err != ML_ERROR_NONE) {
                        LOGE("Failed to request ml_tensors_info_get_count(%d).", err);
                        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                }
 
-               for (unsigned int i = 0; i < cnt; ++i) {
+               ml_tensors_info_h out_info = NULL;
+
+               err = ml_single_get_output_info(mSingle, &out_info);
+               if (err != ML_ERROR_NONE) {
+                       LOGE("Failed to request ml_single_get_output_info(%d).", err);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               unsigned int out_cnt;
+               err = ml_tensors_info_get_count(out_info, &out_cnt);
+               if (err != ML_ERROR_NONE) {
+                       LOGE("Failed to request ml_tensors_info_get_count(%d).", err);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               for (unsigned int i = 0; i < in_cnt; ++i) {
                        LOGI("index(%d) : buffer = %p, size = %zu\n", i,
                                 input_buffers[i].buffer, input_buffers[i].size);
                        err = ml_tensors_data_set_tensor_data(input_data, i,
@@ -551,17 +566,17 @@ namespace MLAPIImpl
                        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                }
 
-               // TODO. Consider multiple output tensors.
-
-               err = ml_tensors_data_get_tensor_data(
-                               output_data, 0, (void **) &output_buffers[0].buffer,
-                               &output_buffers[0].size);
-               if (err != ML_ERROR_NONE) {
-                       LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", err);
-                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               for (unsigned int i = 0; i < out_cnt; ++i) {
+                       err = ml_tensors_data_get_tensor_data(
+                               output_data, i, (void **) &output_buffers[i].buffer,
+                               &output_buffers[i].size);
+                       if (err != ML_ERROR_NONE) {
+                               LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", err);
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
+                       LOGI("Output tensor[%u] = %zu", i, output_buffers[i].size);
                }
 
-               LOGI("Output tensor = %zu", output_buffers[0].size);
                LOGI("LEAVE");
 
                return INFERENCE_ENGINE_ERROR_NONE;