mv_machine_learning: introduce get_result_count API for landmark detection 83/305083/1
author    Inki Dae <inki.dae@samsung.com>
Fri, 26 Jan 2024 08:42:01 +0000 (17:42 +0900)
committer Inki Dae <inki.dae@samsung.com>
Fri, 26 Jan 2024 08:42:01 +0000 (17:42 +0900)
[Issue type] : new feature

Introduce the get_result_count API for the landmark detection task group.

From the user's perspective, this API reports how many results exist so that
the user can request each result by a given index. From the framework's
perspective, it also keeps API behavior consistent: a get_result_count call
updates _current_result of the task group by calling the getOutput function
of ITask, and a get_result call returns the _current_result value by calling
the getOutputCache function of ITask.

Since the get_result_count and get_result APIs cover these use cases, drop
the existing get_pos and get_positions APIs.

Change-Id: Ifabe8e2c50db58e95ae67bf1a5fd54586e8facc1
Signed-off-by: Inki Dae <inki.dae@samsung.com>
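
As a rough illustration of the new user-facing flow (facial landmark case; the
helper name below is made up, return codes are ignored, handle/source cleanup
is omitted, and the caller is assumed to have already created and filled the
source via mv_create_source()):

    #include <mv_common.h>
    #include <mv_facial_landmark_internal.h>

    static void facial_landmark_example(mv_source_h source)
    {
            mv_facial_landmark_h handle;
            unsigned int result_cnt;

            mv_facial_landmark_create(&handle);
            mv_facial_landmark_configure(handle);
            mv_facial_landmark_prepare(handle);
            mv_facial_landmark_inference(handle, source);

            /* New API: query how many results exist for the inferenced source, */
            mv_facial_landmark_get_result_count(handle, &result_cnt);

            /* then request each result by its index. */
            for (unsigned int idx = 0; idx < result_cnt; ++idx) {
                    unsigned long frame_number;
                    unsigned int pos_x, pos_y;

                    mv_facial_landmark_get_result(handle, idx, &frame_number, &pos_x, &pos_y);
                    /* pos_x/pos_y hold the coordinates of the idx-th landmark and
                     * frame_number identifies the frame the result belongs to. */
            }
    }
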
include/mv_facial_landmark_internal.h
include/mv_pose_landmark_internal.h
mv_machine_learning/landmark_detection/include/ILandmarkDetection.h
mv_machine_learning/landmark_detection/include/LandmarkDetection.h
mv_machine_learning/landmark_detection/src/FacialLandmarkAdapter.cpp
mv_machine_learning/landmark_detection/src/LandmarkDetection.cpp
mv_machine_learning/landmark_detection/src/PoseLandmarkAdapter.cpp
mv_machine_learning/landmark_detection/src/mv_facial_landmark.cpp
mv_machine_learning/landmark_detection/src/mv_pose_landmark.cpp
test/testsuites/machine_learning/landmark_detection/test_landmark_detection.cpp
test/testsuites/machine_learning/landmark_detection/test_landmark_detection_async.cpp

index a1f85b5f307d4cb69e553d774acde54de57748d7..63ce2110cccb813dfc6216a16f032e629fdeda5c 100644 (file)
@@ -194,7 +194,30 @@ int mv_facial_landmark_inference_async(mv_facial_landmark_h handle, mv_source_h
 
 /**
  * @internal
- * @brief Gets the facial landmark positions on the @a source.
+ * @brief Gets the number of detection results.
+ *
+ * @since_tizen 9.0
+ *
+ * @param[in] handle       The handle to the inference
+ * @param[out] result_cnt  The number of results.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INTERNAL          Internal error
+ *
+ * @pre Create a source handle by calling mv_create_source()
+ * @pre Create an inference handle by calling mv_facial_landmark_create()
+ * @pre Prepare an inference by calling mv_facial_landmark_configure()
+ * @pre Prepare an inference by calling mv_facial_landmark_prepare()
+ * @pre Request an inference by calling mv_facial_landmark_inference()
+ */
+int mv_facial_landmark_get_result_count(mv_facial_landmark_h handle, unsigned int *result_cnt);
+
+/**
+ * @internal
+ * @brief Gets the facial landmark position values for a given index.
  *
  * @since_tizen 9.0
  * @remarks pos_x and pos_y arrays are allocated internally by the framework and will remain valid
@@ -211,7 +234,8 @@ int mv_facial_landmark_inference_async(mv_facial_landmark_h handle, mv_source_h
  *          in asynchronous mode until the handle is released.
  *
  * @param[in] handle               The handle to the inference
- * @param[out] number_of_landmarks A number of landmarks detected.
+ * @param[in] index                The result index.
+ * @param[out] frame_number        The frame number corresponding to the result.
  * @param[out] pos_x               An array containing x-coordinate values.
  * @param[out] pos_y               An array containing y-coordinate values.
  *
@@ -226,9 +250,10 @@ int mv_facial_landmark_inference_async(mv_facial_landmark_h handle, mv_source_h
  * @pre Prepare an inference by calling mv_facial_landmark_configure()
  * @pre Prepare an inference by calling mv_facial_landmark_prepare()
  * @pre Prepare an inference by calling mv_facial_landmark_inference()
+ * @pre Get result count by calling mv_facial_landmark_get_result_count()
  */
-int mv_facial_landmark_get_positions(mv_facial_landmark_h handle, unsigned int *number_of_landmarks,
-                                                                        unsigned int **pos_x, unsigned int **pos_y);
+int mv_facial_landmark_get_result(mv_facial_landmark_h handle, unsigned int index, unsigned long *frame_number,
+                                                                 unsigned int *pos_x, unsigned int *pos_y);
 
 /**
  * @internal
index 6c33ee2ba8de5bb4a785f0dcb7dadd69e968b836..67b93b8f67e8e92b814ed90ebf09c6e2fe71aeb9 100644 (file)
@@ -170,7 +170,31 @@ int mv_pose_landmark_inference(mv_pose_landmark_h handle, mv_source_h source);
 int mv_pose_landmark_inference_async(mv_pose_landmark_h handle, mv_source_h source);
 
 /**
- * @brief Gets the pose landmark positions on the @a source.
+ * @internal
+ * @brief Gets the number of detection results.
+ *
+ * @since_tizen 9.0
+ *
+ * @param[in] handle       The handle to the inference
+ * @param[out] result_cnt  The number of results.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INTERNAL          Internal error
+ *
+ * @pre Create a source handle by calling mv_create_source()
+ * @pre Create an inference handle by calling mv_pose_landmark_create()
+ * @pre Prepare an inference by calling mv_pose_landmark_configure()
+ * @pre Prepare an inference by calling mv_pose_landmark_prepare()
+ * @pre Request an inference by calling mv_pose_landmark_inference()
+ */
+int mv_pose_landmark_get_result_count(mv_pose_landmark_h handle, unsigned int *result_cnt);
+
+/**
+ * @internal
+ * @brief Gets the pose landmark position values for a given index.
  *
  * @since_tizen 9.0
  * @remarks pos_x and pos_y arrays are allocated internally by the framework and will remain valid
@@ -179,15 +203,16 @@ int mv_pose_landmark_inference_async(mv_pose_landmark_h handle, mv_source_h sour
  *          please copy them to user memory and use the copy.
  *
  *          This function operates differently depending on the inference request method.
- *          - After mv_pose_landmark_inference() calls, this function returns pose landmark positions immediately.
- *          - After mv_pose_landmark_inference_async() calls, this function can be blocked until the asynchronous inference request is completed
- *            or, the timeout occurs if no result within 3 seconds.
+ *          - After mv_pose_landmark_inference() calls, this function returns pose landmark positions immediately.
+ *          - After mv_pose_landmark_inference_async() calls, this function can be blocked until the asynchronous inference request is completed
+ *            or the timeout occurs if no result within 3 seconds.
  *
- *          Additionally, after calling the mv_pose_landmark_inference_async function, the function operates
+ *          Additionally, after calling the mv_pose_landmark_inference_async function, the function operates
  *          in asynchronous mode until the handle is released.
  *
  * @param[in] handle               The handle to the inference
- * @param[out] number_of_landmarks A number of landmarks detected.
+ * @param[in] index                The result index.
+ * @param[out] frame_number        The frame number corresponding to the result.
  * @param[out] pos_x               An array containing x-coordinate values.
  * @param[out] pos_y               An array containing y-coordinate values.
  *
@@ -202,9 +227,10 @@ int mv_pose_landmark_inference_async(mv_pose_landmark_h handle, mv_source_h sour
  * @pre Prepare an inference by calling mv_pose_landmark_configure()
  * @pre Prepare an inference by calling mv_pose_landmark_prepare()
  * @pre Prepare an inference by calling mv_pose_landmark_inference()
+ * @pre Get result count by calling mv_pose_landmark_get_result_count()
  */
-int mv_pose_landmark_get_pos(mv_pose_landmark_h handle, unsigned int *number_of_landmarks, unsigned int **pos_x,
-                                                        unsigned int **pos_y);
+int mv_pose_landmark_get_result(mv_pose_landmark_h handle, unsigned int index, unsigned long *frame_number,
+                                                               unsigned int *pos_x, unsigned int *pos_y);
 
 /**
  * @brief Set user-given backend and device types for inference.
index 0d709adbc98fa9f1a6c7ff13677c358c534aa576..7c799bf622c5f4330b9613b3956bdf5d92171a6e 100644 (file)
@@ -42,6 +42,7 @@ public:
        virtual void perform(mv_source_h &mv_src) = 0;
        virtual void performAsync(LandmarkDetectionInput &input) = 0;
        virtual LandmarkDetectionResult &getOutput() = 0;
+       virtual LandmarkDetectionResult &getOutputCache() = 0;
 };
 
 } // machine_learning
index 92e062b609ee8b7cb463bd8ce57758014a234007..33b0137fd767f9cdf807675d09e8a6ef2570223b 100644 (file)
@@ -66,18 +66,19 @@ protected:
 public:
        LandmarkDetection(LandmarkDetectionTaskType task_type, std::shared_ptr<Config> config);
        virtual ~LandmarkDetection() = default;
-       void preDestroy();
-       LandmarkDetectionTaskType getTaskType();
-       void setEngineInfo(std::string engine_type_name, std::string device_type_name);
-       unsigned int getNumberOfEngines();
-       const std::string &getEngineType(unsigned int engine_index);
-       unsigned int getNumberOfDevices(const std::string &engine_type);
-       const std::string &getDeviceType(const std::string &engine_type, unsigned int device_index);
-       void configure();
-       void prepare();
-       void perform(mv_source_h &mv_src);
-       void performAsync(LandmarkDetectionInput &input);
-       LandmarkDetectionResult &getOutput();
+       void preDestroy() override;
+       LandmarkDetectionTaskType getTaskType() override;
+       void setEngineInfo(std::string engine_type_name, std::string device_type_name) override;
+       unsigned int getNumberOfEngines() override;
+       const std::string &getEngineType(unsigned int engine_index) override;
+       unsigned int getNumberOfDevices(const std::string &engine_type) override;
+       const std::string &getDeviceType(const std::string &engine_type, unsigned int device_index) override;
+       void configure() override;
+       void prepare() override;
+       void perform(mv_source_h &mv_src) override;
+       void performAsync(LandmarkDetectionInput &input) override;
+       LandmarkDetectionResult &getOutput() override;
+       LandmarkDetectionResult &getOutputCache() override;
 };
 
 } // machine_learning
index a200477ba7be489f38e4b303a6b875d81db2c001..42b2f07b7801d6ddd90518459dec10ee6ffca59a 100644 (file)
@@ -151,7 +151,7 @@ OutputBaseType &FacialLandmarkAdapter::getOutput()
 
 OutputBaseType &FacialLandmarkAdapter::getOutputCache()
 {
-       throw InvalidOperation("Not support yet.");
+       return _landmark_detection->getOutputCache();
 }
 
 }
index 3f979ccf28e3be2135c8a336d2382171399dcebc..86caa0472690bb25c947240cd0e263f9eb4f5ac1 100644 (file)
@@ -284,6 +284,11 @@ template<typename T> LandmarkDetectionResult &LandmarkDetection<T>::getOutput()
        return _current_result;
 }
 
+template<typename T> LandmarkDetectionResult &LandmarkDetection<T>::getOutputCache()
+{
+       return _current_result;
+}
+
 template<typename T> void LandmarkDetection<T>::performAsync(LandmarkDetectionInput &input)
 {
        if (!_async_manager) {
index 54cfd57306ec1e5d772f1d45b7cf33aed1bfd5b3..3d16a68807bf59d28480b6cb6333ceaf3f61f6ac 100644 (file)
@@ -150,7 +150,7 @@ OutputBaseType &PoseLandmarkAdapter::getOutput()
 
 OutputBaseType &PoseLandmarkAdapter::getOutputCache()
 {
-       throw InvalidOperation("Not support yet.");
+       return _landmark_detection->getOutputCache();
 }
 
 }
index f98d2a6095983855b983e99531b59fa7bc14f3c9..f3c59b2cf282002dd56c12cd58638548b4248763 100644 (file)
@@ -295,22 +295,17 @@ int mv_facial_landmark_inference_async(mv_facial_landmark_h handle, mv_source_h
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_facial_landmark_get_positions(mv_facial_landmark_h handle, unsigned int *number_of_landmarks,
-                                                                        unsigned int **pos_x, unsigned int **pos_y)
+int mv_facial_landmark_get_result_count(mv_facial_landmark_h handle, unsigned int *result_cnt)
 {
        MEDIA_VISION_SUPPORT_CHECK(mv_check_feature_key(feature_keys, num_keys, true));
        MEDIA_VISION_INSTANCE_CHECK(handle);
-       MEDIA_VISION_INSTANCE_CHECK(number_of_landmarks);
-       MEDIA_VISION_INSTANCE_CHECK(pos_x);
-       MEDIA_VISION_INSTANCE_CHECK(pos_y);
+       MEDIA_VISION_INSTANCE_CHECK(result_cnt);
 
        MEDIA_VISION_FUNCTION_ENTER();
 
        try {
                auto &result = static_cast<LandmarkDetectionResult &>(machine_learning_native_get_result(handle, TASK_NAME));
-               *number_of_landmarks = result.number_of_landmarks;
-               *pos_x = result.x_pos.data();
-               *pos_y = result.y_pos.data();
+               *result_cnt = result.number_of_landmarks;
        } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
@@ -320,3 +315,35 @@ int mv_facial_landmark_get_positions(mv_facial_landmark_h handle, unsigned int *
 
        return MEDIA_VISION_ERROR_NONE;
 }
+
+int mv_facial_landmark_get_result(mv_facial_landmark_h handle, unsigned int index, unsigned long *frame_number,
+                                                                 unsigned int *pos_x, unsigned int *pos_y)
+{
+       MEDIA_VISION_SUPPORT_CHECK(mv_check_feature_key(feature_keys, num_keys, true));
+       MEDIA_VISION_INSTANCE_CHECK(handle);
+       MEDIA_VISION_INSTANCE_CHECK(frame_number);
+       MEDIA_VISION_INSTANCE_CHECK(pos_x);
+       MEDIA_VISION_INSTANCE_CHECK(pos_y);
+
+       MEDIA_VISION_FUNCTION_ENTER();
+
+       try {
+               auto &result =
+                               static_cast<LandmarkDetectionResult &>(machine_learning_native_get_result_cache(handle, TASK_NAME));
+               if (index >= result.number_of_landmarks) {
+                       LOGE("Invalid index(index = %u, result count = %u).", index, result.number_of_landmarks);
+                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+               }
+
+               *frame_number = result.frame_number;
+               *pos_x = result.x_pos[index];
+               *pos_y = result.y_pos[index];
+       } catch (const BaseException &e) {
+               LOGE("%s", e.what());
+               return e.getError();
+       }
+
+       MEDIA_VISION_FUNCTION_LEAVE();
+
+       return MEDIA_VISION_ERROR_NONE;
+}
\ No newline at end of file
index 8abf04c4333e466429709854c8bda699a2ff9f95..66404aa31d31ee450f514f6d4445f02d4f7a00fd 100644 (file)
@@ -294,22 +294,17 @@ int mv_pose_landmark_inference_async(mv_pose_landmark_h handle, mv_source_h sour
        return MEDIA_VISION_ERROR_NONE;
 }
 
-int mv_pose_landmark_get_pos(mv_pose_landmark_h handle, unsigned int *number_of_landmarks, unsigned int **pos_x,
-                                                        unsigned int **pos_y)
+int mv_pose_landmark_get_result_count(mv_pose_landmark_h handle, unsigned int *result_cnt)
 {
        MEDIA_VISION_SUPPORT_CHECK(mv_check_feature_key(feature_keys, num_keys, true));
        MEDIA_VISION_INSTANCE_CHECK(handle);
-       MEDIA_VISION_INSTANCE_CHECK(number_of_landmarks);
-       MEDIA_VISION_INSTANCE_CHECK(pos_x);
-       MEDIA_VISION_INSTANCE_CHECK(pos_y);
+       MEDIA_VISION_INSTANCE_CHECK(result_cnt);
 
        MEDIA_VISION_FUNCTION_ENTER();
 
        try {
                auto &result = static_cast<LandmarkDetectionResult &>(machine_learning_native_get_result(handle, TASK_NAME));
-               *number_of_landmarks = result.number_of_landmarks;
-               *pos_x = result.x_pos.data();
-               *pos_y = result.y_pos.data();
+               *result_cnt = result.number_of_landmarks;
        } catch (const BaseException &e) {
                LOGE("%s", e.what());
                return e.getError();
@@ -319,3 +314,35 @@ int mv_pose_landmark_get_pos(mv_pose_landmark_h handle, unsigned int *number_of_
 
        return MEDIA_VISION_ERROR_NONE;
 }
+
+int mv_pose_landmark_get_result(mv_pose_landmark_h handle, unsigned int index, unsigned long *frame_number,
+                                                               unsigned int *pos_x, unsigned int *pos_y)
+{
+       MEDIA_VISION_SUPPORT_CHECK(mv_check_feature_key(feature_keys, num_keys, true));
+       MEDIA_VISION_INSTANCE_CHECK(handle);
+       MEDIA_VISION_INSTANCE_CHECK(frame_number);
+       MEDIA_VISION_INSTANCE_CHECK(pos_x);
+       MEDIA_VISION_INSTANCE_CHECK(pos_y);
+
+       MEDIA_VISION_FUNCTION_ENTER();
+
+       try {
+               auto &result =
+                               static_cast<LandmarkDetectionResult &>(machine_learning_native_get_result_cache(handle, TASK_NAME));
+               if (index >= result.number_of_landmarks) {
+                       LOGE("Invalid index(index = %u, result count = %u).", index, result.number_of_landmarks);
+                       return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+               }
+
+               *frame_number = result.frame_number;
+               *pos_x = result.x_pos[index];
+               *pos_y = result.y_pos[index];
+       } catch (const BaseException &e) {
+               LOGE("%s", e.what());
+               return e.getError();
+       }
+
+       MEDIA_VISION_FUNCTION_LEAVE();
+
+       return MEDIA_VISION_ERROR_NONE;
+}
\ No newline at end of file
index f01e4fe81d71612e89e11c357fd06ed3ec3d056b..7988aac28c3a7cda2ff722b40242121ffc49d3f0 100644 (file)
@@ -94,7 +94,7 @@ TEST(FacialLandmarkTest, InferenceShouldBeOk)
                // TODO.
        };
 
-       const unsigned int answer[][5] = { { 42, 87, 63, 48, 83 }, { 32, 31, 53, 75, 76 } };
+       const unsigned int coordinate_answers[][5] = { { 42, 87, 63, 48, 83 }, { 32, 31, 53, 75, 76 } };
 
        mv_source_h mv_source = NULL;
        int ret = mv_create_source(&mv_source);
@@ -125,15 +125,20 @@ TEST(FacialLandmarkTest, InferenceShouldBeOk)
                ret = mv_facial_landmark_inference(handle, mv_source);
                ASSERT_EQ(ret, 0);
 
-               unsigned int number_of_landmarks;
-               unsigned int *x_pos = nullptr, *y_pos = nullptr;
+               unsigned int number_of_objects;
 
-               ret = mv_facial_landmark_get_positions(handle, &number_of_landmarks, &x_pos, &y_pos);
-               ASSERT_EQ(ret, 0);
+               ret = mv_facial_landmark_get_result_count(handle, &number_of_objects);
+               ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+               for (unsigned int idx = 0; idx < number_of_objects; ++idx) {
+                       unsigned long frame_number;
+                       unsigned int pos_x, pos_y;
+
+                       ret = mv_facial_landmark_get_result(handle, idx, &frame_number, &pos_x, &pos_y);
+                       ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
 
-               for (unsigned int idx = 0; idx < number_of_landmarks; ++idx) {
-                       int distance_x = x_pos[idx] - answer[0][idx];
-                       int distance_y = y_pos[idx] - answer[1][idx];
+                       int distance_x = pos_x - coordinate_answers[0][idx];
+                       int distance_y = pos_y - coordinate_answers[1][idx];
 
                        distance_x = distance_x < 0 ? distance_x * -1 : distance_x;
                        distance_y = distance_y < 0 ? distance_y * -1 : distance_y;
@@ -235,15 +240,20 @@ TEST(PoseLandmarkTest, InferenceShouldBeOk)
                ret = mv_pose_landmark_inference(handle, mv_source);
                ASSERT_EQ(ret, 0);
 
-               unsigned int number_of_landmarks;
-               unsigned int *x_pos = nullptr, *y_pos = nullptr;
+               unsigned int number_of_objects;
 
-               ret = mv_pose_landmark_get_pos(handle, &number_of_landmarks, &x_pos, &y_pos);
-               ASSERT_EQ(ret, 0);
+               ret = mv_pose_landmark_get_result_count(handle, &number_of_objects);
+               ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+               for (unsigned int idx = 0; idx < number_of_objects; ++idx) {
+                       unsigned long frame_number;
+                       unsigned int pos_x, pos_y;
+
+                       ret = mv_pose_landmark_get_result(handle, idx, &frame_number, &pos_x, &pos_y);
+                       ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
 
-               for (unsigned int idx = 0; idx < number_of_landmarks; ++idx) {
-                       int distance_x = x_pos[idx] - coordinate_answers[0][idx];
-                       int distance_y = y_pos[idx] - coordinate_answers[1][idx];
+                       int distance_x = pos_x - coordinate_answers[0][idx];
+                       int distance_y = pos_y - coordinate_answers[1][idx];
 
                        distance_x = distance_x < 0 ? distance_x * -1 : distance_x;
                        distance_y = distance_y < 0 ? distance_y * -1 : distance_y;
index 4a8913907d14721a5c7ee00e5e0d75e486da16f4..656e7193aee276eaba3017411f80cda78f0c99c0 100644 (file)
@@ -44,23 +44,33 @@ struct model_info {
 
 void pose_landmark_callback(void *user_data)
 {
-       unsigned int number_of_landmarks;
-       unsigned int *x_pos = nullptr, *y_pos = nullptr;
-       unsigned long frame_number = 0;
        mv_pose_landmark_h handle = static_cast<mv_pose_landmark_h>(user_data);
        const unsigned int coordinate_answers[][10] = { { 300, 300, 275, 250, 325, 325, 225, 225, 350, 375 },
                                                                                                        { 50, 87, 100, 137, 100, 137, 187, 250, 187, 250 } };
 
-       while (frame_number < MAX_INFERENCE_ITERATION - 10) {
-               int ret = mv_pose_landmark_get_pos(handle, &number_of_landmarks, &x_pos, &y_pos);
+       bool is_loop_exit = false;
+
+       while (!is_loop_exit) {
+               unsigned int number_of_objects;
+
+               int ret = mv_pose_landmark_get_result_count(handle, &number_of_objects);
                if (ret == MEDIA_VISION_ERROR_INVALID_OPERATION)
                        break;
 
                ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
 
-               for (unsigned int idx = 0; idx < number_of_landmarks; ++idx) {
-                       int distance_x = x_pos[idx] - coordinate_answers[0][idx];
-                       int distance_y = y_pos[idx] - coordinate_answers[1][idx];
+               for (unsigned int idx = 0; idx < number_of_objects; ++idx) {
+                       unsigned long frame_number;
+                       unsigned int pos_x, pos_y;
+
+                       ret = mv_pose_landmark_get_result(handle, idx, &frame_number, &pos_x, &pos_y);
+                       ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+                       if (frame_number > MAX_INFERENCE_ITERATION - 10)
+                               is_loop_exit = true;
+
+                       int distance_x = pos_x - coordinate_answers[0][idx];
+                       int distance_y = pos_y - coordinate_answers[1][idx];
 
                        distance_x = distance_x < 0 ? distance_x * -1 : distance_x;
                        distance_y = distance_y < 0 ? distance_y * -1 : distance_y;
@@ -182,22 +192,31 @@ TEST(PostLandmarkAsyncTest, InferenceShouldBeOkWithDestroyFirst)
 
 void facial_landmark_callback(void *user_data)
 {
-       unsigned int number_of_landmarks;
-       unsigned int *x_pos = nullptr, *y_pos = nullptr;
-       unsigned long frame_number = 0;
        mv_facial_landmark_h handle = static_cast<mv_facial_landmark_h>(user_data);
        const unsigned int coordinate_answers[][5] = { { 42, 87, 63, 48, 83 }, { 32, 31, 53, 75, 76 } };
+       bool is_loop_exit = false;
 
-       while (frame_number < MAX_INFERENCE_ITERATION - 10) {
-               int ret = mv_facial_landmark_get_positions(handle, &number_of_landmarks, &x_pos, &y_pos);
+       while (!is_loop_exit) {
+               unsigned int number_of_objects;
+
+               int ret = mv_facial_landmark_get_result_count(handle, &number_of_objects);
                if (ret == MEDIA_VISION_ERROR_INVALID_OPERATION)
                        break;
 
                ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
 
-               for (unsigned int idx = 0; idx < number_of_landmarks; ++idx) {
-                       int distance_x = x_pos[idx] - coordinate_answers[0][idx];
-                       int distance_y = y_pos[idx] - coordinate_answers[1][idx];
+               for (unsigned int idx = 0; idx < number_of_objects; ++idx) {
+                       unsigned long frame_number;
+                       unsigned int pos_x, pos_y;
+
+                       ret = mv_facial_landmark_get_result(handle, idx, &frame_number, &pos_x, &pos_y);
+                       ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+                       if (frame_number > MAX_INFERENCE_ITERATION - 10)
+                               is_loop_exit = true;
+
+                       int distance_x = pos_x - coordinate_answers[0][idx];
+                       int distance_y = pos_y - coordinate_answers[1][idx];
 
                        distance_x = distance_x < 0 ? distance_x * -1 : distance_x;
                        distance_y = distance_y < 0 ? distance_y * -1 : distance_y;