mv_machine_learning: drop input and output type dependency from landmark detection
author Inki Dae <inki.dae@samsung.com>
Wed, 29 Nov 2023 06:19:45 +0000 (15:19 +0900)
committer Kwanghoon Son <k.son@samsung.com>
Wed, 6 Dec 2023 01:36:46 +0000 (10:36 +0900)
[Issue type] : code refactoring

Drop the input and output type dependency from the landmark detection
task group by deriving its task-specific input and output types
(LandmarkDetectionInput and LandmarkDetectionResult) from the common
base types, and by making the adapter classes of the landmark detection
task group use the common types instead of the task-specific ones.
See the sketch of the common types below the sign-off.

Change-Id: Ie366cab3919bdbbbf315263188f4ca4f6516cd33
Signed-off-by: Inki Dae <inki.dae@samsung.com>
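
For reference, a minimal sketch of the common types this change builds
on. The real definitions live in MachineLearningType.h; the members
shown here are only inferred from how this diff uses them
(InputBaseType takes the mv_source_h in its constructor, and
OutputBaseType absorbs the frame_number member removed from
LandmarkDetectionResult):

	#include <mv_common.h>

	namespace mediavision
	{
	namespace machine_learning
	{
	// Assumed common input type: carries the inference source handle
	// that every task-specific input needs.
	struct InputBaseType {
		InputBaseType(mv_source_h src = NULL) : inference_src(src)
		{}
		mv_source_h inference_src;
	};

	// Assumed common output type: carries the per-frame counter that
	// each task-specific result used to declare on its own.
	struct OutputBaseType {
		unsigned long frame_number {};
	};
	}
	}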
mv_machine_learning/landmark_detection/include/landmark_detection_type.h
mv_machine_learning/landmark_detection/src/facial_landmark_adapter.cpp
mv_machine_learning/landmark_detection/src/mv_facial_landmark.cpp
mv_machine_learning/landmark_detection/src/mv_pose_landmark.cpp
mv_machine_learning/landmark_detection/src/pose_landmark_adapter.cpp

index 27fb6ed..f2355a9 100644 (file)
 
 #include <mv_common.h>
 #include <mv_inference_type.h>
+#include "MachineLearningType.h"
 
 namespace mediavision
 {
 namespace machine_learning
 {
-struct LandmarkDetectionInput {
-       mv_source_h inference_src;
+struct LandmarkDetectionInput : public InputBaseType {
+       LandmarkDetectionInput(mv_source_h src = NULL) : InputBaseType(src)
+       {}
 };
 
-struct LandmarkDetectionResult {
-       unsigned long frame_number {};
+struct LandmarkDetectionResult : public OutputBaseType {
        unsigned int number_of_landmarks {};
        std::vector<unsigned int> x_pos;
        std::vector<unsigned int> y_pos;
index ccadda6..acebc95 100644 (file)
@@ -154,7 +154,7 @@ template<typename T, typename V> void FacialLandmarkAdapter<T, V>::perform()
 
 template<typename T, typename V> void FacialLandmarkAdapter<T, V>::performAsync(T &t)
 {
-       _landmark_detection->performAsync(t);
+       _landmark_detection->performAsync(static_cast<LandmarkDetectionInput &>(t));
 }
 
 template<typename T, typename V> V &FacialLandmarkAdapter<T, V>::getOutput()
@@ -167,6 +167,6 @@ template<typename T, typename V> V &FacialLandmarkAdapter<T, V>::getOutputCache(
        throw InvalidOperation("Not support yet.");
 }
 
-template class FacialLandmarkAdapter<LandmarkDetectionInput, LandmarkDetectionResult>;
+template class FacialLandmarkAdapter<InputBaseType, OutputBaseType>;
 }
 }
\ No newline at end of file
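
The static_cast in performAsync above is an unchecked downcast from the
common type back to the task-specific one. It is safe only because
every C entry point constructs a LandmarkDetectionInput, as this rough
outline of the call path (names taken from the diffs below) shows:

	// mv_facial_landmark.cpp: the C API builds the derived type...
	LandmarkDetectionInput input(source);
	task->performAsync(input); // task is ITask<InputBaseType, OutputBaseType>

	// facial_landmark_adapter.cpp (T = InputBaseType): ...and the
	// adapter recovers it with a downcast before forwarding.
	_landmark_detection->performAsync(static_cast<LandmarkDetectionInput &>(t));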
index 6dcfccd..34bf817 100644 (file)
@@ -35,7 +35,7 @@ using namespace mediavision::common;
 using namespace mediavision::machine_learning;
 using namespace MediaVision::Common;
 using namespace mediavision::machine_learning::exception;
-using LandmarkDetectionTask = ITask<LandmarkDetectionInput, LandmarkDetectionResult>;
+using LandmarkDetectionTask = ITask<InputBaseType, OutputBaseType>;
 
 int mv_facial_landmark_create(mv_facial_landmark_h *handle)
 {
@@ -49,7 +49,7 @@ int mv_facial_landmark_create(mv_facial_landmark_h *handle)
 
        try {
                context = new Context();
-               task = new FacialLandmarkAdapter<LandmarkDetectionInput, LandmarkDetectionResult>();
+               task = new FacialLandmarkAdapter<InputBaseType, OutputBaseType>();
                context->__tasks.insert(make_pair("facial_landmark", task));
                *handle = static_cast<mv_facial_landmark_h>(context);
        } catch (const BaseException &e) {
@@ -294,7 +294,7 @@ int mv_facial_landmark_inference(mv_facial_landmark_h handle, mv_source_h source
                auto context = static_cast<Context *>(handle);
                auto task = static_cast<LandmarkDetectionTask *>(context->__tasks.at("facial_landmark"));
 
-               LandmarkDetectionInput input = { source };
+               LandmarkDetectionInput input(source);
 
                task->setInput(input);
                task->perform();
@@ -320,7 +320,7 @@ int mv_facial_landmark_inference_async(mv_facial_landmark_h handle, mv_source_h
                auto context = static_cast<Context *>(handle);
                auto task = static_cast<LandmarkDetectionTask *>(context->__tasks.at("facial_landmark"));
 
-               LandmarkDetectionInput input = { source };
+               LandmarkDetectionInput input(source);
 
                task->performAsync(input);
        } catch (const BaseException &e) {
@@ -348,7 +348,7 @@ int mv_facial_landmark_get_positions(mv_facial_landmark_h handle, unsigned int *
                auto context = static_cast<Context *>(handle);
                auto task = static_cast<LandmarkDetectionTask *>(context->__tasks.at("facial_landmark"));
 
-               LandmarkDetectionResult &result = task->getOutput();
+               auto &result = static_cast<LandmarkDetectionResult &>(task->getOutput());
                *number_of_landmarks = result.number_of_landmarks;
                *pos_x = result.x_pos.data();
                *pos_y = result.y_pos.data();
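
For context, a hypothetical caller of the facial landmark C API; the
patch does not change the public signatures (the pointer-output
parameters below are inferred from the hunk above), so a caller along
these lines keeps working:

	mv_facial_landmark_h handle = NULL;
	mv_facial_landmark_create(&handle);
	// ... configure and prepare the engine, fill an mv_source_h (omitted) ...
	mv_facial_landmark_inference(handle, source);

	unsigned int count = 0;
	unsigned int *x = NULL, *y = NULL;
	mv_facial_landmark_get_positions(handle, &count, &x, &y);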
index 4eca588..72f3aa4 100644 (file)
@@ -35,7 +35,7 @@ using namespace mediavision::common;
 using namespace mediavision::machine_learning;
 using namespace MediaVision::Common;
 using namespace mediavision::machine_learning::exception;
-using LandmarkDetectionTask = ITask<LandmarkDetectionInput, LandmarkDetectionResult>;
+using LandmarkDetectionTask = ITask<InputBaseType, OutputBaseType>;
 
 int mv_pose_landmark_create(mv_pose_landmark_h *handle)
 {
@@ -49,7 +49,7 @@ int mv_pose_landmark_create(mv_pose_landmark_h *handle)
 
        try {
                context = new Context();
-               task = new PoseLandmarkAdapter<LandmarkDetectionInput, LandmarkDetectionResult>();
+               task = new PoseLandmarkAdapter<InputBaseType, OutputBaseType>();
                context->__tasks.insert(make_pair("pose_landmark", task));
                *handle = static_cast<mv_pose_landmark_h>(context);
        } catch (const BaseException &e) {
@@ -293,7 +293,7 @@ int mv_pose_landmark_inference(mv_pose_landmark_h handle, mv_source_h source)
                auto context = static_cast<Context *>(handle);
                auto task = static_cast<LandmarkDetectionTask *>(context->__tasks.at("pose_landmark"));
 
-               LandmarkDetectionInput input = { source };
+               LandmarkDetectionInput input(source);
 
                task->setInput(input);
                task->perform();
@@ -319,7 +319,7 @@ int mv_pose_landmark_inference_async(mv_pose_landmark_h handle, mv_source_h sour
                auto context = static_cast<Context *>(handle);
                auto task = static_cast<LandmarkDetectionTask *>(context->__tasks.at("pose_landmark"));
 
-               LandmarkDetectionInput input = { source };
+               LandmarkDetectionInput input(source);
 
                task->performAsync(input);
        } catch (const BaseException &e) {
@@ -347,7 +347,7 @@ int mv_pose_landmark_get_pos(mv_pose_landmark_h handle, unsigned int *number_of_
                auto context = static_cast<Context *>(handle);
                auto task = static_cast<LandmarkDetectionTask *>(context->__tasks.at("pose_landmark"));
 
-               LandmarkDetectionResult &result = task->getOutput();
+               auto &result = static_cast<LandmarkDetectionResult &>(task->getOutput());
                *number_of_landmarks = result.number_of_landmarks;
                *pos_x = result.x_pos.data();
                *pos_y = result.y_pos.data();
index 54cb21e..486bcdd 100644 (file)
@@ -153,7 +153,7 @@ template<typename T, typename V> void PoseLandmarkAdapter<T, V>::perform()
 
 template<typename T, typename V> void PoseLandmarkAdapter<T, V>::performAsync(T &t)
 {
-       _landmark_detection->performAsync(t);
+       _landmark_detection->performAsync(static_cast<LandmarkDetectionInput &>(t));
 }
 
 template<typename T, typename V> V &PoseLandmarkAdapter<T, V>::getOutput()
@@ -166,6 +166,6 @@ template<typename T, typename V> V &PoseLandmarkAdapter<T, V>::getOutputCache()
        throw InvalidOperation("Not support yet.");
 }
 
-template class PoseLandmarkAdapter<LandmarkDetectionInput, LandmarkDetectionResult>;
+template class PoseLandmarkAdapter<InputBaseType, OutputBaseType>;
 }
 }