#include <mv_common.h>
#include <mv_inference_type.h>

#include "MachineLearningType.h"
namespace mediavision
{
namespace machine_learning
{
-struct LandmarkDetectionInput {
- mv_source_h inference_src;
+struct LandmarkDetectionInput : public InputBaseType {
+ LandmarkDetectionInput(mv_source_h src = NULL) : InputBaseType(src)
+ {}
};
-struct LandmarkDetectionResult {
- unsigned long frame_number {};
+struct LandmarkDetectionResult : public OutputBaseType {
unsigned int number_of_landmarks {};
std::vector<unsigned int> x_pos;
std::vector<unsigned int> y_pos;
/**
 * @brief Request asynchronous landmark detection for the given input.
 *
 * T is the generalized InputBaseType; the underlying detection object still
 * takes the concrete input type, so downcast here. Callers must pass a
 * LandmarkDetectionInput (the C API constructs one before calling).
 */
template<typename T, typename V> void FacialLandmarkAdapter<T, V>::performAsync(T &t)
{
	_landmark_detection->performAsync(static_cast<LandmarkDetectionInput &>(t));
}
template<typename T, typename V> V &FacialLandmarkAdapter<T, V>::getOutput()
throw InvalidOperation("Not support yet.");
}
-template class FacialLandmarkAdapter<LandmarkDetectionInput, LandmarkDetectionResult>;
+template class FacialLandmarkAdapter<InputBaseType, OutputBaseType>;
}
}
\ No newline at end of file
using namespace mediavision::machine_learning;
using namespace MediaVision::Common;
using namespace mediavision::machine_learning::exception;
-using LandmarkDetectionTask = ITask<LandmarkDetectionInput, LandmarkDetectionResult>;
+using LandmarkDetectionTask = ITask<InputBaseType, OutputBaseType>;
int mv_facial_landmark_create(mv_facial_landmark_h *handle)
{
try {
context = new Context();
- task = new FacialLandmarkAdapter<LandmarkDetectionInput, LandmarkDetectionResult>();
+ task = new FacialLandmarkAdapter<InputBaseType, OutputBaseType>();
context->__tasks.insert(make_pair("facial_landmark", task));
*handle = static_cast<mv_facial_landmark_h>(context);
} catch (const BaseException &e) {
auto context = static_cast<Context *>(handle);
auto task = static_cast<LandmarkDetectionTask *>(context->__tasks.at("facial_landmark"));
- LandmarkDetectionInput input = { source };
+ LandmarkDetectionInput input(source);
task->setInput(input);
task->perform();
auto context = static_cast<Context *>(handle);
auto task = static_cast<LandmarkDetectionTask *>(context->__tasks.at("facial_landmark"));
- LandmarkDetectionInput input = { source };
+ LandmarkDetectionInput input(source);
task->performAsync(input);
} catch (const BaseException &e) {
auto context = static_cast<Context *>(handle);
auto task = static_cast<LandmarkDetectionTask *>(context->__tasks.at("facial_landmark"));
- LandmarkDetectionResult &result = task->getOutput();
+ auto &result = static_cast<LandmarkDetectionResult &>(task->getOutput());
*number_of_landmarks = result.number_of_landmarks;
*pos_x = result.x_pos.data();
*pos_y = result.y_pos.data();
using namespace mediavision::machine_learning;
using namespace MediaVision::Common;
using namespace mediavision::machine_learning::exception;
-using LandmarkDetectionTask = ITask<LandmarkDetectionInput, LandmarkDetectionResult>;
+using LandmarkDetectionTask = ITask<InputBaseType, OutputBaseType>;
int mv_pose_landmark_create(mv_pose_landmark_h *handle)
{
try {
context = new Context();
- task = new PoseLandmarkAdapter<LandmarkDetectionInput, LandmarkDetectionResult>();
+ task = new PoseLandmarkAdapter<InputBaseType, OutputBaseType>();
context->__tasks.insert(make_pair("pose_landmark", task));
*handle = static_cast<mv_pose_landmark_h>(context);
} catch (const BaseException &e) {
auto context = static_cast<Context *>(handle);
auto task = static_cast<LandmarkDetectionTask *>(context->__tasks.at("pose_landmark"));
- LandmarkDetectionInput input = { source };
+ LandmarkDetectionInput input(source);
task->setInput(input);
task->perform();
auto context = static_cast<Context *>(handle);
auto task = static_cast<LandmarkDetectionTask *>(context->__tasks.at("pose_landmark"));
- LandmarkDetectionInput input = { source };
+ LandmarkDetectionInput input(source);
task->performAsync(input);
} catch (const BaseException &e) {
auto context = static_cast<Context *>(handle);
auto task = static_cast<LandmarkDetectionTask *>(context->__tasks.at("pose_landmark"));
- LandmarkDetectionResult &result = task->getOutput();
+ auto &result = static_cast<LandmarkDetectionResult &>(task->getOutput());
*number_of_landmarks = result.number_of_landmarks;
*pos_x = result.x_pos.data();
*pos_y = result.y_pos.data();
/**
 * @brief Request asynchronous landmark detection for the given input.
 *
 * T is the generalized InputBaseType; the underlying detection object still
 * takes the concrete input type, so downcast here. Callers must pass a
 * LandmarkDetectionInput (the C API constructs one before calling).
 */
template<typename T, typename V> void PoseLandmarkAdapter<T, V>::performAsync(T &t)
{
	_landmark_detection->performAsync(static_cast<LandmarkDetectionInput &>(t));
}
template<typename T, typename V> V &PoseLandmarkAdapter<T, V>::getOutput()
throw InvalidOperation("Not support yet.");
}
-template class PoseLandmarkAdapter<LandmarkDetectionInput, LandmarkDetectionResult>;
+template class PoseLandmarkAdapter<InputBaseType, OutputBaseType>;
}
}