#include "InputCamera.h"
#include "SingleoLog.h"
+
using namespace std;
using namespace singleo::inference;
using namespace singleo::input;
auto &result = _face_estimator->result();
SINGLEO_LOGD("Result done");
+ _headPose = _head_pose_estimator->estimate(result._landmarks[0]);
+ vector<Point2f> poseAxes = _head_pose_estimator->getPoseAxes();
+
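+ // debug visualization: wrap the raw input buffer in a cv::Mat (no copy) and draw the projected pose axes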
+ ImageDataType &data = dynamic_cast<ImageDataType &>(input);
+ cv::Mat _dump(cv::Size(data.width, data.height), CV_MAKETYPE(CV_8U, data.byte_per_pixel), data.ptr);
+ cv::line(_dump, cv::Point(poseAxes[0].x, poseAxes[0].y), cv::Point(poseAxes[1].x, poseAxes[1].y), cv::Scalar(0, 0, 255), 2); // x - red
+ cv::line(_dump, cv::Point(poseAxes[0].x, poseAxes[0].y), cv::Point(poseAxes[2].x, poseAxes[2].y), cv::Scalar(0, 255, 0), 2); // y - green
+ cv::line(_dump, cv::Point(poseAxes[0].x, poseAxes[0].y), cv::Point(poseAxes[3].x, poseAxes[3].y), cv::Scalar(255, 0, 0), 2); // z - blue
+
+ cv::imwrite("dump_headpose.jpg", _dump);
// auto &headPose = dynamic_cast<FldResultType&>(result);
// SINGLEO_LOGD("Landmark: %zd, %zd",
// headPose._landmarks[0].x, headPose._landmarks[0].y);
#include "CameraParamManager.h"
#include "HeadPoseEstimator.h"
#include "SingleoLog.h"
+#include "SingleoTimer.h"
using namespace std;
using namespace cv;
for (auto &point : faceShape)
_landmarks_3d.push_back(cv::Point3f(point.x, point.y, point.z));
+ // landmark index 33 is the tip of the nose; anchor the pose axes there, with endpoints 50 units along each axis
+ _pose_axes_3d.push_back(_landmarks_3d[33]);
+ _pose_axes_3d.push_back(cv::Point3f(_landmarks_3d[33].x - 50, _landmarks_3d[33].y, _landmarks_3d[33].z));
+ _pose_axes_3d.push_back(cv::Point3f(_landmarks_3d[33].x, _landmarks_3d[33].y - 50, _landmarks_3d[33].z));
+ _pose_axes_3d.push_back(cv::Point3f(_landmarks_3d[33].x, _landmarks_3d[33].y, _landmarks_3d[33].z - 50));
+ _pose_axes.resize(_pose_axes_3d.size());
+
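+ // pre-allocate the 2D landmark buffer and the rotation/translation vectors reused by solvePnP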
+ _landmarks_2d.resize(_landmarks_3d.size());
+ _rotation_vector = Mat(Size(3,1), CV_32FC1);
+ _translation_vector = Mat(Size(3,1), CV_32FC1);
+
CameraParamManager cam_dummy("/usr/share/singleo/camera_param_dummy.yaml");
- _camera_param = Mat::zeros(Size(3,3), CV_32FC1);
+ _camera_matrix = Mat::zeros(Size(3,3), CV_32FC1);
tuple<float, float> focalLength = cam_dummy.getFocalLengthXY();
tuple<float, float> principlePoint = cam_dummy.getPrinciplePointXY();
- _camera_param.at<float>(0,0) = get<0>(focalLength);
- _camera_param.at<float>(1,1) = get<1>(focalLength);
- _camera_param.at<float>(0,2) = get<0>(principlePoint);
- _camera_param.at<float>(1,2) = get<1>(principlePoint);
- _camera_param.at<float>(2,2) = 1.0;
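+ // pinhole intrinsics: fx, fy on the diagonal, principal point (cx, cy) in the last column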
+ _camera_matrix.at<float>(0,0) = get<0>(focalLength);
+ _camera_matrix.at<float>(1,1) = get<1>(focalLength);
+ _camera_matrix.at<float>(0,2) = get<0>(principlePoint);
+ _camera_matrix.at<float>(1,2) = get<1>(principlePoint);
+ _camera_matrix.at<float>(2,2) = 1.0;
_camera_dist_coeff = Mat::zeros(Size(1,5), CV_32FC1);
tuple<float, float, float, float, float> distCoeff = cam_dummy.getDistortionCoeff();
_camera_dist_coeff.at<float>(3, 0) = get<3>(distCoeff);
_camera_dist_coeff.at<float>(4, 0) = get<4>(distCoeff);
- for (int r = 0; r < _camera_param.rows; ++r) {
- for (int c = 0; c < _camera_param.cols; ++c) {
- SINGLEO_LOGI("%.3f", _camera_param.at<float>(r, c));
+ for (int r = 0; r < _camera_matrix.rows; ++r) {
+ for (int c = 0; c < _camera_matrix.cols; ++c) {
+ SINGLEO_LOGI("%.3f", _camera_matrix.at<float>(r, c));
}
}
for (int r = 0; r < _camera_dist_coeff.rows; ++r) {
}
+PoseVector HeadPoseEstimator::estimate(const vector<Point> &landmark2d)
+{
+ Timer timer;
+
+ // TODO: consider parallelizing this copy or using C++17 structured bindings
+ for (size_t idx = 0; idx < landmark2d.size(); ++idx) {
+ _landmarks_2d[idx].x = landmark2d[idx].x;
+ _landmarks_2d[idx].y = landmark2d[idx].y;
+ }
+ SINGLEO_LOGI("Get 2d landmark takes %d ms", timer.check_ms());
+
+ timer.reset();
+ // initial estimate: EPnP inside RANSAC to reject landmark outliers
+ cv::solvePnPRansac(_landmarks_3d, _landmarks_2d,
+ _camera_matrix, _camera_dist_coeff,
+ _rotation_vector, _translation_vector, false, 100, 8.0F, 0.99, cv::noArray(), cv::SOLVEPNP_EPNP);
+ SINGLEO_LOGI("1st solvePnP takes %d ms", timer.check_ms());
+
+ timer.reset();
+ // refine the pose with the iterative solver, using the previous estimate as the initial guess
+ // https://docs.opencv.org/4.x/d9/d0c/group__calib3d.html#ga357634492a94efe8858d0ce1509da869
+ cv::solvePnP(_landmarks_3d, _landmarks_2d,
+ _camera_matrix, _camera_dist_coeff,
+ _rotation_vector, _translation_vector, true, cv::SOLVEPNP_ITERATIVE);
+ SINGLEO_LOGI("2nd solvePnP takes %d ms", timer.check_ms());
+
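+ // return the Rodrigues rotation vector and the translation vector as a PoseVector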
+ return PoseVector{ Point3f{ _rotation_vector.at<float>(0), _rotation_vector.at<float>(1), _rotation_vector.at<float>(2)},
+ Point3f{ _translation_vector.at<float>(0), _translation_vector.at<float>(1), _translation_vector.at<float>(2)} };
+}
+
+vector<Point2f> &HeadPoseEstimator::getPoseAxes()
+{
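+ // project the 3D axis endpoints into the image plane using the last estimated pose and the camera intrinsics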
+ std::vector<cv::Point2f> poseAxes2d;
+ cv::projectPoints(_pose_axes_3d, _rotation_vector, _translation_vector, _camera_matrix, _camera_dist_coeff, poseAxes2d);
+ SINGLEO_LOGI("projectPoints %zu", poseAxes2d.size());
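+ // copy the projected points element-wise into the member buffer returned by reference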
+ for (size_t i = 0; i < poseAxes2d.size(); i++) {
+ _pose_axes[i].x = poseAxes2d[i].x;
+ _pose_axes[i].y = poseAxes2d[i].y;
+ }
+
+ return _pose_axes;
+}
+
} // smartpointer
} // services
} // singleo