_rotation_vector = Mat(Size(3,1), CV_32FC1);
_translation_vector = Mat(Size(3,1), CV_32FC1);
- CameraParamManager cam_dummy("/usr/share/singleo/camera_param_rs2.yaml");
+ CameraParamManager cam_dummy("/usr/share/singleo/camera_param_rs2_640.yaml");
_camera_matrix = Mat::zeros(Size(3,3), CV_32FC1);
tuple<float, float> focalLength = cam_dummy.getFocalLengthXY();
}
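+
+// Debug helper: logs the contents of a 3x3 matrix. Element access below assumes a
+// CV_32FC1 (float) matrix such as _camera_matrix.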
+void HeadPoseEstimator::print3x3Matrix(cv::Mat &matrix, std::string name)
+{
+ SINGLEO_LOGI("Print %s matrix", name.c_str());
+ SINGLEO_LOGI("%d x %d", matrix.rows, matrix.cols);
+ for (int r = 0; r < matrix.rows; ++r) {
+ SINGLEO_LOGD("%f, %f, %f", matrix.at<float>(r, 0), matrix.at<float>(r, 1), matrix.at<float>(r, 2));
+ }
+}
+
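+// Debug helper: logs a 3x1 vector. Element access below assumes CV_64FC1 (double)
+// data, which is what solvePnP typically writes into the rotation/translation vectors.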
+void HeadPoseEstimator::print3x1Matrix(cv::Mat &matrix, std::string name)
+{
+ SINGLEO_LOGI("Print %s matrix", name.c_str());
+ SINGLEO_LOGI("%d x %d", matrix.rows, matrix.cols);
+ for (int r = 0; r < matrix.rows; ++r) {
+ SINGLEO_LOGD("%f", matrix.at<double>(r, 0));
+ }
+}
+
+void HeadPoseEstimator::printVec3f(cv::Vec3f &vec, std::string name)
+{
+ SINGLEO_LOGI("Print %s vector", name.c_str());
+ SINGLEO_LOGD("%.3f, %.3f, %.3f", vec[0], vec[1], vec[2]);
+}
+
PoseVector HeadPoseEstimator::estimate(const vector<Point> &landmark2d)
{
Timer timer;
_rotation_vector, _translation_vector, true, cv::SOLVEPNP_ITERATIVE);
SINGLEO_LOGI("2nd solvePnP takes %d ms", timer.check_ms());
- return PoseVector{ Point3f{ _rotation_vector.at<float>(0), _rotation_vector.at<float>(1), _rotation_vector.at<float>(2)},
- Point3f{ _translation_vector.at<float>(0), _translation_vector.at<float>(1), _translation_vector.at<float>(2)} };
+ timer.reset();
+ cv::Rodrigues(_rotation_vector, _rotation_matrix);
+ auto rotation_eulerAngles = rotationMatrixToEulerAngles(_rotation_matrix);
+ SINGLEO_LOGI("Rodrigues + Euler conversion takes %d ms", timer.check_ms());
+
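+ // rotation_eulerAngles holds rotations about the X, Y and Z axes in radians and is
+ // consumed downstream as (pitch, yaw, roll); the translation vector is read as
+ // double (solvePnP output) and narrowed to float for PoseVector.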
+ return PoseVector{ Point3f{ rotation_eulerAngles[0], rotation_eulerAngles[1], rotation_eulerAngles[2]},
+ Point3f{ static_cast<float>(_translation_vector.at<double>(0)),
+ static_cast<float>(_translation_vector.at<double>(1)),
+ static_cast<float>(_translation_vector.at<double>(2))} };
}
vector<Point2f> &HeadPoseEstimator::getPoseAxes()
{
std::vector<cv::Point2f> poseAxes2d;
cv::projectPoints(_pose_axes_3d, _rotation_vector, _translation_vector, _camera_matrix, _camera_dist_coeff, poseAxes2d);
- SINGLEO_LOGI("projectPoints %zd", poseAxes2d.size());
+
for (size_t i = 0; i < poseAxes2d.size(); i++) {
_pose_axes[i].x = poseAxes2d[i].x;
_pose_axes[i].y = poseAxes2d[i].y;
}
return _pose_axes;
}
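+
+// Re-projects the 3D landmarks with the estimated pose and compares them against the
+// observed 2D landmarks; returns the RMS reprojection error in pixels.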
+double HeadPoseEstimator::calculateReProjectectionError()
+{
+ std::vector<cv::Point2f> landmarks_2d;
+ cv::projectPoints(_landmarks_3d, _rotation_vector, _translation_vector, _camera_matrix, _camera_dist_coeff, landmarks_2d);
+ double error = 0.0;
+
+ for (size_t i = 0; i < _landmarks_2d.size(); i++) {
+ double diff = cv::norm(cv::Mat(_landmarks_2d[i]), cv::Mat(landmarks_2d[i]), cv::NORM_L2);
+ error += diff * diff;
+ }
+
+ return std::sqrt(error/_landmarks_2d.size());
+}
+
std::vector<int> &HeadPoseEstimator::getFaceComponentsIndieces()
{
return _faceComponentIndices;
}
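+
+// A proper rotation matrix R is orthogonal (R^T * R = I); this checks how far
+// R^T * R deviates from the identity.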
+bool HeadPoseEstimator::validRotationMatrix(cv::Mat &matrix)
+{
+ Mat matrixT;
+ transpose(matrix, matrixT);
+ Mat shouldBeIdentity = matrixT * matrix;
+ Mat I = Mat::eye(3,3, shouldBeIdentity.type());
+
+ return norm(I, shouldBeIdentity) < 1e-6;
+}
+
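+// Decomposes a rotation matrix into Euler angles (rotations about the X, Y and Z axes,
+// in radians), fixing the Z rotation to zero in the near-singular (gimbal lock) case.
+// Element access assumes CV_64F (double) data, as produced here by cv::Rodrigues.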
+Vec3f HeadPoseEstimator::rotationMatrixToEulerAngles(cv::Mat &matrix)
+{
+ float sy = sqrt(matrix.at<double>(0,0) * matrix.at<double>(0,0) + matrix.at<double>(1,0) * matrix.at<double>(1,0) );
+
+ bool singular = sy < 1e-6; // if sy is near zero, the matrix is singular (gimbal lock)
+
+ float x, y, z;
+ if (!singular)
+ {
+ x = atan2(matrix.at<double>(2,1) , matrix.at<double>(2,2));
+ y = atan2(-matrix.at<double>(2,0), sy);
+ z = atan2(matrix.at<double>(1,0), matrix.at<double>(0,0));
+ }
+ else
+ {
+ x = atan2(-matrix.at<double>(1,2), matrix.at<double>(1,1));
+ y = atan2(-matrix.at<double>(2,0), sy);
+ z = 0;
+ }
+ return Vec3f(x, y, z);
+}
+
} // smartpointer
} // services
} // singleo
}
void data_feeder_cb(unsigned char *buffer, unsigned int width, unsigned int height, unsigned int bytes_per_pixel,
- void *headPose, void *gesture)
+ void *headRot, void *headTrans)
{
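+ // headRot / headTrans are expected to point at the Point3f rotation (Euler angles,
+ // printed as pitch/yaw/roll) and translation produced by the head pose estimator.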
cv::Mat result(cv::Size(width, height), CV_MAKETYPE(CV_8U, bytes_per_pixel), buffer);
cv::Mat vizData;
cv::cvtColor(result, vizData, cv::COLOR_BGR2RGBA);
- Point3f *dof3d = static_cast<Point3f *>(headPose);
- cout << "[Pitch, Yaw, Roll]: [ " << dof3d->x << ", " << dof3d->y << ", " << dof3d->z << " ]" << endl;
+ Point3f *dof3d_rot = static_cast<Point3f *>(headRot);
+ Point3f *dof3d_trans = static_cast<Point3f *>(headTrans);
+ cout << "[Pitch, Yaw, Roll]: [ " << dof3d_rot->x << ", " << dof3d_rot->y << ", " << dof3d_rot->z << " ]" << endl;
- if (gesture) {
- string *gesture_str = static_cast<string *>(gesture);
- cv::putText(vizData, *gesture_str, cv::Point(30, 30), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0, 255), 2);
- cout << "[Gesture Str]: " << *gesture_str << endl;
- }
+ cout << setprecision(5) << "[X]: [ " << dof3d_trans->x << " ]" << endl;
+ cout << setprecision(5) << "[Y]: [ " << dof3d_trans->y << " ]" << endl;
+ cout << setprecision(5) << "[Z]: [ " << dof3d_trans->z << " ]" << endl;
+
+ // if (gesture) {
+ // string *gesture_str = static_cast<string *>(gesture);
+ // cv::putText(vizData, *gesture_str, cv::Point(30, 30), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0, 255), 2);
+ // cout << "[Gesture Str]: " << *gesture_str << endl;
+ // }
singleo_util_visualizer_2d(vizData, NULL);
}