// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/dogleg_strategy.h"

#include <algorithm>
#include <cmath>
#include <limits>

#include "Eigen/Dense"
#include "ceres/array_utils.h"
#include "ceres/internal/eigen.h"
#include "ceres/linear_least_squares_problems.h"
#include "ceres/linear_solver.h"
#include "ceres/polynomial.h"
#include "ceres/sparse_matrix.h"
#include "ceres/trust_region_strategy.h"
#include "ceres/types.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {
namespace {
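// Bounds for the regularization multiplier mu_ that scales the extra
// diagonal added to the Gauss-Newton solve when the Jacobian is rank
// deficient; see ComputeGaussNewtonStep() below.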
const double kMaxMu = 1.0;
const double kMinMu = 1e-8;
}  // namespace

DoglegStrategy::DoglegStrategy(const TrustRegionStrategy::Options& options)
    : linear_solver_(options.linear_solver),
      radius_(options.initial_radius),
      max_radius_(options.max_radius),
      min_diagonal_(options.min_lm_diagonal),
      max_diagonal_(options.max_lm_diagonal),
      mu_(kMinMu),
      min_mu_(kMinMu),
      max_mu_(kMaxMu),
      mu_increase_factor_(10.0),
      increase_threshold_(0.75),
      decrease_threshold_(0.25),
      dogleg_step_norm_(0.0),
      reuse_(false),
      dogleg_type_(options.dogleg_type) {
  CHECK_NOTNULL(linear_solver_);
  CHECK_GT(min_diagonal_, 0.0);
  CHECK_LE(min_diagonal_, max_diagonal_);
  CHECK_GT(max_radius_, 0.0);
}
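
// A minimal usage sketch (the linear_solver variable and the ownership
// handling are hypothetical; within Ceres this object is normally created
// via TrustRegionStrategy::Create()):
//
//   TrustRegionStrategy::Options options;
//   options.linear_solver = linear_solver;  // exact factorization solver
//   options.trust_region_strategy_type = DOGLEG;
//   options.dogleg_type = SUBSPACE_DOGLEG;
//   scoped_ptr<TrustRegionStrategy> strategy(
//       TrustRegionStrategy::Create(options));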

// If the reuse_ flag is not set, then the Cauchy point (scaled
// gradient) and the new Gauss-Newton step are computed from
// scratch. The Dogleg step is then computed as interpolation of these
// two vectors.
TrustRegionStrategy::Summary DoglegStrategy::ComputeStep(
    const TrustRegionStrategy::PerSolveOptions& per_solve_options,
    SparseMatrix* jacobian,
    const double* residuals,
    double* step) {
  CHECK_NOTNULL(jacobian);
  CHECK_NOTNULL(residuals);
  CHECK_NOTNULL(step);

  const int n = jacobian->num_cols();
  if (reuse_) {
    // Gauss-Newton and gradient vectors are always available, only a
    // new interpolant needs to be computed. For the subspace case,
    // the subspace and the two-dimensional model are also still valid.
    switch (dogleg_type_) {
      case TRADITIONAL_DOGLEG:
        ComputeTraditionalDoglegStep(step);
        break;

      case SUBSPACE_DOGLEG:
        ComputeSubspaceDoglegStep(step);
        break;
    }
    TrustRegionStrategy::Summary summary;
    summary.num_iterations = 0;
    summary.termination_type = LINEAR_SOLVER_SUCCESS;
    return summary;
  }

  reuse_ = true;

  // Check that we have the storage needed to hold the various
  // temporary vectors.
  if (diagonal_.rows() != n) {
    diagonal_.resize(n, 1);
    gradient_.resize(n, 1);
    gauss_newton_step_.resize(n, 1);
  }

  // Vector used to form the diagonal matrix that is used to
  // regularize the Gauss-Newton solve and that defines the
  // elliptical trust region
  //
  //   || D * step || <= radius_ .
  //
  jacobian->SquaredColumnNorm(diagonal_.data());
  for (int i = 0; i < n; ++i) {
    diagonal_[i] = std::min(std::max(diagonal_[i], min_diagonal_),
                            max_diagonal_);
  }
  diagonal_ = diagonal_.array().sqrt();

  ComputeGradient(jacobian, residuals);
  ComputeCauchyPoint(jacobian);

  LinearSolver::Summary linear_solver_summary =
      ComputeGaussNewtonStep(per_solve_options, jacobian, residuals);

  TrustRegionStrategy::Summary summary;
  summary.residual_norm = linear_solver_summary.residual_norm;
  summary.num_iterations = linear_solver_summary.num_iterations;
  summary.termination_type = linear_solver_summary.termination_type;

  if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
    return summary;
  }

  if (linear_solver_summary.termination_type != LINEAR_SOLVER_FAILURE) {
    switch (dogleg_type_) {
      // Interpolate the Cauchy point and the Gauss-Newton step.
      case TRADITIONAL_DOGLEG:
        ComputeTraditionalDoglegStep(step);
        break;

      // Find the minimum in the subspace defined by the
      // Cauchy point and the (Gauss-)Newton step.
      case SUBSPACE_DOGLEG:
        if (!ComputeSubspaceModel(jacobian)) {
          summary.termination_type = LINEAR_SOLVER_FAILURE;
          break;
        }
        ComputeSubspaceDoglegStep(step);
        break;
    }
  }

  return summary;
}

// The trust region is assumed to be elliptical with the
// diagonal scaling matrix D defined by sqrt(diagonal_).
// It is implemented by substituting step' = D * step.
// The trust region for step' is spherical.
// The gradient, the Gauss-Newton step, the Cauchy point,
// and all calculations involving the Jacobian have to
// be adjusted accordingly.
void DoglegStrategy::ComputeGradient(
    SparseMatrix* jacobian,
    const double* residuals) {
  gradient_.setZero();
  jacobian->LeftMultiply(residuals, gradient_.data());
  gradient_.array() /= diagonal_.array();
}

// The Cauchy point is the global minimizer of the quadratic model
// along the one-dimensional subspace spanned by the gradient.
void DoglegStrategy::ComputeCauchyPoint(SparseMatrix* jacobian) {
  // alpha * -gradient is the Cauchy point.
  Vector Jg(jacobian->num_rows());
  Jg.setZero();
  // The Jacobian is scaled implicitly by computing J * (D^-1 * (D^-1 * g))
  // instead of (J * D^-1) * (D^-1 * g).
  Vector scaled_gradient =
      (gradient_.array() / diagonal_.array()).matrix();
  jacobian->RightMultiply(scaled_gradient.data(), Jg.data());
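  // alpha_ is the exact minimizer of the model 0.5 * || J s + r ||^2 along
  // the ray s = -t * gradient_: setting the derivative with respect to t to
  // zero gives t* = |g|^2 / |J g|^2, since J^T r = g in the scaled space.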
  alpha_ = gradient_.squaredNorm() / Jg.squaredNorm();
}

// The dogleg step is defined as the intersection of the trust region
// boundary with the piecewise linear path from the origin to the Cauchy
// point and then from there to the Gauss-Newton point (global minimizer
// of the model function). The Gauss-Newton point is taken if it lies
// within the trust region.
void DoglegStrategy::ComputeTraditionalDoglegStep(double* dogleg) {
  VectorRef dogleg_step(dogleg, gradient_.rows());

  // Case 1. The Gauss-Newton step lies inside the trust region, and
  // is therefore the optimal solution to the trust-region problem.
  const double gradient_norm = gradient_.norm();
  const double gauss_newton_norm = gauss_newton_step_.norm();
  if (gauss_newton_norm <= radius_) {
    dogleg_step = gauss_newton_step_;
    dogleg_step_norm_ = gauss_newton_norm;
    dogleg_step.array() /= diagonal_.array();
    VLOG(3) << "GaussNewton step size: " << dogleg_step_norm_
            << " radius: " << radius_;
    return;
  }

  // Case 2. The Cauchy point and the Gauss-Newton steps lie outside
  // the trust region. Rescale the Cauchy point to the trust region
  // and return.
  if (gradient_norm * alpha_ >= radius_) {
    dogleg_step = -(radius_ / gradient_norm) * gradient_;
    dogleg_step_norm_ = radius_;
    dogleg_step.array() /= diagonal_.array();
    VLOG(3) << "Cauchy step size: " << dogleg_step_norm_
            << " radius: " << radius_;
    return;
  }

  // Case 3. The Cauchy point is inside the trust region and the
  // Gauss-Newton step is outside. Compute the line joining the two
  // points and the point on it which intersects the trust region
  // boundary.

  // a = alpha * -gradient
  // b = gauss_newton_step
  const double b_dot_a = -alpha_ * gradient_.dot(gauss_newton_step_);
  const double a_squared_norm = pow(alpha_ * gradient_norm, 2.0);
  const double b_minus_a_squared_norm =
      a_squared_norm - 2 * b_dot_a + pow(gauss_newton_norm, 2);

  // c = a' (b - a)
  //   = alpha * -gradient' gauss_newton_step - alpha^2 |gradient|^2
  const double c = b_dot_a - a_squared_norm;
  const double d = sqrt(c * c + b_minus_a_squared_norm *
                        (pow(radius_, 2.0) - a_squared_norm));
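
  // beta solves the quadratic || a + beta * (b - a) ||^2 = radius_^2, i.e.
  // the intersection of the dogleg segment with the trust region boundary.
  // The two branches below are algebraically equivalent forms of the same
  // root; choosing between them based on the sign of c avoids catastrophic
  // cancellation between d and -c.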
  const double beta = (c <= 0)
      ? (d - c) / b_minus_a_squared_norm
      : (radius_ * radius_ - a_squared_norm) / (d + c);
  dogleg_step = (-alpha_ * (1.0 - beta)) * gradient_
      + beta * gauss_newton_step_;
  dogleg_step_norm_ = dogleg_step.norm();
  dogleg_step.array() /= diagonal_.array();
  VLOG(3) << "Dogleg step size: " << dogleg_step_norm_
          << " radius: " << radius_;
}

// The subspace method finds the minimum of the two-dimensional problem
//
//   min. 1/2 x' B' H B x + g' B x
//   s.t. || B x ||^2 <= r^2
//
// where r is the trust region radius and B is the matrix with unit columns
// spanning the subspace defined by the steepest descent and Newton direction.
// This subspace by definition includes the Gauss-Newton point, which is
// therefore taken if it lies within the trust region.
void DoglegStrategy::ComputeSubspaceDoglegStep(double* dogleg) {
  VectorRef dogleg_step(dogleg, gradient_.rows());

  // The Gauss-Newton point is inside the trust region if |GN| <= radius_.
  // This test is valid even though radius_ is a length in the two-dimensional
  // subspace while gauss_newton_step_ is expressed in the (scaled)
  // higher-dimensional original space. This is because
  //
  //   1. gauss_newton_step_ by definition lies in the subspace, and
  //   2. the subspace basis is orthonormal.
  //
  // As a consequence, the norm of the gauss_newton_step_ in the subspace is
  // the same as its norm in the original space.
  const double gauss_newton_norm = gauss_newton_step_.norm();
  if (gauss_newton_norm <= radius_) {
    dogleg_step = gauss_newton_step_;
    dogleg_step_norm_ = gauss_newton_norm;
    dogleg_step.array() /= diagonal_.array();
    VLOG(3) << "GaussNewton step size: " << dogleg_step_norm_
            << " radius: " << radius_;
    return;
  }

  // The optimum lies on the boundary of the trust region. The above problem
  // therefore becomes
  //
  //   min. 1/2 x^T B^T H B x + g^T B x
  //   s.t. || B x ||^2 = r^2
  //
  // Notice the equality in the constraint.
  //
  // This can be solved by forming the Lagrangian, solving for x(y), where
  // y is the Lagrange multiplier, using the gradient of the objective, and
  // putting x(y) back into the constraint. This results in a fourth order
  // polynomial in y, which can be solved using e.g. the companion matrix.
  // See the description of MakePolynomialForBoundaryConstrainedProblem for
  // details. The result is up to four real roots y*, not all of which
  // correspond to feasible points. The feasible points x(y*) have to be
  // tested for optimality.

  if (subspace_is_one_dimensional_) {
    // The subspace is one-dimensional, so both the gradient and
    // the Gauss-Newton step point towards the same direction.
    // In this case, we move along the gradient until we reach the trust
    // region boundary.
    dogleg_step = -(radius_ / gradient_.norm()) * gradient_;
    dogleg_step_norm_ = radius_;
    dogleg_step.array() /= diagonal_.array();
    VLOG(3) << "Dogleg subspace step size (1D): " << dogleg_step_norm_
            << " radius: " << radius_;
    return;
  }

  Vector2d minimum(0.0, 0.0);
  if (!FindMinimumOnTrustRegionBoundary(&minimum)) {
    // If the minimum on the trust region boundary cannot be determined,
    // fall back to a traditional dogleg step.
    LOG(WARNING) << "Failed to compute polynomial roots. "
                 << "Taking traditional dogleg step instead.";
    ComputeTraditionalDoglegStep(dogleg);
    return;
  }

  // Test first order optimality at the minimum.
  // The first order KKT conditions state that the minimum x*
  // has to satisfy either || x* ||^2 < r^2 (i.e. has to lie within
  // the trust region), or
  //
  //   (B x* + g) + y x* = 0
  //
  // for some positive scalar y.
  // Here, as it is already known that the minimum lies on the boundary, the
  // latter condition is tested. To allow for small imprecisions, we test if
  // the angle between (B x* + g) and -x* is smaller than acos(0.99).
  // The exact value of the cosine is arbitrary but should be close to 1.
  //
  // This condition should not be violated. If it is, the minimum was not
  // correctly determined.
  const double kCosineThreshold = 0.99;
  const Vector2d grad_minimum = subspace_B_ * minimum + subspace_g_;
  const double cosine_angle = -minimum.dot(grad_minimum) /
      (minimum.norm() * grad_minimum.norm());
  if (cosine_angle < kCosineThreshold) {
    LOG(WARNING) << "First order optimality seems to be violated "
                 << "in the subspace method!\n"
                 << "Cosine of angle between x and B x + g is "
                 << cosine_angle << ".\n"
                 << "Taking a regular dogleg step instead.\n"
                 << "Please consider filing a bug report if this "
                 << "happens frequently or consistently.\n";
    ComputeTraditionalDoglegStep(dogleg);
    return;
  }

  // Create the full step from the optimal 2d solution.
  dogleg_step = subspace_basis_ * minimum;
  dogleg_step_norm_ = radius_;
  dogleg_step.array() /= diagonal_.array();
  VLOG(3) << "Dogleg subspace step size: " << dogleg_step_norm_
          << " radius: " << radius_;
}

// Build the polynomial that defines the optimal Lagrange multipliers.
// Let the Lagrangian be
//
//   L(x, y) = 0.5 x^T B x + x^T g + y (0.5 x^T x - 0.5 r^2).       (1)
//
// Stationary points of the Lagrangian are given by
//
//   0 = d L(x, y) / dx = Bx + g + y x                              (2)
//   0 = d L(x, y) / dy = 0.5 x^T x - 0.5 r^2                       (3)
//
// For any given y, we can solve (2) for x as
//
//   x(y) = -(B + y I)^-1 g .                                       (4)
//
// As B + y I is 2x2, we form the inverse explicitly:
//
//   (B + y I)^-1 = (1 / det(B + y I)) adj(B + y I)                 (5)
//
// where adj() denotes adjugation. This should be safe, as B is positive
// semi-definite and y is necessarily positive, so (B + y I) is
// indeed invertible.
// Plugging (5) into (4) and the result into (3), then dividing by 0.5 we
// obtain
//
//   0 = (1 / det(B + y I))^2 g^T adj(B + y I)^T adj(B + y I) g
//       - r^2                                                      (6)
//
// or
//
//   det(B + y I)^2 r^2 = g^T adj(B + y I)^T adj(B + y I) g         (7a)
//                      = g^T adj(B)^T adj(B) g
//                        + 2 y g^T adj(B)^T g + y^2 g^T g          (7b)
//
// as
//
//   adj(B + y I) = adj(B) + y I = adj(B)^T + y I .                 (8)
//
// The left hand side can be expressed explicitly using
//
//   det(B + y I) = det(B) + y tr(B) + y^2 .                        (9)
//
// So (7) is a polynomial in y of degree four.
// Bringing everything back to the left hand side, the coefficients can
// be read off as
//
//     y^4  r^2
//   + y^3  2 r^2 tr(B)
//   + y^2 (r^2 tr(B)^2 + 2 r^2 det(B) - g^T g)
//   + y^1 (2 r^2 det(B) tr(B) - 2 g^T adj(B)^T g)
//   + y^0 (r^2 det(B)^2 - g^T adj(B)^T adj(B) g) .
//
Vector DoglegStrategy::MakePolynomialForBoundaryConstrainedProblem() const {
  const double detB = subspace_B_.determinant();
  const double trB = subspace_B_.trace();
  const double r2 = radius_ * radius_;
  Matrix2d B_adj;
  B_adj <<  subspace_B_(1, 1) , -subspace_B_(0, 1),
           -subspace_B_(1, 0) ,  subspace_B_(0, 0);
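
  // The coefficients are stored in decreasing order of degree:
  // polynomial(0) multiplies y^4 and polynomial(4) is the constant term,
  // matching the convention expected by FindPolynomialRoots().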
  Vector polynomial(5);
  polynomial(0) = r2;
  polynomial(1) = 2.0 * r2 * trB;
  polynomial(2) = r2 * (trB * trB + 2.0 * detB) - subspace_g_.squaredNorm();
  polynomial(3) = -2.0 * (subspace_g_.transpose() * B_adj * subspace_g_
      - r2 * detB * trB);
  polynomial(4) = r2 * detB * detB - (B_adj * subspace_g_).squaredNorm();

  return polynomial;
}

// Given a Lagrange multiplier y that corresponds to a stationary point
// of the Lagrangian L(x, y), compute the corresponding x from the
// equation
//
//   0 = d L(x, y) / dx
//     = B * x + g + y * x
//     = (B + y * I) * x + g
//
DoglegStrategy::Vector2d DoglegStrategy::ComputeSubspaceStepFromRoot(
    double y) const {
  const Matrix2d B_i = subspace_B_ + y * Matrix2d::Identity();
  return -B_i.partialPivLu().solve(subspace_g_);
}

// This function evaluates the quadratic model at a point x in the
// subspace spanned by subspace_basis_.
double DoglegStrategy::EvaluateSubspaceModel(const Vector2d& x) const {
  return 0.5 * x.dot(subspace_B_ * x) + subspace_g_.dot(x);
}

// This function attempts to solve the boundary-constrained subspace problem
//
//   min. 1/2 x^T B^T H B x + g^T B x
//   s.t. || B x ||^2 = r^2
//
// where B is an orthonormal subspace basis and r is the trust-region radius.
//
// This is done by finding the roots of a fourth degree polynomial. If the
// root finding fails, the function returns false and minimum will be set
// to (0, 0). If it succeeds, true is returned.
//
// In the failure case, another step should be taken, such as the traditional
// dogleg step.
bool DoglegStrategy::FindMinimumOnTrustRegionBoundary(Vector2d* minimum) const {
  CHECK_NOTNULL(minimum);

  // Return (0, 0) in all error cases.
  minimum->setZero();

  // Create the fourth-degree polynomial that is a necessary condition for
  // optimality.
  const Vector polynomial = MakePolynomialForBoundaryConstrainedProblem();

  // Find the real parts y_i of its roots (not only the real roots).
  Vector roots_real;
  if (!FindPolynomialRoots(polynomial, &roots_real, NULL)) {
    // Failed to find the roots of the polynomial, i.e. the candidate
    // solutions of the constrained problem. Report this back to the caller.
    return false;
  }

  // For each root y, compute B x(y) and check for feasibility.
  // Notice that there should always be four roots, as the leading term of
  // the polynomial is r^2 and therefore non-zero. However, as some roots
  // may be complex, the real parts are not necessarily unique.
  double minimum_value = std::numeric_limits<double>::max();
  bool valid_root_found = false;
  for (int i = 0; i < roots_real.size(); ++i) {
    const Vector2d x_i = ComputeSubspaceStepFromRoot(roots_real(i));

    // Not all roots correspond to points on the trust region boundary.
    // There are at most four candidate solutions. As we are interested
    // in the minimum, it is safe to consider all of them after projecting
    // them onto the trust region boundary.
    if (x_i.norm() > 0) {
      const double f_i = EvaluateSubspaceModel((radius_ / x_i.norm()) * x_i);
      valid_root_found = true;
      if (f_i < minimum_value) {
        minimum_value = f_i;
        *minimum = (radius_ / x_i.norm()) * x_i;
      }
    }
  }

  return valid_root_found;
}

LinearSolver::Summary DoglegStrategy::ComputeGaussNewtonStep(
    const PerSolveOptions& per_solve_options,
    SparseMatrix* jacobian,
    const double* residuals) {
  const int n = jacobian->num_cols();
  LinearSolver::Summary linear_solver_summary;
  linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE;

  // The Jacobian matrix is often quite poorly conditioned. Thus it is
  // necessary to add a diagonal matrix at the bottom to prevent the
  // linear solver from failing.
  //
  // We do this by computing the same diagonal matrix as the one used
  // by Levenberg-Marquardt (other choices are possible), and scaling
  // it by a small constant (independent of the trust region radius).
  //
  // If the solve fails, the multiplier to the diagonal is increased
  // up to max_mu_ by a factor of mu_increase_factor_ every time. If
  // the linear solver is still not successful, the strategy returns
  // with LINEAR_SOLVER_FAILURE.
  //
  // Next time when a new Gauss-Newton step is requested, the
  // multiplier starts out from the last successful solve.
  //
  // When a step is declared successful, the multiplier is decreased
  // by half of mu_increase_factor_.
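  //
  // For example, starting from mu_ = kMinMu = 1e-8 with
  // mu_increase_factor_ = 10, at most eight failed solves
  // (mu_ = 1e-8, 1e-7, ..., 1e-1) are attempted before the loop below
  // gives up, since the next increase reaches max_mu_ = 1.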

  while (mu_ < max_mu_) {
    // Dogleg, as far as I (sameeragarwal) understand it, requires a
    // reasonably good estimate of the Gauss-Newton step. This means
    // that we need to solve the normal equations more or less
    // exactly. This is reflected in the values of the tolerances set
    // below.
    //
    // For now, this strategy should only be used with exact
    // factorization based solvers, for which these tolerances are
    // automatically satisfied.
    //
    // The right way to combine inexact solves with trust region
    // methods is to use Steihaug's method.
    LinearSolver::PerSolveOptions solve_options;
    solve_options.q_tolerance = 0.0;
    solve_options.r_tolerance = 0.0;

    lm_diagonal_ = diagonal_ * std::sqrt(mu_);
    solve_options.D = lm_diagonal_.data();

    // As in the LevenbergMarquardtStrategy, solve Jy = r instead
    // of Jx = -r and later set x = -y to avoid having to modify
    // either jacobian or residuals.
    InvalidateArray(n, gauss_newton_step_.data());
    linear_solver_summary = linear_solver_->Solve(jacobian,
                                                  residuals,
                                                  solve_options,
                                                  gauss_newton_step_.data());

    if (per_solve_options.dump_format_type == CONSOLE ||
        (per_solve_options.dump_format_type != CONSOLE &&
         !per_solve_options.dump_filename_base.empty())) {
      if (!DumpLinearLeastSquaresProblem(per_solve_options.dump_filename_base,
                                         per_solve_options.dump_format_type,
                                         jacobian,
                                         solve_options.D,
                                         residuals,
                                         gauss_newton_step_.data(),
                                         0)) {
        LOG(ERROR) << "Unable to dump trust region problem."
                   << " Filename base: "
                   << per_solve_options.dump_filename_base;
      }
    }

    if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
      return linear_solver_summary;
    }

    if (linear_solver_summary.termination_type == LINEAR_SOLVER_FAILURE ||
        !IsArrayValid(n, gauss_newton_step_.data())) {
      mu_ *= mu_increase_factor_;
      VLOG(2) << "Increasing mu " << mu_;
      linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE;
      continue;
    }
    break;
  }

  if (linear_solver_summary.termination_type != LINEAR_SOLVER_FAILURE) {
    // The scaled Gauss-Newton step is D * GN:
    //
    //   - (D^-1 J^T J D^-1)^-1 (D^-1 g)
    //   = - D (J^T J)^-1 D D^-1 g
    //   = D -(J^T J)^-1 g
    //
    gauss_newton_step_.array() *= -diagonal_.array();
  }

  return linear_solver_summary;
}

void DoglegStrategy::StepAccepted(double step_quality) {
  CHECK_GT(step_quality, 0.0);

  if (step_quality < decrease_threshold_) {
    radius_ *= 0.5;
  }

  if (step_quality > increase_threshold_) {
    radius_ = std::max(radius_, 3.0 * dogleg_step_norm_);
  }

  // Reduce the regularization multiplier, in the hope that whatever
  // was causing the rank deficiency has gone away and we can return
  // to doing a pure Gauss-Newton solve.
  mu_ = std::max(min_mu_, 2.0 * mu_ / mu_increase_factor_);
  reuse_ = false;
}

void DoglegStrategy::StepRejected(double step_quality) {
  radius_ *= 0.5;
  reuse_ = true;
}

void DoglegStrategy::StepIsInvalid() {
  mu_ *= mu_increase_factor_;
  reuse_ = false;
}

double DoglegStrategy::Radius() const {
  return radius_;
}

bool DoglegStrategy::ComputeSubspaceModel(SparseMatrix* jacobian) {
  // Compute an orthogonal basis for the subspace using QR decomposition.
  Matrix basis_vectors(jacobian->num_cols(), 2);
  basis_vectors.col(0) = gradient_;
  basis_vectors.col(1) = gauss_newton_step_;
  Eigen::ColPivHouseholderQR<Matrix> basis_qr(basis_vectors);

  switch (basis_qr.rank()) {
    case 0:
      // This should never happen, as it implies that both the gradient
      // and the Gauss-Newton step are zero. In this case, the minimizer
      // should have stopped due to the gradient being too small.
      LOG(ERROR) << "Rank of subspace basis is 0. "
                 << "This means that the gradient at the current iterate is "
                 << "zero but the optimization has not been terminated. "
                 << "You may have found a bug in Ceres.";
      return false;

    case 1:
      // Gradient and Gauss-Newton step coincide, so we lie on one of the
      // major axes of the quadratic problem. In this case, we simply move
      // along the gradient until we reach the trust region boundary.
      subspace_is_one_dimensional_ = true;
      return true;

    case 2:
      subspace_is_one_dimensional_ = false;
      break;

    default:
      LOG(ERROR) << "Rank of the subspace basis matrix is reported to be "
                 << "greater than 2. As the matrix contains only two "
                 << "columns this cannot be true and is indicative of "
                 << "a bug.";
      return false;
  }

  // The subspace is two-dimensional, so compute the subspace model.
  // Given the basis U, this is
  //
  //   subspace_g_ = g_scaled^T U
  //
  // and
  //
  //   subspace_B_ = U^T (J_scaled^T J_scaled) U .
  //
  // As J_scaled = J * D^-1, the latter becomes
  //
  //   subspace_B_ = ((U^T D^-1) J^T) (J (D^-1 U))
  //               = (J (D^-1 U))^T (J (D^-1 U))
  subspace_basis_ =
      basis_qr.householderQ() * Matrix::Identity(jacobian->num_cols(), 2);
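
  // Note: multiplying the full Householder Q by the first two columns of
  // the identity extracts the thin Q factor, i.e. an orthonormal basis U
  // of span{gradient_, gauss_newton_step_}.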

  subspace_g_ = subspace_basis_.transpose() * gradient_;

  Eigen::Matrix<double, 2, Eigen::Dynamic, Eigen::RowMajor>
      Jb(2, jacobian->num_rows());
  Jb.setZero();

  Vector tmp;
  tmp = (subspace_basis_.col(0).array() / diagonal_.array()).matrix();
  jacobian->RightMultiply(tmp.data(), Jb.row(0).data());
  tmp = (subspace_basis_.col(1).array() / diagonal_.array()).matrix();
  jacobian->RightMultiply(tmp.data(), Jb.row(1).data());

  subspace_B_ = Jb * Jb.transpose();

  return true;
}

}  // namespace internal
}  // namespace ceres