1 // Ceres Solver - A fast non-linear least squares minimizer
2 // Copyright 2015 Google Inc. All rights reserved.
3 // http://ceres-solver.org/
5 // Redistribution and use in source and binary forms, with or without
6 // modification, are permitted provided that the following conditions are met:
8 // * Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above copyright notice,
11 // this list of conditions and the following disclaimer in the documentation
12 // and/or other materials provided with the distribution.
13 // * Neither the name of Google Inc. nor the names of its contributors may be
14 // used to endorse or promote products derived from this software without
15 // specific prior written permission.
17 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21 // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 // POSSIBILITY OF SUCH DAMAGE.
29 // Author: sameeragarwal@google.com (Sameer Agarwal)
31 // Purpose: See .h file.
#include "ceres/loss_function.h"

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <limits>
41 void TrivialLoss::Evaluate(double s, double rho[3]) const {
47 void HuberLoss::Evaluate(double s, double rho[3]) const {
50 // 'r' is always positive.
51 const double r = sqrt(s);
52 rho[0] = 2.0 * a_ * r - b_;
53 rho[1] = std::max(std::numeric_limits<double>::min(), a_ / r);
54 rho[2] = - rho[1] / (2.0 * s);
63 void SoftLOneLoss::Evaluate(double s, double rho[3]) const {
64 const double sum = 1.0 + s * c_;
65 const double tmp = sqrt(sum);
66 // 'sum' and 'tmp' are always positive, assuming that 's' is.
67 rho[0] = 2.0 * b_ * (tmp - 1.0);
68 rho[1] = std::max(std::numeric_limits<double>::min(), 1.0 / tmp);
69 rho[2] = - (c_ * rho[1]) / (2.0 * sum);
72 void CauchyLoss::Evaluate(double s, double rho[3]) const {
73 const double sum = 1.0 + s * c_;
74 const double inv = 1.0 / sum;
75 // 'sum' and 'inv' are always positive, assuming that 's' is.
76 rho[0] = b_ * log(sum);
77 rho[1] = std::max(std::numeric_limits<double>::min(), inv);
78 rho[2] = - c_ * (inv * inv);
81 void ArctanLoss::Evaluate(double s, double rho[3]) const {
82 const double sum = 1 + s * s * b_;
83 const double inv = 1 / sum;
84 // 'sum' and 'inv' are always positive.
85 rho[0] = a_ * atan2(s, a_);
86 rho[1] = std::max(std::numeric_limits<double>::min(), inv);
87 rho[2] = -2.0 * s * b_ * (inv * inv);
90 TolerantLoss::TolerantLoss(double a, double b)
93 c_(b * log(1.0 + exp(-a / b))) {
98 void TolerantLoss::Evaluate(double s, double rho[3]) const {
99 const double x = (s - a_) / b_;
100 // The basic equation is rho[0] = b ln(1 + e^x). However, if e^x is too
101 // large, it will overflow. Since numerically 1 + e^x == e^x when the
102 // x is greater than about ln(2^53) for doubles, beyond this threshold
103 // we substitute x for ln(1 + e^x) as a numerically equivalent approximation.
104 static const double kLog2Pow53 = 36.7; // ln(MathLimits<double>::kEpsilon).
105 if (x > kLog2Pow53) {
106 rho[0] = s - a_ - c_;
110 const double e_x = exp(x);
111 rho[0] = b_ * log(1.0 + e_x) - c_;
112 rho[1] = std::max(std::numeric_limits<double>::min(), e_x / (1.0 + e_x));
113 rho[2] = 0.5 / (b_ * (1.0 + cosh(x)));
117 void TukeyLoss::Evaluate(double s, double* rho) const {
118 if (s <= a_squared_) {
120 const double value = 1.0 - s / a_squared_;
121 const double value_sq = value * value;
122 rho[0] = a_squared_ / 6.0 * (1.0 - value_sq * value);
123 rho[1] = 0.5 * value_sq;
124 rho[2] = -1.0 / a_squared_ * value;
127 rho[0] = a_squared_ / 6.0;
133 ComposedLoss::ComposedLoss(const LossFunction* f, Ownership ownership_f,
134 const LossFunction* g, Ownership ownership_g)
135 : f_(CHECK_NOTNULL(f)),
136 g_(CHECK_NOTNULL(g)),
137 ownership_f_(ownership_f),
138 ownership_g_(ownership_g) {
141 ComposedLoss::~ComposedLoss() {
142 if (ownership_f_ == DO_NOT_TAKE_OWNERSHIP) {
145 if (ownership_g_ == DO_NOT_TAKE_OWNERSHIP) {
150 void ComposedLoss::Evaluate(double s, double rho[3]) const {
151 double rho_f[3], rho_g[3];
152 g_->Evaluate(s, rho_g);
153 f_->Evaluate(rho_g[0], rho_f);
156 rho[1] = rho_f[1] * rho_g[1];
157 // f''(g(s)) * g'(s) * g'(s) + f'(g(s)) * g''(s).
158 rho[2] = rho_f[2] * rho_g[1] * rho_g[1] + rho_f[1] * rho_g[2];
161 void ScaledLoss::Evaluate(double s, double rho[3]) const {
162 if (rho_.get() == NULL) {
167 rho_->Evaluate(s, rho);