examples/robust_curve_fitting.cc (imported from upstream Ceres Solver 1.13.0)
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include <iostream>  // std::cout is used for reporting in main().

#include "ceres/ceres.h"
#include "glog/logging.h"

// Data generated using the following octave code.
//   randn('seed', 23497);
//   m = 0.3;
//   c = 0.1;
//   x=[0:0.075:5];
//   y = exp(m * x + c);
//   noise = randn(size(x)) * 0.2;
//   outlier_noise = rand(size(x)) < 0.05;
//   y_observed = y + noise + outlier_noise;
//   data = [x', y_observed'];

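// The model being fit is y = exp(m * x + c) with ground truth m = 0.3 and
// c = 0.1. Two of the 67 observations, marked "Outlier point" below, are
// gross outliers; handling them gracefully is what the robust CauchyLoss
// used in main() is for.
//
// For readers who would rather regenerate similar data in C++, a rough,
// hypothetical sketch (not part of this example, requiring <random> and
// <cmath>, and not guaranteed to reproduce the exact values in data[]) is:
//
//   std::mt19937 rng(23497);
//   std::normal_distribution<double> gaussian(0.0, 0.2);
//   std::uniform_real_distribution<double> uniform(0.0, 1.0);
//   for (int i = 0; i < 67; ++i) {
//     const double x = 0.075 * i;
//     double y = std::exp(0.3 * x + 0.1) + gaussian(rng);
//     if (uniform(rng) < 0.05) y += 1.0;  // occasional outlier, mirroring the octave code above
//     // record the pair (x, y)
//   }
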
const int kNumObservations = 67;
const double data[] = {
0.000000e+00, 1.133898e+00,
7.500000e-02, 1.334902e+00,
1.500000e-01, 1.213546e+00,
2.250000e-01, 1.252016e+00,
3.000000e-01, 1.392265e+00,
3.750000e-01, 1.314458e+00,
4.500000e-01, 1.472541e+00,
5.250000e-01, 1.536218e+00,
6.000000e-01, 1.355679e+00,
6.750000e-01, 1.463566e+00,
7.500000e-01, 1.490201e+00,
8.250000e-01, 1.658699e+00,
9.000000e-01, 1.067574e+00,
9.750000e-01, 1.464629e+00,
1.050000e+00, 1.402653e+00,
1.125000e+00, 1.713141e+00,
1.200000e+00, 1.527021e+00,
1.275000e+00, 1.702632e+00,
1.350000e+00, 1.423899e+00,
1.425000e+00, 5.543078e+00, // Outlier point
1.500000e+00, 5.664015e+00, // Outlier point
1.575000e+00, 1.732484e+00,
1.650000e+00, 1.543296e+00,
1.725000e+00, 1.959523e+00,
1.800000e+00, 1.685132e+00,
1.875000e+00, 1.951791e+00,
1.950000e+00, 2.095346e+00,
2.025000e+00, 2.361460e+00,
2.100000e+00, 2.169119e+00,
2.175000e+00, 2.061745e+00,
2.250000e+00, 2.178641e+00,
2.325000e+00, 2.104346e+00,
2.400000e+00, 2.584470e+00,
2.475000e+00, 1.914158e+00,
2.550000e+00, 2.368375e+00,
2.625000e+00, 2.686125e+00,
2.700000e+00, 2.712395e+00,
2.775000e+00, 2.499511e+00,
2.850000e+00, 2.558897e+00,
2.925000e+00, 2.309154e+00,
3.000000e+00, 2.869503e+00,
3.075000e+00, 3.116645e+00,
3.150000e+00, 3.094907e+00,
3.225000e+00, 2.471759e+00,
3.300000e+00, 3.017131e+00,
3.375000e+00, 3.232381e+00,
3.450000e+00, 2.944596e+00,
3.525000e+00, 3.385343e+00,
3.600000e+00, 3.199826e+00,
3.675000e+00, 3.423039e+00,
3.750000e+00, 3.621552e+00,
3.825000e+00, 3.559255e+00,
3.900000e+00, 3.530713e+00,
3.975000e+00, 3.561766e+00,
4.050000e+00, 3.544574e+00,
4.125000e+00, 3.867945e+00,
4.200000e+00, 4.049776e+00,
4.275000e+00, 3.885601e+00,
4.350000e+00, 4.110505e+00,
4.425000e+00, 4.345320e+00,
4.500000e+00, 4.161241e+00,
4.575000e+00, 4.363407e+00,
4.650000e+00, 4.161576e+00,
4.725000e+00, 4.619728e+00,
4.800000e+00, 4.737410e+00,
4.875000e+00, 4.727863e+00,
4.950000e+00, 4.669206e+00
};

using ceres::AutoDiffCostFunction;
using ceres::CostFunction;
using ceres::CauchyLoss;
using ceres::Problem;
using ceres::Solve;
using ceres::Solver;

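// Templated functor that computes the residual r = y - exp(m * x + c) for a
// single observation (x, y). Because operator() is templated on T,
// AutoDiffCostFunction can evaluate it with plain doubles as well as with
// ceres::Jet objects, which yields the derivatives with respect to m and c
// automatically.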
struct ExponentialResidual {
  ExponentialResidual(double x, double y)
      : x_(x), y_(y) {}

  template <typename T> bool operator()(const T* const m,
                                        const T* const c,
                                        T* residual) const {
    residual[0] = y_ - exp(m[0] * x_ + c[0]);
    return true;
  }

 private:
  const double x_;
  const double y_;
};

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);

  double m = 0.0;
  double c = 0.0;

  Problem problem;
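  // Add one residual block per observation. The template arguments
  // <ExponentialResidual, 1, 1, 1> declare one residual and two parameter
  // blocks of size one each (m and c). Wrapping every term in CauchyLoss(0.5)
  // is what makes the fit robust: the loss downweights large residuals so the
  // two outlier points do not dominate the solution.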
  for (int i = 0; i < kNumObservations; ++i) {
    CostFunction* cost_function =
        new AutoDiffCostFunction<ExponentialResidual, 1, 1, 1>(
            new ExponentialResidual(data[2 * i], data[2 * i + 1]));
    problem.AddResidualBlock(cost_function,
                             new CauchyLoss(0.5),
                             &m, &c);
  }

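  // With only two parameters and 67 residuals the Jacobian is tiny, so a
  // dense QR factorization is a suitable linear solver choice here.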
  Solver::Options options;
  options.linear_solver_type = ceres::DENSE_QR;
  options.minimizer_progress_to_stdout = true;

  Solver::Summary summary;
  Solve(options, &problem, &summary);
  std::cout << summary.BriefReport() << "\n";
  std::cout << "Initial m: " << 0.0 << " c: " << 0.0 << "\n";
  std::cout << "Final   m: " << m << " c: " << c << "\n";
  return 0;
}