// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: keir@google.com (Keir Mierle)
//
// The ProgramEvaluator runs the cost functions contained in each residual block
// and stores the result into a jacobian. The particular type of jacobian is
// abstracted out using two template parameters:
//
//   - An "EvaluatePreparer" that is responsible for creating the array with
//     pointers to the jacobian blocks that the cost function writes into.
//   - A "JacobianWriter" that is responsible for storing the resulting
//     jacobian blocks in the passed sparse matrix.
//
// This abstraction affords an efficient evaluator implementation while still
// supporting writing to multiple sparse matrix formats. For example, when the
// ProgramEvaluator is parameterized for writing to block sparse matrices, the
// residual jacobians are written directly into their final position in the
// block sparse matrix by the user's CostFunction; there is no copying.
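//
// As an illustrative sketch (the concrete pairing is selected at runtime by
// Evaluator::Create, based on the linear solver type), a dense evaluator is
// instantiated roughly as
//
//   ProgramEvaluator<ScratchEvaluatePreparer, DenseJacobianWriter>
//       evaluator(options, program);
//
// while solvers operating on block sparse matrices pair a block-aware
// preparer and writer, e.g. BlockEvaluatePreparer with BlockJacobianWriter.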
//
// The evaluation is threaded with OpenMP.
//
// The EvaluatePreparer and JacobianWriter interfaces are as follows:
//
//   class EvaluatePreparer {
//     // Prepare the jacobians array for use as the destination of a call to
//     // a cost function's evaluate method.
//     void Prepare(const ResidualBlock* residual_block,
//                  int residual_block_index,
//                  SparseMatrix* jacobian,
//                  double** jacobians);
//   }
//
//   class JacobianWriter {
//     // Create a jacobian that this writer can write. Same as
//     // Evaluator::CreateJacobian.
//     SparseMatrix* CreateJacobian() const;
//
//     // Create num_threads evaluate preparers. The caller owns the result,
//     // which must be freed with delete[]. The preparers are only valid
//     // while *this is.
//     EvaluatePreparer* CreateEvaluatePreparers(int num_threads);
//
//     // Write the block jacobians from a residual block evaluation to the
//     // larger sparse jacobian.
//     void Write(int residual_id,
//                int residual_offset,
//                double** jacobians,
//                SparseMatrix* jacobian);
//   }
//
// Note: The ProgramEvaluator is not thread safe, since internally it maintains
// some per-thread scratch space.

#ifndef CERES_INTERNAL_PROGRAM_EVALUATOR_H_
#define CERES_INTERNAL_PROGRAM_EVALUATOR_H_

// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"

#ifdef CERES_USE_OPENMP
#include <omp.h>
#endif

#include <map>
#include <string>
#include <vector>
#include "ceres/execution_summary.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/small_blas.h"

namespace ceres {
namespace internal {

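// The default JacobianFinalizer. It leaves the jacobian untouched after
// evaluation.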
struct NullJacobianFinalizer {
  void operator()(SparseMatrix* jacobian, int num_parameters) {}
};

template<typename EvaluatePreparer,
         typename JacobianWriter,
         typename JacobianFinalizer = NullJacobianFinalizer>
class ProgramEvaluator : public Evaluator {
 public:
  ProgramEvaluator(const Evaluator::Options &options, Program* program)
      : options_(options),
        program_(program),
        jacobian_writer_(options, program),
        evaluate_preparers_(
            jacobian_writer_.CreateEvaluatePreparers(options.num_threads)) {
#ifndef CERES_USE_OPENMP
    if (options_.num_threads > 1) {
      LOG(WARNING)
          << "OpenMP support is not compiled into this binary; "
          << "only options.num_threads = 1 is supported. Switching "
          << "to single threaded mode.";
      options_.num_threads = 1;
    }
#endif

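    // Precompute where each residual block's residuals live in the stacked
    // residual vector, and allocate per-thread evaluation scratch space.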
    BuildResidualLayout(*program, &residual_layout_);
    evaluate_scratch_.reset(CreateEvaluatorScratch(*program,
                                                   options.num_threads));
  }

  // Implementation of Evaluator interface.
  SparseMatrix* CreateJacobian() const {
    return jacobian_writer_.CreateJacobian();
  }

  bool Evaluate(const Evaluator::EvaluateOptions& evaluate_options,
                const double* state,
                double* cost,
                double* residuals,
                double* gradient,
                SparseMatrix* jacobian) {
    ScopedExecutionTimer total_timer("Evaluator::Total", &execution_summary_);
    ScopedExecutionTimer call_type_timer(gradient == NULL && jacobian == NULL
                                         ? "Evaluator::Residual"
                                         : "Evaluator::Jacobian",
                                         &execution_summary_);

    // The parameters are stateful, so set the state before evaluating.
    if (!program_->StateVectorToParameterBlocks(state)) {
      return false;
    }

    if (residuals != NULL) {
      VectorRef(residuals, program_->NumResiduals()).setZero();
    }

    if (jacobian != NULL) {
      jacobian->SetZero();
    }

    // Each thread gets its own cost and evaluate scratch space.
    for (int i = 0; i < options_.num_threads; ++i) {
      evaluate_scratch_[i].cost = 0.0;
      if (gradient != NULL) {
        VectorRef(evaluate_scratch_[i].gradient.get(),
                  program_->NumEffectiveParameters()).setZero();
      }
    }

    // This bool is used to skip the remaining loop iterations if an error is
    // encountered, since OpenMP does not allow breaking out of the loop. The
    // remaining iterations still run, but with an empty body, and so finish
    // quickly.
    bool abort = false;
    int num_residual_blocks = program_->NumResidualBlocks();
#pragma omp parallel for num_threads(options_.num_threads)
    for (int i = 0; i < num_residual_blocks; ++i) {
// Disable the loop instead of breaking, as required by OpenMP.
#pragma omp flush(abort)
      if (abort) {
        continue;
      }

#ifdef CERES_USE_OPENMP
      int thread_id = omp_get_thread_num();
#else
      int thread_id = 0;
#endif
      EvaluatePreparer* preparer = &evaluate_preparers_[thread_id];
      EvaluateScratch* scratch = &evaluate_scratch_[thread_id];

      // Prepare block residuals if requested.
      const ResidualBlock* residual_block = program_->residual_blocks()[i];
      double* block_residuals = NULL;
      if (residuals != NULL) {
        block_residuals = residuals + residual_layout_[i];
      } else if (gradient != NULL) {
        block_residuals = scratch->residual_block_residuals.get();
      }

      // Prepare block jacobians if requested.
      double** block_jacobians = NULL;
      if (jacobian != NULL || gradient != NULL) {
        preparer->Prepare(residual_block,
                          i,
                          jacobian,
                          scratch->jacobian_block_ptrs.get());
        block_jacobians = scratch->jacobian_block_ptrs.get();
      }

      // Evaluate the cost, residuals, and jacobians.
      double block_cost;
      if (!residual_block->Evaluate(
              evaluate_options.apply_loss_function,
              &block_cost,
              block_residuals,
              block_jacobians,
              scratch->residual_block_evaluate_scratch.get())) {
        abort = true;
// This ensures that the OpenMP threads have a consistent view of 'abort'. Do
// the flush inside the failure case so that there is usually only one
// synchronization point per loop iteration instead of two.
#pragma omp flush(abort)
        continue;
      }

      scratch->cost += block_cost;

      // Store the jacobians, if they were requested.
      if (jacobian != NULL) {
        jacobian_writer_.Write(i,
                               residual_layout_[i],
                               block_jacobians,
                               jacobian);
      }

      // Compute and store the gradient, if it was requested.
      if (gradient != NULL) {
        int num_residuals = residual_block->NumResiduals();
        int num_parameter_blocks = residual_block->NumParameterBlocks();
        for (int j = 0; j < num_parameter_blocks; ++j) {
          const ParameterBlock* parameter_block =
              residual_block->parameter_blocks()[j];
          if (parameter_block->IsConstant()) {
            continue;
          }

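          // Accumulate this residual block's contribution to the gradient:
          // g_j += J_j^T r, where J_j is the (tangent-space sized) jacobian
          // block for parameter block j and r is the block's residual vector.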
          MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
              block_jacobians[j],
              num_residuals,
              parameter_block->LocalSize(),
              block_residuals,
              scratch->gradient.get() + parameter_block->delta_offset());
        }
      }
    }

    if (!abort) {
      const int num_parameters = program_->NumEffectiveParameters();

      // Sum the cost and gradient (if requested) from each thread.
      (*cost) = 0.0;
      if (gradient != NULL) {
        VectorRef(gradient, num_parameters).setZero();
      }
      for (int i = 0; i < options_.num_threads; ++i) {
        (*cost) += evaluate_scratch_[i].cost;
        if (gradient != NULL) {
          VectorRef(gradient, num_parameters) +=
              VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters);
        }
      }

      // Finalize the Jacobian if it is available.
      // `num_parameters` is passed to the finalizer so that storage can be
      // reserved for additional diagonal elements if necessary.
      if (jacobian != NULL) {
        JacobianFinalizer f;
        f(jacobian, num_parameters);
      }
    }
    return !abort;
  }

  bool Plus(const double* state,
            const double* delta,
            double* state_plus_delta) const {
    return program_->Plus(state, delta, state_plus_delta);
  }

  int NumParameters() const {
    return program_->NumParameters();
  }
  int NumEffectiveParameters() const {
    return program_->NumEffectiveParameters();
  }

  int NumResiduals() const {
    return program_->NumResiduals();
  }

  virtual std::map<std::string, int> CallStatistics() const {
    return execution_summary_.calls();
  }

  virtual std::map<std::string, double> TimeStatistics() const {
    return execution_summary_.times();
  }

 private:
  // Per-thread scratch space needed to evaluate and store each residual block.
  struct EvaluateScratch {
    void Init(int max_parameters_per_residual_block,
              int max_scratch_doubles_needed_for_evaluate,
              int max_residuals_per_residual_block,
              int num_parameters) {
      residual_block_evaluate_scratch.reset(
          new double[max_scratch_doubles_needed_for_evaluate]);
      gradient.reset(new double[num_parameters]);
      VectorRef(gradient.get(), num_parameters).setZero();
      residual_block_residuals.reset(
          new double[max_residuals_per_residual_block]);
      jacobian_block_ptrs.reset(
          new double*[max_parameters_per_residual_block]);
    }

    double cost;
    scoped_array<double> residual_block_evaluate_scratch;
    // The gradient in the local parameterization.
    scoped_array<double> gradient;
    // Enough space to store the residual for the largest residual block.
    scoped_array<double> residual_block_residuals;
    scoped_array<double*> jacobian_block_ptrs;
  };

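  // Compute the offset of each residual block's residuals within the
  // program-wide residual vector, i.e. (*residual_layout)[i] is the position
  // at which residual block i's residuals begin.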
  static void BuildResidualLayout(const Program& program,
                                  std::vector<int>* residual_layout) {
    const std::vector<ResidualBlock*>& residual_blocks =
        program.residual_blocks();
    residual_layout->resize(program.NumResidualBlocks());
    int residual_pos = 0;
    for (int i = 0; i < residual_blocks.size(); ++i) {
      const int num_residuals = residual_blocks[i]->NumResiduals();
      (*residual_layout)[i] = residual_pos;
      residual_pos += num_residuals;
    }
  }

  // Create scratch space for each thread evaluating the program.
  static EvaluateScratch* CreateEvaluatorScratch(const Program& program,
                                                 int num_threads) {
    int max_parameters_per_residual_block =
        program.MaxParametersPerResidualBlock();
    int max_scratch_doubles_needed_for_evaluate =
        program.MaxScratchDoublesNeededForEvaluate();
    int max_residuals_per_residual_block =
        program.MaxResidualsPerResidualBlock();
    int num_parameters = program.NumEffectiveParameters();

    EvaluateScratch* evaluate_scratch = new EvaluateScratch[num_threads];
    for (int i = 0; i < num_threads; i++) {
      evaluate_scratch[i].Init(max_parameters_per_residual_block,
                               max_scratch_doubles_needed_for_evaluate,
                               max_residuals_per_residual_block,
                               num_parameters);
    }
    return evaluate_scratch;
  }

  Evaluator::Options options_;
  Program* program_;
  JacobianWriter jacobian_writer_;
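  // Per-thread evaluate preparers and scratch space, indexed by thread id.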
  scoped_array<EvaluatePreparer> evaluate_preparers_;
  scoped_array<EvaluateScratch> evaluate_scratch_;
  std::vector<int> residual_layout_;
  ::ceres::internal::ExecutionSummary execution_summary_;
};

}  // namespace internal
}  // namespace ceres

#endif  // CERES_INTERNAL_PROGRAM_EVALUATOR_H_