

<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>Solving Non-linear Least Squares &mdash; Ceres Solver</title>
  

  
  

  

  
  
    

  

  
  
    <link rel="stylesheet" href="_static/css/theme.css" type="text/css" />
  

  

  
    <link rel="top" title="Ceres Solver" href="index.html"/>
        <link rel="next" title="Covariance Estimation" href="nnls_covariance.html"/>
        <link rel="prev" title="Modeling Non-linear Least Squares" href="nnls_modeling.html"/> 

  
  <script src="_static/js/modernizr.min.js"></script>

</head>

<body class="wy-body-for-nav" role="document">

  <div class="wy-grid-for-nav">

    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search">
          

          
            <a href="index.html" class="icon icon-home"> Ceres Solver
          

          
          </a>

          
            
            
              <div class="version">
                1.13
              </div>
            
          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

          
        </div>

        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
            
            
                <ul class="current">
<li class="toctree-l1"><a class="reference internal" href="features.html">Why?</a></li>
<li class="toctree-l1"><a class="reference internal" href="installation.html">Installation</a></li>
<li class="toctree-l1"><a class="reference internal" href="tutorial.html">Tutorial</a></li>
<li class="toctree-l1"><a class="reference internal" href="derivatives.html">On Derivatives</a></li>
<li class="toctree-l1"><a class="reference internal" href="nnls_modeling.html">Modeling Non-linear Least Squares</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">Solving Non-linear Least Squares</a><ul>
<li class="toctree-l2"><a class="reference internal" href="#introduction">Introduction</a></li>
<li class="toctree-l2"><a class="reference internal" href="#trust-region-methods">Trust Region Methods</a><ul>
<li class="toctree-l3"><a class="reference internal" href="#levenberg-marquardt">Levenberg-Marquardt</a></li>
<li class="toctree-l3"><a class="reference internal" href="#dogleg">Dogleg</a></li>
<li class="toctree-l3"><a class="reference internal" href="#inner-iterations">Inner Iterations</a></li>
<li class="toctree-l3"><a class="reference internal" href="#non-monotonic-steps">Non-monotonic Steps</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#line-search-methods">Line Search Methods</a></li>
<li class="toctree-l2"><a class="reference internal" href="#linearsolver">LinearSolver</a><ul>
<li class="toctree-l3"><a class="reference internal" href="#dense-qr"><code class="docutils literal"><span class="pre">DENSE_QR</span></code></a></li>
<li class="toctree-l3"><a class="reference internal" href="#dense-normal-cholesky-sparse-normal-cholesky"><code class="docutils literal"><span class="pre">DENSE_NORMAL_CHOLESKY</span></code> &amp; <code class="docutils literal"><span class="pre">SPARSE_NORMAL_CHOLESKY</span></code></a></li>
<li class="toctree-l3"><a class="reference internal" href="#cgnr"><code class="docutils literal"><span class="pre">CGNR</span></code></a></li>
<li class="toctree-l3"><a class="reference internal" href="#dense-schur-sparse-schur"><code class="docutils literal"><span class="pre">DENSE_SCHUR</span></code> &amp; <code class="docutils literal"><span class="pre">SPARSE_SCHUR</span></code></a></li>
<li class="toctree-l3"><a class="reference internal" href="#iterative-schur"><code class="docutils literal"><span class="pre">ITERATIVE_SCHUR</span></code></a></li>
<li class="toctree-l3"><a class="reference internal" href="#preconditioner">Preconditioner</a></li>
<li class="toctree-l3"><a class="reference internal" href="#ordering">Ordering</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#solver-options"><code class="docutils literal"><span class="pre">Solver::Options</span></code></a></li>
<li class="toctree-l2"><a class="reference internal" href="#parameterblockordering"><code class="docutils literal"><span class="pre">ParameterBlockOrdering</span></code></a></li>
<li class="toctree-l2"><a class="reference internal" href="#iterationcallback"><code class="docutils literal"><span class="pre">IterationCallback</span></code></a></li>
<li class="toctree-l2"><a class="reference internal" href="#crsmatrix"><code class="docutils literal"><span class="pre">CRSMatrix</span></code></a></li>
<li class="toctree-l2"><a class="reference internal" href="#solver-summary"><code class="docutils literal"><span class="pre">Solver::Summary</span></code></a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="nnls_covariance.html">Covariance Estimation</a></li>
<li class="toctree-l1"><a class="reference internal" href="gradient_solver.html">General Unconstrained Minimization</a></li>
<li class="toctree-l1"><a class="reference internal" href="faqs.html">FAQS, Tips &amp; Tricks</a></li>
<li class="toctree-l1"><a class="reference internal" href="users.html">Users</a></li>
<li class="toctree-l1"><a class="reference internal" href="contributing.html">Contributing</a></li>
<li class="toctree-l1"><a class="reference internal" href="version_history.html">Version History</a></li>
<li class="toctree-l1"><a class="reference internal" href="bibliography.html">Bibliography</a></li>
<li class="toctree-l1"><a class="reference internal" href="license.html">License</a></li>
</ul>

            
          
        </div>
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" role="navigation" aria-label="top navigation">
        <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
        <a href="index.html">Ceres Solver</a>
      </nav>


      
      <div class="wy-nav-content">
        <div class="rst-content">
          

 



<div role="navigation" aria-label="breadcrumbs navigation">
  <ul class="wy-breadcrumbs">
    <li><a href="index.html">Docs</a> &raquo;</li>
      
    <li>Solving Non-linear Least Squares</li>
      <li class="wy-breadcrumbs-aside">
        
          
        
      </li>
  </ul>
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="section" id="solving-non-linear-least-squares">
<span id="chapter-nnls-solving"></span><h1>Solving Non-linear Least Squares<a class="headerlink" href="#solving-non-linear-least-squares" title="Permalink to this headline">¶</a></h1>
<div class="section" id="introduction">
<h2>Introduction<a class="headerlink" href="#introduction" title="Permalink to this headline">¶</a></h2>
<p>Effective use of Ceres requires some familiarity with the basic
components of a non-linear least squares solver, so before we describe
how to configure and use the solver, we will take a brief look at how
some of the core optimization algorithms in Ceres work.</p>
<p>Let <span class="math">\(x \in \mathbb{R}^n\)</span> be an <span class="math">\(n\)</span>-dimensional vector of
variables, and
<span class="math">\(F(x) = \left[f_1(x), ... ,  f_{m}(x) \right]^{\top}\)</span> be a
<span class="math">\(m\)</span>-dimensional function of <span class="math">\(x\)</span>.  We are interested in
solving the optimization problem <a class="footnote-reference" href="#f1" id="id1">[1]</a></p>
<div class="math" id="equation-nonlinsq">
<span class="eqno">(1)</span>\[\begin{split}\arg \min_x \frac{1}{2}\|F(x)\|^2\ . \\
        L \le x \le U\end{split}\]</div>
<p>where <span class="math">\(L\)</span> and <span class="math">\(U\)</span> are lower and upper bounds on the
parameter vector <span class="math">\(x\)</span>.</p>
<p>Since the efficient global minimization of <a href="#equation-nonlinsq">(1)</a> for
general <span class="math">\(F(x)\)</span> is an intractable problem, we will have to settle
for finding a local minimum.</p>
<p>In the following, the Jacobian <span class="math">\(J(x)\)</span> of <span class="math">\(F(x)\)</span> is an
<span class="math">\(m\times n\)</span> matrix, where <span class="math">\(J_{ij}(x) = \partial_j f_i(x)\)</span>
and the gradient vector is <span class="math">\(g(x) = \nabla \frac{1}{2}\|F(x)\|^2
= J(x)^\top F(x)\)</span>.</p>
<p>The general strategy when solving non-linear optimization problems is
to solve a sequence of approximations to the original problem
<a class="reference internal" href="bibliography.html#nocedalwright" id="id2">[NocedalWright]</a>. At each iteration, the approximation is solved to
determine a correction <span class="math">\(\Delta x\)</span> to the vector <span class="math">\(x\)</span>. For
non-linear least squares, an approximation can be constructed by using
the linearization <span class="math">\(F(x+\Delta x) \approx F(x) + J(x)\Delta x\)</span>,
which leads to the following linear least squares problem:</p>
<div class="math" id="equation-linearapprox">
<span class="eqno">(2)</span>\[\min_{\Delta x} \frac{1}{2}\|J(x)\Delta x + F(x)\|^2\]</div>
<p>Unfortunately, naively solving a sequence of these problems and
updating <span class="math">\(x \leftarrow x+ \Delta x\)</span> leads to an algorithm that
may not converge.  To get a convergent algorithm, we need to control
the size of the step <span class="math">\(\Delta x\)</span>. Depending on how the size of
the step <span class="math">\(\Delta x\)</span> is controlled, non-linear optimization
algorithms can be divided into two major categories <a class="reference internal" href="bibliography.html#nocedalwright" id="id3">[NocedalWright]</a>.</p>
<ol class="arabic simple">
<li><strong>Trust Region</strong> The trust region approach approximates the
objective function using a model function (often a quadratic)
over a subset of the search space known as the trust region. If the
model function succeeds in minimizing the true objective function,
the trust region is expanded; otherwise it is
contracted and the model optimization problem is solved again.</li>
<li><strong>Line Search</strong> The line search approach first finds a descent
direction along which the objective function will be reduced and
then computes a step size that decides how far to move along
that direction. The descent direction can be computed by various
methods, such as gradient descent, Newton&#8217;s method and the quasi-Newton
method. The step size can be determined either exactly or
inexactly.</li>
</ol>
<p>Trust region methods are in some sense dual to line search methods:
trust region methods first choose a step size (the size of the trust
region) and then a step direction while line search methods first
choose a step direction and then a step size. Ceres implements
multiple algorithms in both categories.</p>
</div>
<div class="section" id="trust-region-methods">
<span id="section-trust-region-methods"></span><h2>Trust Region Methods<a class="headerlink" href="#trust-region-methods" title="Permalink to this headline">¶</a></h2>
<p>The basic trust region algorithm looks something like this.</p>
<blockquote>
<div><ol class="arabic">
<li><p class="first">Given an initial point <span class="math">\(x\)</span> and a trust region radius <span class="math">\(\mu\)</span>.</p>
</li>
<li><p class="first">Solve</p>
<div class="math">
\[\begin{split}\arg \min_{\Delta x}&amp; \frac{1}{2}\|J(x)\Delta x + F(x)\|^2 \\
\text{such that} &amp;\|D(x)\Delta x\|^2 \le \mu\\
&amp;L \le x + \Delta x \le U.\end{split}\]</div>
</li>
<li><p class="first"><span class="math">\(\rho = \frac{\displaystyle \|F(x + \Delta x)\|^2 -
\|F(x)\|^2}{\displaystyle \|J(x)\Delta x + F(x)\|^2 -
\|F(x)\|^2}\)</span></p>
</li>
<li><p class="first">if <span class="math">\(\rho &gt; \epsilon\)</span> then  <span class="math">\(x = x + \Delta x\)</span>.</p>
</li>
<li><p class="first">if <span class="math">\(\rho &gt; \eta_1\)</span> then <span class="math">\(\mu = 2  \mu\)</span></p>
</li>
<li><p class="first">else if <span class="math">\(\rho &lt; \eta_2\)</span> then <span class="math">\(\mu = 0.5 * \mu\)</span></p>
</li>
<li><p class="first">Go to 2.</p>
</li>
</ol>
</div></blockquote>
<p>Here, <span class="math">\(\mu\)</span> is the trust region radius, <span class="math">\(D(x)\)</span> is some
matrix used to define a metric on the domain of <span class="math">\(F(x)\)</span> and
<span class="math">\(\rho\)</span> measures the quality of the step <span class="math">\(\Delta x\)</span>, i.e.,
how well the linear model predicted the decrease in the value of the
non-linear objective. The idea is to increase or decrease the radius
of the trust region depending on how well the linearization predicts
the behavior of the non-linear objective, which in turn is reflected
in the value of <span class="math">\(\rho\)</span>.</p>
<p>The key computational step in a trust-region algorithm is the solution
of the constrained optimization problem</p>
<div class="math" id="equation-trp">
<span class="eqno">(3)</span>\[\begin{split}\arg \min_{\Delta x}&amp;\quad \frac{1}{2}\|J(x)\Delta x + F(x)\|^2 \\
\text{such that} &amp;\quad \|D(x)\Delta x\|^2 \le \mu\\
 &amp;\quad L \le x + \Delta x \le U.\end{split}\]</div>
<p>There are a number of different ways of solving this problem, each
giving rise to a different concrete trust-region algorithm. Currently,
Ceres implements two trust-region algorithms - Levenberg-Marquardt
and Dogleg, each of which is augmented with a line search if bounds
constraints are present <a class="reference internal" href="bibliography.html#kanzow" id="id4">[Kanzow]</a>. The user can choose between them by
setting <a class="reference internal" href="#_CPPv2N6Solver7Options26trust_region_strategy_typeE" title="ceres::Solver::Options::trust_region_strategy_type"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Options::trust_region_strategy_type</span></code></a>.</p>
<p class="rubric">Footnotes</p>
<table class="docutils footnote" frame="void" id="f1" rules="none">
<colgroup><col class="label" /><col /></colgroup>
<tbody valign="top">
<tr><td class="label"><a class="fn-backref" href="#id1">[1]</a></td><td>At the level of the non-linear solver, the block structure is
not relevant, therefore our discussion here is in terms of an
optimization problem defined over a state vector of size
<span class="math">\(n\)</span>. Similarly the presence of loss functions is also
ignored as the problem is internally converted into a pure
non-linear least squares problem.</td></tr>
</tbody>
</table>
<div class="section" id="levenberg-marquardt">
<span id="section-levenberg-marquardt"></span><h3>Levenberg-Marquardt<a class="headerlink" href="#levenberg-marquardt" title="Permalink to this headline">¶</a></h3>
<p>The Levenberg-Marquardt algorithm <a class="reference internal" href="bibliography.html#levenberg" id="id5">[Levenberg]</a>  <a class="reference internal" href="bibliography.html#marquardt" id="id6">[Marquardt]</a> is the
most popular algorithm for solving non-linear least squares problems.
It was also the first trust region algorithm to be developed
<a class="reference internal" href="bibliography.html#levenberg" id="id7">[Levenberg]</a> <a class="reference internal" href="bibliography.html#marquardt" id="id8">[Marquardt]</a>. Ceres implements an exact step <a class="reference internal" href="bibliography.html#madsen" id="id9">[Madsen]</a>
and an inexact step variant of the Levenberg-Marquardt algorithm
<a class="reference internal" href="bibliography.html#wrightholt" id="id10">[WrightHolt]</a> <a class="reference internal" href="bibliography.html#nashsofer" id="id11">[NashSofer]</a>.</p>
<p>It can be shown that the solution to <a href="#equation-trp">(3)</a> can be obtained by
solving an unconstrained optimization problem of the form</p>
<div class="math">
\[\arg\min_{\Delta x} \frac{1}{2}\|J(x)\Delta x + F(x)\|^2 +\lambda  \|D(x)\Delta x\|^2\]</div>
<p>where <span class="math">\(\lambda\)</span> is a Lagrange multiplier that is inversely
related to <span class="math">\(\mu\)</span>. In Ceres, we solve for</p>
<div class="math" id="equation-lsqr">
<span class="eqno">(4)</span>\[\arg\min_{\Delta x} \frac{1}{2}\|J(x)\Delta x + F(x)\|^2 + \frac{1}{\mu} \|D(x)\Delta x\|^2\]</div>
<p>The matrix <span class="math">\(D(x)\)</span> is a non-negative diagonal matrix, typically
the square root of the diagonal of the matrix <span class="math">\(J(x)^\top J(x)\)</span>.</p>
<p>Before going further, let us make some notational simplifications. We
will assume that the matrix <span class="math">\(\sqrt{\mu} D\)</span> has been concatenated
at the bottom of the matrix <span class="math">\(J\)</span> and similarly a vector of zeros
has been added to the bottom of the vector <span class="math">\(f\)</span> and the rest of
our discussion will be in terms of <span class="math">\(J\)</span> and <span class="math">\(f\)</span>, i.e., the
linear least squares problem.</p>
<div class="math" id="equation-simple">
<span class="eqno">(5)</span>\[\min_{\Delta x} \frac{1}{2} \|J(x)\Delta x + f(x)\|^2 .\]</div>
<p>For all but the smallest problems the solution of <a href="#equation-simple">(5)</a> in
each iteration of the Levenberg-Marquardt algorithm is the dominant
computational cost in Ceres. Ceres provides a number of different
options for solving <a href="#equation-simple">(5)</a>. There are two major classes of
methods - factorization and iterative.</p>
<p>The factorization methods are based on computing an exact solution of
<a href="#equation-lsqr">(4)</a> using a Cholesky or a QR factorization and lead to an exact
step Levenberg-Marquardt algorithm. But it is not clear if an exact
solution of <a href="#equation-lsqr">(4)</a> is necessary at each step of the LM algorithm
to solve <a href="#equation-nonlinsq">(1)</a>. In fact, we have already seen evidence
that this may not be the case, as <a href="#equation-lsqr">(4)</a> is itself a regularized
version of <a href="#equation-linearapprox">(2)</a>. Indeed, it is possible to
construct non-linear optimization algorithms in which the linearized
problem is solved approximately. These algorithms are known as inexact
Newton or truncated Newton methods <a class="reference internal" href="bibliography.html#nocedalwright" id="id12">[NocedalWright]</a>.</p>
<p>An inexact Newton method requires two ingredients. First, a cheap
method for approximately solving systems of linear
equations. Typically an iterative linear solver like the Conjugate
Gradients method is used for this
purpose <a class="reference internal" href="bibliography.html#nocedalwright" id="id13">[NocedalWright]</a>. Second, a termination rule for
the iterative solver. A typical termination rule is of the form</p>
<div class="math" id="equation-inexact">
<span class="eqno">(6)</span>\[\|H(x) \Delta x + g(x)\| \leq \eta_k \|g(x)\|.\]</div>
<p>Here, <span class="math">\(k\)</span> indicates the Levenberg-Marquardt iteration number and
<span class="math">\(0 &lt; \eta_k &lt;1\)</span> is known as the forcing sequence.  <a class="reference internal" href="bibliography.html#wrightholt" id="id14">[WrightHolt]</a>
prove that a truncated Levenberg-Marquardt algorithm that uses an
inexact Newton step based on <a href="#equation-inexact">(6)</a> converges for any
sequence <span class="math">\(\eta_k \leq \eta_0 &lt; 1\)</span> and the rate of convergence
depends on the choice of the forcing sequence <span class="math">\(\eta_k\)</span>.</p>
<p>Ceres supports both exact and inexact step solution strategies. When
the user chooses a factorization based linear solver, the exact step
Levenberg-Marquardt algorithm is used. When the user chooses an
iterative linear solver, the inexact step Levenberg-Marquardt
algorithm is used.</p>
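<p>For example, the following sketch (option values as documented later in
this chapter) selects an exact step and an inexact step configuration
respectively:</p>
<div class="highlight-c++"><div class="highlight"><pre>ceres::Solver::Options options;
// A factorization based linear solver gives the exact step variant ...
options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
// ... while an iterative linear solver gives the inexact (truncated)
// step variant, e.g.:
// options.linear_solver_type = ceres::ITERATIVE_SCHUR;
</pre></div></div>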
</div>
<div class="section" id="dogleg">
<span id="section-dogleg"></span><h3>Dogleg<a class="headerlink" href="#dogleg" title="Permalink to this headline">¶</a></h3>
<p>Another strategy for solving the trust region problem <a href="#equation-trp">(3)</a> was
introduced by M. J. D. Powell. The key idea there is to compute two
vectors</p>
<div class="math">
\[\begin{split}\Delta x^{\text{Gauss-Newton}} &amp;= \arg \min_{\Delta x}\frac{1}{2} \|J(x)\Delta x + f(x)\|^2.\\
\Delta x^{\text{Cauchy}} &amp;= -\frac{\|g(x)\|^2}{\|J(x)g(x)\|^2}g(x).\end{split}\]</div>
<p>Note that the vector <span class="math">\(\Delta x^{\text{Gauss-Newton}}\)</span> is the
solution to <a href="#equation-linearapprox">(2)</a> and <span class="math">\(\Delta
x^{\text{Cauchy}}\)</span> is the vector that minimizes the linear
approximation if we restrict ourselves to moving along the direction
of the gradient. The Dogleg method finds a vector <span class="math">\(\Delta x\)</span>
defined by <span class="math">\(\Delta x^{\text{Gauss-Newton}}\)</span> and <span class="math">\(\Delta
x^{\text{Cauchy}}\)</span> that solves the trust region problem. Ceres
supports two variants that can be chosen by setting
<a class="reference internal" href="#_CPPv2N6Solver7Options11dogleg_typeE" title="ceres::Solver::Options::dogleg_type"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Options::dogleg_type</span></code></a>.</p>
<p><code class="docutils literal"><span class="pre">TRADITIONAL_DOGLEG</span></code> as described by Powell, constructs two line
segments using the Gauss-Newton and Cauchy vectors and finds the point
farthest along this line shaped like a dogleg (hence the name) that is
contained in the trust-region. For more details on the exact reasoning
and computations, please see Madsen et al <a class="reference internal" href="bibliography.html#madsen" id="id15">[Madsen]</a>.</p>
<p><code class="docutils literal"><span class="pre">SUBSPACE_DOGLEG</span></code> is a more sophisticated method that considers the
entire two dimensional subspace spanned by these two vectors and finds
the point that minimizes the trust region problem in this subspace
<a class="reference internal" href="bibliography.html#byrdschnabel" id="id16">[ByrdSchnabel]</a>.</p>
<p>The key advantage of Dogleg over Levenberg-Marquardt is that if
the step computation for a particular choice of <span class="math">\(\mu\)</span> does not
result in sufficient decrease in the value of the objective function,
Levenberg-Marquardt solves the linear approximation from scratch with
a smaller value of <span class="math">\(\mu\)</span>. Dogleg, on the other hand, only needs
to compute the interpolation between the Gauss-Newton and the Cauchy
vectors, as neither of them depends on the value of <span class="math">\(\mu\)</span>.</p>
<p>The Dogleg method can only be used with the exact factorization based
linear solvers.</p>
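<p>A hedged configuration sketch, combining the strategy choice with a
factorization based linear solver as required:</p>
<div class="highlight-c++"><div class="highlight"><pre>ceres::Solver::Options options;
options.trust_region_strategy_type = ceres::DOGLEG;
options.dogleg_type = ceres::SUBSPACE_DOGLEG;   // or ceres::TRADITIONAL_DOGLEG
// Dogleg requires an exact, factorization based linear solver.
options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
</pre></div></div>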
</div>
<div class="section" id="inner-iterations">
<span id="section-inner-iterations"></span><h3>Inner Iterations<a class="headerlink" href="#inner-iterations" title="Permalink to this headline">¶</a></h3>
<p>Some non-linear least squares problems have additional structure in
the way the parameter blocks interact, such that it is beneficial to
modify the way the trust region step is computed. For example,
consider the following regression problem</p>
<div class="math">
\[y = a_1 e^{b_1 x} + a_2 e^{b_2 x^2 + c_1}\]
<p>Given a set of pairs <span class="math">\(\{(x_i, y_i)\}\)</span>, the user wishes to estimate
<span class="math">\(a_1, a_2, b_1, b_2\)</span>, and <span class="math">\(c_1\)</span>.</p>
<p>Notice that the right-hand side of this expression is linear in <span class="math">\(a_1\)</span> and
<span class="math">\(a_2\)</span>, and given any value for <span class="math">\(b_1, b_2\)</span> and <span class="math">\(c_1\)</span>,
it is possible to use linear regression to estimate the optimal values
of <span class="math">\(a_1\)</span> and <span class="math">\(a_2\)</span>. It&#8217;s possible to analytically
eliminate the variables <span class="math">\(a_1\)</span> and <span class="math">\(a_2\)</span> from the problem
entirely. Problems like these are known as separable least squares
problems, and the most famous algorithm for solving them is the Variable
Projection algorithm invented by Golub &amp; Pereyra <a class="reference internal" href="bibliography.html#golubpereyra" id="id17">[GolubPereyra]</a>.</p>
<p>Similar structure can be found in the matrix factorization with
missing data problem. There the corresponding algorithm is known as
Wiberg&#8217;s algorithm <a class="reference internal" href="bibliography.html#wiberg" id="id18">[Wiberg]</a>.</p>
<p>Ruhe &amp; Wedin present an analysis of various algorithms for solving
separable non-linear least squares problems and refer to <em>Variable
Projection</em> as Algorithm I in their paper <a class="reference internal" href="bibliography.html#ruhewedin" id="id19">[RuheWedin]</a>.</p>
<p>Implementing Variable Projection is tedious and expensive. Ruhe &amp;
Wedin present a simpler algorithm with comparable convergence
properties, which they call Algorithm II.  Algorithm II performs an
additional optimization step to estimate <span class="math">\(a_1\)</span> and <span class="math">\(a_2\)</span>
exactly after computing a successful Newton step.</p>
<p>This idea can be generalized to cases where the residual is not
linear in <span class="math">\(a_1\)</span> and <span class="math">\(a_2\)</span>, i.e.,</p>
<div class="math">
\[y = f_1(a_1, e^{b_1 x}) + f_2(a_2, e^{b_2 x^2 + c_1})\]
<p>In this case, we solve for the trust region step for the full problem,
and then use it as the starting point to further optimize just <span class="math">\(a_1\)</span>
and <span class="math">\(a_2\)</span>. For the linear case, this amounts to doing a single linear
least squares solve. For non-linear problems, any method for solving
the <span class="math">\(a_1\)</span> and <span class="math">\(a_2\)</span> optimization problems will do. The
only constraint on <span class="math">\(a_1\)</span> and <span class="math">\(a_2\)</span> (if they are two
different parameter blocks) is that they do not co-occur in a residual
block.</p>
<p>This idea can be further generalized, by not just optimizing
<span class="math">\((a_1, a_2)\)</span>, but decomposing the graph corresponding to the
Hessian matrix&#8217;s sparsity structure into a collection of
non-overlapping independent sets and optimizing each of them.</p>
<p>Setting <a class="reference internal" href="#_CPPv2N6Solver7Options20use_inner_iterationsE" title="ceres::Solver::Options::use_inner_iterations"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Options::use_inner_iterations</span></code></a> to <code class="docutils literal"><span class="pre">true</span></code>
enables the use of this non-linear generalization of Ruhe &amp; Wedin&#8217;s
Algorithm II.  This version of Ceres has a higher iteration
complexity, but also displays better convergence behavior per
iteration.</p>
<p>Setting <a class="reference internal" href="#_CPPv2N6Solver7Options11num_threadsE" title="ceres::Solver::Options::num_threads"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Options::num_threads</span></code></a> to the maximum number
possible is highly recommended.</p>
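<p>A minimal sketch of enabling inner iterations; the thread count shown
is only a placeholder for the number of cores available:</p>
<div class="highlight-c++"><div class="highlight"><pre>ceres::Solver::Options options;
options.use_inner_iterations = true;
// Use as many threads as there are cores on the machine (4 is just a
// placeholder value here).
options.num_threads = 4;
</pre></div></div>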
</div>
<div class="section" id="non-monotonic-steps">
<span id="section-non-monotonic-steps"></span><h3>Non-monotonic Steps<a class="headerlink" href="#non-monotonic-steps" title="Permalink to this headline">¶</a></h3>
<p>Note that the basic trust-region algorithm described in
<a class="reference internal" href="#section-trust-region-methods"><span class="std std-ref">Trust Region Methods</span></a> is a descent algorithm in that it
only accepts a point if it strictly reduces the value of the objective
function.</p>
<p>Relaxing this requirement allows the algorithm to be more efficient in
the long term at the cost of some local increase in the value of the
objective function.</p>
<p>This is because allowing for non-decreasing objective function values
in a principled manner lets the algorithm <em>jump over boulders</em>, as
it is not restricted to moving along narrow valleys, while still
preserving its convergence properties.</p>
<p>Setting <a class="reference internal" href="#_CPPv2N6Solver7Options22use_nonmonotonic_stepsE" title="ceres::Solver::Options::use_nonmonotonic_steps"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Options::use_nonmonotonic_steps</span></code></a> to <code class="docutils literal"><span class="pre">true</span></code>
enables the non-monotonic trust region algorithm as described by Conn,
Gould &amp; Toint in <a class="reference internal" href="bibliography.html#conn" id="id20">[Conn]</a>.</p>
<p>Even though the value of the objective function may be larger
than the minimum value encountered over the course of the
optimization, the final parameters returned to the user are the
ones corresponding to the minimum cost over all iterations.</p>
<p>The option to take non-monotonic steps is available for all trust
region strategies.</p>
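<p>A minimal sketch:</p>
<div class="highlight-c++"><div class="highlight"><pre>ceres::Solver::Options options;
options.use_nonmonotonic_steps = true;
// The size of the window over which steps that increase the objective
// are tolerated is controlled by
// Solver::Options::max_consecutive_nonmonotonic_steps.
</pre></div></div>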
</div>
</div>
<div class="section" id="line-search-methods">
<span id="section-line-search-methods"></span><h2>Line Search Methods<a class="headerlink" href="#line-search-methods" title="Permalink to this headline">¶</a></h2>
<p>The line search method in Ceres Solver cannot handle bounds
constraints right now, so it can only be used for solving
unconstrained problems.</p>
<p>The basic line search algorithm looks something like this.</p>
<blockquote>
<div><ol class="arabic simple">
<li>Given an initial point <span class="math">\(x\)</span></li>
<li><span class="math">\(\Delta x = -H^{-1}(x) g(x)\)</span></li>
<li><span class="math">\(\arg \min_\mu \frac{1}{2} \| F(x + \mu \Delta x) \|^2\)</span></li>
<li><span class="math">\(x = x + \mu \Delta x\)</span></li>
<li>Goto 2.</li>
</ol>
</div></blockquote>
<p>Here <span class="math">\(H(x)\)</span> is some approximation to the Hessian of the
objective function, and <span class="math">\(g(x)\)</span> is the gradient at
<span class="math">\(x\)</span>. Depending on the choice of <span class="math">\(H(x)\)</span> we get a variety of
different search directions <span class="math">\(\Delta x\)</span>.</p>
<p>Step 3, which is a one dimensional optimization or <cite>Line Search</cite> along
<span class="math">\(\Delta x\)</span>, is what gives this class of methods its name.</p>
<p>Different line search algorithms differ in their choice of the search
direction <span class="math">\(\Delta x\)</span> and the method used for one dimensional
optimization along <span class="math">\(\Delta x\)</span>. The choice of <span class="math">\(H(x)\)</span> is the
primary source of computational complexity in these
methods. Currently, Ceres Solver supports three choices of search
directions, all aimed at large scale problems.</p>
<ol class="arabic simple">
<li><code class="docutils literal"><span class="pre">STEEPEST_DESCENT</span></code> This corresponds to choosing <span class="math">\(H(x)\)</span> to
be the identity matrix. This is not a good search direction for
anything but the simplest of the problems. It is only included here
for completeness.</li>
<li><code class="docutils literal"><span class="pre">NONLINEAR_CONJUGATE_GRADIENT</span></code> A generalization of the Conjugate
Gradient method to non-linear functions. The generalization can be
performed in a number of different ways, resulting in a variety of
search directions. Ceres Solver currently supports
<code class="docutils literal"><span class="pre">FLETCHER_REEVES</span></code>, <code class="docutils literal"><span class="pre">POLAK_RIBIERE</span></code> and <code class="docutils literal"><span class="pre">HESTENES_STIEFEL</span></code>
directions.</li>
<li><code class="docutils literal"><span class="pre">BFGS</span></code> A generalization of the Secant method to multiple
dimensions in which a full, dense approximation to the inverse
Hessian is maintained and used to compute a quasi-Newton step
<a class="reference internal" href="bibliography.html#nocedalwright" id="id21">[NocedalWright]</a>.  BFGS is currently the best known general
quasi-Newton algorithm.</li>
<li><code class="docutils literal"><span class="pre">LBFGS</span></code> A limited memory approximation to the full <code class="docutils literal"><span class="pre">BFGS</span></code>
method in which the last <cite>M</cite> iterations are used to approximate the
inverse Hessian used to compute a quasi-Newton step <a class="reference internal" href="bibliography.html#nocedal" id="id22">[Nocedal]</a>,
<a class="reference internal" href="bibliography.html#byrdnocedal" id="id23">[ByrdNocedal]</a>.</li>
</ol>
<p>Currently Ceres Solver supports both a backtracking and interpolation
based Armijo line search algorithm, and a sectioning / zoom
interpolation (strong) Wolfe condition line search algorithm.
However, note that in order for the assumptions underlying the
<code class="docutils literal"><span class="pre">BFGS</span></code> and <code class="docutils literal"><span class="pre">LBFGS</span></code> methods to be guaranteed to be satisfied, the
Wolfe line search algorithm should be used.</p>
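<p>A hedged configuration sketch for running the line search minimizer
(again assuming an existing <code class="docutils literal"><span class="pre">problem</span></code>):</p>
<div class="highlight-c++"><div class="highlight"><pre>ceres::Solver::Options options;
options.minimizer_type = ceres::LINE_SEARCH;
// STEEPEST_DESCENT, NONLINEAR_CONJUGATE_GRADIENT, BFGS or LBFGS.
options.line_search_direction_type = ceres::LBFGS;
// WOLFE is recommended for BFGS/LBFGS; ARMIJO is the other option.
options.line_search_type = ceres::WOLFE;
// For NONLINEAR_CONJUGATE_GRADIENT the variant is chosen via
// options.nonlinear_conjugate_gradient_type, e.g. ceres::POLAK_RIBIERE.
ceres::Solver::Summary summary;
ceres::Solve(options, &amp;problem, &amp;summary);
</pre></div></div>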
</div>
<div class="section" id="linearsolver">
<span id="section-linear-solver"></span><h2>LinearSolver<a class="headerlink" href="#linearsolver" title="Permalink to this headline">¶</a></h2>
<p>Recall that in both of the trust-region methods described above, the
key computational cost is the solution of a linear least squares
problem of the form</p>
<div class="math" id="equation-simple2">
<span class="eqno">(7)</span>\[\min_{\Delta x} \frac{1}{2} \|J(x)\Delta x + f(x)\|^2 .\]</div>
<p>Let <span class="math">\(H(x)= J(x)^\top J(x)\)</span> and <span class="math">\(g(x) = -J(x)^\top
f(x)\)</span>. For notational convenience let us also drop the dependence on
<span class="math">\(x\)</span>. Then it is easy to see that solving <a href="#equation-simple2">(7)</a> is
equivalent to solving the <em>normal equations</em>.</p>
<div class="math" id="equation-normal">
<span class="eqno">(8)</span>\[H \Delta x = g\]</div>
<p>Ceres provides a number of different options for solving <a href="#equation-normal">(8)</a>.</p>
<div class="section" id="dense-qr">
<span id="section-qr"></span><h3><code class="docutils literal"><span class="pre">DENSE_QR</span></code><a class="headerlink" href="#dense-qr" title="Permalink to this headline">¶</a></h3>
<p>For small problems (a couple of hundred parameters and a few thousand
residuals) with relatively dense Jacobians, <code class="docutils literal"><span class="pre">DENSE_QR</span></code> is the method
of choice <a class="reference internal" href="bibliography.html#bjorck" id="id24">[Bjorck]</a>. Let <span class="math">\(J = QR\)</span> be the QR-decomposition of
<span class="math">\(J\)</span>, where <span class="math">\(Q\)</span> is an orthonormal matrix and <span class="math">\(R\)</span> is
an upper triangular matrix <a class="reference internal" href="bibliography.html#trefethenbau" id="id25">[TrefethenBau]</a>. Then it can be shown that
the solution to <a href="#equation-normal">(8)</a> is given by</p>
<div class="math">
\[\Delta x^* = -R^{-1}Q^\top f\]</div>
<p>Ceres uses <code class="docutils literal"><span class="pre">Eigen</span></code>&#8217;s dense QR factorization routines.</p>
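<p>Selecting this solver is a one line change:</p>
<div class="highlight-c++"><div class="highlight"><pre>ceres::Solver::Options options;
// Suitable for small, relatively dense problems.
options.linear_solver_type = ceres::DENSE_QR;
</pre></div></div>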
</div>
<div class="section" id="dense-normal-cholesky-sparse-normal-cholesky">
<span id="section-cholesky"></span><h3><code class="docutils literal"><span class="pre">DENSE_NORMAL_CHOLESKY</span></code> &amp; <code class="docutils literal"><span class="pre">SPARSE_NORMAL_CHOLESKY</span></code><a class="headerlink" href="#dense-normal-cholesky-sparse-normal-cholesky" title="Permalink to this headline">¶</a></h3>
<p>Large non-linear least squares problems are usually sparse. In such
cases, using a dense QR factorization is inefficient. Let <span class="math">\(H =
R^\top R\)</span> be the Cholesky factorization of the normal equations, where
<span class="math">\(R\)</span> is an upper triangular matrix, then the solution to
<a href="#equation-normal">(8)</a> is given by</p>
<div class="math">
\[\Delta x^* = R^{-1} R^{-\top} g.\]</div>
<p>The observant reader will note that the <span class="math">\(R\)</span> in the Cholesky
factorization of <span class="math">\(H\)</span> is the same upper triangular matrix
<span class="math">\(R\)</span> in the QR factorization of <span class="math">\(J\)</span>. Since <span class="math">\(Q\)</span> is an
orthonormal matrix, <span class="math">\(J=QR\)</span> implies that <span class="math">\(J^\top J = R^\top
Q^\top Q R = R^\top R\)</span>. There are two variants of Cholesky
factorization &#8211; sparse and dense.</p>
<p><code class="docutils literal"><span class="pre">DENSE_NORMAL_CHOLESKY</span></code>  as the name implies performs a dense
Cholesky factorization of the normal equations. Ceres uses
<code class="docutils literal"><span class="pre">Eigen</span></code> &#8216;s dense LDLT factorization routines.</p>
<p><code class="docutils literal"><span class="pre">SPARSE_NORMAL_CHOLESKY</span></code>, as the name implies performs a sparse
Cholesky factorization of the normal equations. This leads to
substantial savings in time and memory for large sparse
problems. Ceres uses the sparse Cholesky factorization routines in
Professor Tim Davis&#8217; <code class="docutils literal"><span class="pre">SuiteSparse</span></code> or <code class="docutils literal"><span class="pre">CXSparse</span></code> packages <a class="reference internal" href="bibliography.html#chen" id="id26">[Chen]</a>
or the sparse Cholesky factorization algorithm in <code class="docutils literal"><span class="pre">Eigen</span></code> (which
incidently is a port of the algorithm implemented inside <code class="docutils literal"><span class="pre">CXSparse</span></code>)</p>
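<p>A sketch of selecting the sparse variant together with the sparse
linear algebra backend (which backend is available depends on how Ceres
was built):</p>
<div class="highlight-c++"><div class="highlight"><pre>ceres::Solver::Options options;
options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
// SUITE_SPARSE, CX_SPARSE or EIGEN_SPARSE, depending on the build.
options.sparse_linear_algebra_library_type = ceres::SUITE_SPARSE;
// For small dense problems use ceres::DENSE_NORMAL_CHOLESKY instead.
</pre></div></div>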
</div>
<div class="section" id="cgnr">
<span id="section-cgnr"></span><h3><code class="docutils literal"><span class="pre">CGNR</span></code><a class="headerlink" href="#cgnr" title="Permalink to this headline">¶</a></h3>
<p>For general sparse problems, if the problem is too large for
<code class="docutils literal"><span class="pre">CHOLMOD</span></code> or a sparse linear algebra library is not linked into
Ceres, another option is the <code class="docutils literal"><span class="pre">CGNR</span></code> solver. This solver uses the
Conjugate Gradients solver on the <em>normal equations</em>, but without
forming the normal equations explicitly. It exploits the relation</p>
<div class="math">
\[H x = J^\top J x = J^\top(J x)\]</div>
<p>The convergence of Conjugate Gradients depends on the condition
number <span class="math">\(\kappa(H)\)</span>. Usually <span class="math">\(H\)</span> is poorly conditioned and
a <a class="reference internal" href="#section-preconditioner"><span class="std std-ref">Preconditioner</span></a> must be used to get reasonable
performance. Currently only the <code class="docutils literal"><span class="pre">JACOBI</span></code> preconditioner is available
for use with <code class="docutils literal"><span class="pre">CGNR</span></code>. It uses the block diagonal of <span class="math">\(H\)</span> to
precondition the normal equations.</p>
<p>When the user chooses <code class="docutils literal"><span class="pre">CGNR</span></code> as the linear solver, Ceres
automatically switches from the exact step algorithm to an inexact
step algorithm.</p>
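<p>A minimal sketch:</p>
<div class="highlight-c++"><div class="highlight"><pre>ceres::Solver::Options options;
options.linear_solver_type = ceres::CGNR;
// Currently only JACOBI is available for use with CGNR (see above).
options.preconditioner_type = ceres::JACOBI;
// The forcing sequence of the resulting inexact step algorithm is
// controlled by options.eta.
</pre></div></div>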
</div>
<div class="section" id="dense-schur-sparse-schur">
<span id="section-schur"></span><h3><code class="docutils literal"><span class="pre">DENSE_SCHUR</span></code> &amp; <code class="docutils literal"><span class="pre">SPARSE_SCHUR</span></code><a class="headerlink" href="#dense-schur-sparse-schur" title="Permalink to this headline">¶</a></h3>
<p>While it is possible to use <code class="docutils literal"><span class="pre">SPARSE_NORMAL_CHOLESKY</span></code> to solve bundle
adjustment problems, bundle adjustment problems have a special
structure, and a more efficient scheme for solving <a href="#equation-normal">(8)</a>
can be constructed.</p>
<p>Suppose that the SfM problem consists of <span class="math">\(p\)</span> cameras and
<span class="math">\(q\)</span> points and the variable vector <span class="math">\(x\)</span> has the block
structure <span class="math">\(x = [y_{1}, ... ,y_{p},z_{1}, ... ,z_{q}]\)</span>, where
<span class="math">\(y\)</span> and <span class="math">\(z\)</span> correspond to camera and point parameters,
respectively.  Further, let the camera blocks be of size <span class="math">\(c\)</span> and
the point blocks be of size <span class="math">\(s\)</span> (for most problems <span class="math">\(c\)</span> =
<span class="math">\(6\)</span>&#8211;<span class="math">\(9\)</span> and <span class="math">\(s = 3\)</span>). Ceres does not impose any constancy
requirement on these block sizes, but choosing them to be constant
simplifies the exposition.</p>
<p>A key characteristic of the bundle adjustment problem is that there is
no term <span class="math">\(f_{i}\)</span> that includes two or more point blocks.  This in
turn implies that the matrix <span class="math">\(H\)</span> is of the form</p>
<div class="math" id="equation-hblock">
<span class="eqno">(9)</span>\[\begin{split}H = \left[ \begin{matrix} B &amp; E\\ E^\top &amp; C \end{matrix} \right]\ ,\end{split}\]</div>
<p>where <span class="math">\(B \in \mathbb{R}^{pc\times pc}\)</span> is a block sparse matrix
with <span class="math">\(p\)</span> blocks of size <span class="math">\(c\times c\)</span> and <span class="math">\(C \in
\mathbb{R}^{qs\times qs}\)</span> is a block diagonal matrix with <span class="math">\(q\)</span> blocks
of size <span class="math">\(s\times s\)</span>. <span class="math">\(E \in \mathbb{R}^{pc\times qs}\)</span> is a
general block sparse matrix, with a block of size <span class="math">\(c\times s\)</span>
for each observation. Let us now block partition <span class="math">\(\Delta x =
[\Delta y,\Delta z]\)</span> and <span class="math">\(g=[v,w]\)</span> to restate <a href="#equation-normal">(8)</a>
as the block structured linear system</p>
<div class="math" id="equation-linear2">
<span class="eqno">(10)</span>\[\begin{split}\left[ \begin{matrix} B &amp; E\\ E^\top &amp; C \end{matrix}
             \right]\left[ \begin{matrix} \Delta y \\ \Delta z
                 \end{matrix} \right] = \left[ \begin{matrix} v\\ w
                 \end{matrix} \right]\ ,\end{split}\]</div>
<p>and apply Gaussian elimination to it. As we noted above, <span class="math">\(C\)</span> is
a block diagonal matrix, with small diagonal blocks of size
<span class="math">\(s\times s\)</span>.  Thus, calculating the inverse of <span class="math">\(C\)</span> by
inverting each of these blocks is cheap. This allows us to eliminate
<span class="math">\(\Delta z\)</span> by observing that <span class="math">\(\Delta z = C^{-1}(w - E^\top
\Delta y)\)</span>, giving us</p>
<div class="math" id="equation-schur">
<span class="eqno">(11)</span>\[\left[B - EC^{-1}E^\top\right] \Delta y = v - EC^{-1}w\ .\]</div>
<p>The matrix</p>
<div class="math">
\[S = B - EC^{-1}E^\top\]</div>
<p>is the Schur complement of <span class="math">\(C\)</span> in <span class="math">\(H\)</span>. It is also known as
the <em>reduced camera matrix</em>, because the only variables
participating in <a href="#equation-schur">(11)</a> are the ones corresponding to the
cameras. <span class="math">\(S \in \mathbb{R}^{pc\times pc}\)</span> is a block structured
symmetric positive definite matrix, with blocks of size <span class="math">\(c\times
c\)</span>. The block <span class="math">\(S_{ij}\)</span> corresponding to the pair of images
<span class="math">\(i\)</span> and <span class="math">\(j\)</span> is non-zero if and only if the two images
observe at least one common point.</p>
<p>Now, <a href="#equation-linear2">(10)</a> can be solved by first forming <span class="math">\(S\)</span>, solving for
<span class="math">\(\Delta y\)</span>, and then back-substituting <span class="math">\(\Delta y\)</span> to
obtain the value of <span class="math">\(\Delta z\)</span>.  Thus, the solution of what was
an <span class="math">\(n\times n\)</span>, <span class="math">\(n=pc+qs\)</span> linear system is reduced to the
inversion of the block diagonal matrix <span class="math">\(C\)</span>, a few matrix-matrix
and matrix-vector multiplies, and the solution of block sparse
<span class="math">\(pc\times pc\)</span> linear system <a href="#equation-schur">(11)</a>.  For almost all
problems, the number of cameras is much smaller than the number of
points, <span class="math">\(p \ll q\)</span>, thus solving <a href="#equation-schur">(11)</a> is
significantly cheaper than solving <a href="#equation-linear2">(10)</a>. This is the
<em>Schur complement trick</em> <a class="reference internal" href="bibliography.html#brown" id="id27">[Brown]</a>.</p>
<p>This still leaves open the question of solving <a href="#equation-schur">(11)</a>. The
method of choice for solving symmetric positive definite systems
exactly is via the Cholesky factorization <a class="reference internal" href="bibliography.html#trefethenbau" id="id28">[TrefethenBau]</a> and
depending upon the structure of the matrix, there are, in general, two
options. The first is direct factorization, where we store and factor
<span class="math">\(S\)</span> as a dense matrix <a class="reference internal" href="bibliography.html#trefethenbau" id="id29">[TrefethenBau]</a>. This method has
<span class="math">\(O(p^2)\)</span> space complexity and <span class="math">\(O(p^3)\)</span> time complexity and
is only practical for problems with up to a few hundred cameras. Ceres
implements this strategy as the <code class="docutils literal"><span class="pre">DENSE_SCHUR</span></code> solver.</p>
<p>But, <span class="math">\(S\)</span> is typically a fairly sparse matrix, as most images
only see a small fraction of the scene. This leads us to the second
option: Sparse Direct Methods. These methods store <span class="math">\(S\)</span> as a
sparse matrix, use row and column re-ordering algorithms to maximize
the sparsity of the Cholesky decomposition, and focus their compute
effort on the non-zero part of the factorization <a class="reference internal" href="bibliography.html#chen" id="id30">[Chen]</a>. Sparse
direct methods, depending on the exact sparsity structure of the Schur
complement, allow bundle adjustment algorithms to significantly scale
up over those based on dense factorization. Ceres implements this
strategy as the <code class="docutils literal"><span class="pre">SPARSE_SCHUR</span></code> solver.</p>
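<p>A hedged sketch of choosing between the two Schur based factorization
solvers:</p>
<div class="highlight-c++"><div class="highlight"><pre>ceres::Solver::Options options;
// Up to a few hundred cameras: dense factorization of S.
options.linear_solver_type = ceres::DENSE_SCHUR;
// Larger problems with a sparse reduced camera matrix:
// options.linear_solver_type = ceres::SPARSE_SCHUR;
</pre></div></div>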
</div>
<div class="section" id="iterative-schur">
<span id="section-iterative-schur"></span><h3><code class="docutils literal"><span class="pre">ITERATIVE_SCHUR</span></code><a class="headerlink" href="#iterative-schur" title="Permalink to this headline">¶</a></h3>
<p>Another option for bundle adjustment problems is to apply
Preconditioned Conjugate Gradients to the reduced camera matrix
<span class="math">\(S\)</span> instead of <span class="math">\(H\)</span>. One reason to do this is that
<span class="math">\(S\)</span> is a much smaller matrix than <span class="math">\(H\)</span>, but more
importantly, it can be shown that <span class="math">\(\kappa(S)\leq \kappa(H)\)</span>.
Ceres implements Conjugate Gradients on <span class="math">\(S\)</span> as the
<code class="docutils literal"><span class="pre">ITERATIVE_SCHUR</span></code> solver. When the user chooses <code class="docutils literal"><span class="pre">ITERATIVE_SCHUR</span></code>
as the linear solver, Ceres automatically switches from the exact step
algorithm to an inexact step algorithm.</p>
<p>The key computational operation when using Conjugate Gradients is the
evaluation of the matrix vector product <span class="math">\(Sx\)</span> for an arbitrary
vector <span class="math">\(x\)</span>. There are two ways in which this product can be
evaluated, and this can be controlled using
<code class="docutils literal"><span class="pre">Solver::Options::use_explicit_schur_complement</span></code>. Depending on the
problem at hand, the performance difference between these two methods
can be quite substantial.</p>
<blockquote>
<div><ol class="arabic">
<li><p class="first"><strong>Implicit</strong> This is default. Implicit evaluation is suitable for
large problems where the cost of computing and storing the Schur
Complement <span class="math">\(S\)</span> is prohibitive. Because PCG only needs
access to <span class="math">\(S\)</span> via its product with a vector, one way to
evaluate <span class="math">\(Sx\)</span> is to observe that</p>
<div class="math">
\[x_1 &amp;= E^\top x\]</div>
<div class="math">
\[x_2 &amp;= C^{-1} x_1\]</div>
<div class="math">
\[\begin{split}x_3 &amp;= Ex_2\\\end{split}\]</div>
<div class="math">
\[\begin{split}x_4 &amp;= Bx\\\end{split}\]</div>
<div class="math" id="equation-schurtrick1">
<span class="eqno">(12)</span>\[Sx &amp;= x_4 - x_3\]</div>
<p>Thus, we can run PCG on <span class="math">\(S\)</span> with the same computational
effort per iteration as PCG on <span class="math">\(H\)</span>, while reaping the
benefits of a more powerful preconditioner. In fact, we do not
even need to compute <span class="math">\(H\)</span>, <a href="#equation-schurtrick1">(12)</a> can be
implemented using just the columns of <span class="math">\(J\)</span>.</p>
<p>Equation <a href="#equation-schurtrick1">(12)</a> is closely related to <em>Domain
Decomposition methods</em> for solving large linear systems that
arise in structural engineering and partial differential
equations. In the language of Domain Decomposition, each point in
a bundle adjustment problem is a domain, and the cameras form the
interface between these domains. The iterative solution of the
Schur complement then falls within the sub-category of techniques
known as Iterative Sub-structuring <a class="reference internal" href="bibliography.html#saad" id="id31">[Saad]</a> <a class="reference internal" href="bibliography.html#mathew" id="id32">[Mathew]</a>.</p>
</li>
<li><p class="first"><strong>Explicit</strong> The complexity of implicit matrix-vector product
evaluation scales with the number of non-zeros in the
Jacobian. For small to medium sized problems, the cost of
constructing the Schur Complement is small enough that it is
better to construct it explicitly in memory and use it to
evaluate the product <span class="math">\(Sx\)</span>.</p>
</li>
</ol>
</div></blockquote>
<blockquote>
<div><div class="admonition note">
<p class="first admonition-title">Note</p>
<p class="last">In exact arithmetic, the choice of implicit versus explicit Schur
complement would have no impact on solution quality. However, in
practice if the Jacobian is poorly conditioned, one may observe
(usually small) differences in solution quality. This is a
natural consequence of performing computations in finite arithmetic.</p>
</div>
</div></blockquote>
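<p>A configuration sketch; whether the explicit Schur complement pays off
is problem dependent, as discussed above:</p>
<div class="highlight-c++"><div class="highlight"><pre>ceres::Solver::Options options;
options.linear_solver_type = ceres::ITERATIVE_SCHUR;
options.preconditioner_type = ceres::SCHUR_JACOBI;
// For small to medium problems, forming S explicitly may be faster.
options.use_explicit_schur_complement = true;
</pre></div></div>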
</div>
<div class="section" id="preconditioner">
<span id="section-preconditioner"></span><h3>Preconditioner<a class="headerlink" href="#preconditioner" title="Permalink to this headline">¶</a></h3>
<p>The convergence rate of Conjugate Gradients for
solving <a href="#equation-normal">(8)</a> depends on the distribution of eigenvalues
of <span class="math">\(H\)</span> <a class="reference internal" href="bibliography.html#saad" id="id33">[Saad]</a>. A useful upper bound is
<span class="math">\(\sqrt{\kappa(H)}\)</span>, where, <span class="math">\(\kappa(H)\)</span> is the condition
number of the matrix <span class="math">\(H\)</span>. For most bundle adjustment problems,
<span class="math">\(\kappa(H)\)</span> is high and a direct application of Conjugate
Gradients to <a href="#equation-normal">(8)</a> results in extremely poor performance.</p>
<p>The solution to this problem is to replace <a href="#equation-normal">(8)</a> with a
<em>preconditioned</em> system.  Given a linear system, <span class="math">\(Ax =b\)</span> and a
preconditioner <span class="math">\(M\)</span> the preconditioned system is given by
<span class="math">\(M^{-1}Ax = M^{-1}b\)</span>. The resulting algorithm is known as
Preconditioned Conjugate Gradients algorithm (PCG) and its worst case
complexity now depends on the condition number of the <em>preconditioned</em>
matrix <span class="math">\(\kappa(M^{-1}A)\)</span>.</p>
<p>The computational cost of using a preconditioner <span class="math">\(M\)</span> is the cost
of computing <span class="math">\(M\)</span> and evaluating the product <span class="math">\(M^{-1}y\)</span> for
arbitrary vectors <span class="math">\(y\)</span>. Thus, there are two competing factors to
consider: how much of <span class="math">\(H\)</span>&#8217;s structure is captured by <span class="math">\(M\)</span>
so that the condition number <span class="math">\(\kappa(M^{-1}H)\)</span> is low, and the
computational cost of constructing and using <span class="math">\(M\)</span>.  The ideal
preconditioner would be one for which <span class="math">\(\kappa(M^{-1}A)
=1\)</span>. <span class="math">\(M=A\)</span> achieves this, but it is not a practical choice, as
applying this preconditioner would require solving a linear system
equivalent to the unpreconditioned problem.  It is usually the case
that the more information <span class="math">\(M\)</span> has about <span class="math">\(H\)</span>, the more
expensive it is to use. For example, Incomplete Cholesky factorization
based preconditioners have much better convergence behavior than the
Jacobi preconditioner, but are also much more expensive.</p>
<p>The simplest of all preconditioners is the diagonal or Jacobi
preconditioner, i.e., <span class="math">\(M=\operatorname{diag}(A)\)</span>, which for
block structured matrices like <span class="math">\(H\)</span> can be generalized to the
block Jacobi preconditioner. Ceres implements the block Jacobi
preconditioner and refers to it as <code class="docutils literal"><span class="pre">JACOBI</span></code>. When used with
<a class="reference internal" href="#section-cgnr"><span class="std std-ref">CGNR</span></a> it refers to the block diagonal of <span class="math">\(H\)</span> and
when used with <a class="reference internal" href="#section-iterative-schur"><span class="std std-ref">ITERATIVE_SCHUR</span></a> it refers to the block
diagonal of <span class="math">\(B\)</span> <a class="reference internal" href="bibliography.html#mandel" id="id34">[Mandel]</a>.</p>
<p>Another obvious choice for <a class="reference internal" href="#section-iterative-schur"><span class="std std-ref">ITERATIVE_SCHUR</span></a> is the block
diagonal of the Schur complement matrix <span class="math">\(S\)</span>, i.e., the block
Jacobi preconditioner for <span class="math">\(S\)</span>. Ceres implements it and refers to
it as the <code class="docutils literal"><span class="pre">SCHUR_JACOBI</span></code> preconditioner.</p>
<p>For bundle adjustment problems arising in reconstruction from
community photo collections, more effective preconditioners can be
constructed by analyzing and exploiting the camera-point visibility
structure of the scene <a class="reference internal" href="bibliography.html#kushalagarwal" id="id35">[KushalAgarwal]</a>. Ceres implements the two
visibility based preconditioners described by Kushal &amp; Agarwal as
<code class="docutils literal"><span class="pre">CLUSTER_JACOBI</span></code> and <code class="docutils literal"><span class="pre">CLUSTER_TRIDIAGONAL</span></code>. These are fairly new
preconditioners and Ceres&#8217; implementation of them is in its early
stages and is not as mature as the other preconditioners described
above.</p>
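<p>The preconditioner is selected via
<code class="docutils literal"><span class="pre">Solver::Options::preconditioner_type</span></code>, for example:</p>
<div class="highlight-c++"><div class="highlight"><pre>ceres::Solver::Options options;
options.linear_solver_type = ceres::ITERATIVE_SCHUR;
// JACOBI, SCHUR_JACOBI, CLUSTER_JACOBI or CLUSTER_TRIDIAGONAL.
options.preconditioner_type = ceres::SCHUR_JACOBI;
</pre></div></div>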
</div>
<div class="section" id="ordering">
<span id="section-ordering"></span><h3>Ordering<a class="headerlink" href="#ordering" title="Permalink to this headline">¶</a></h3>
<p>The order in which variables are eliminated in a linear solver can
have a significant impact on the efficiency and accuracy of the
method. For example, when doing sparse Cholesky factorization, there
are matrices for which a good ordering will give a Cholesky factor
with <span class="math">\(O(n)\)</span> storage, whereas a bad ordering will result in a
completely dense factor.</p>
<p>Ceres allows the user to provide varying amounts of hints to the
solver about the variable elimination ordering to use. This can range
from no hints, where the solver is free to decide the best ordering
based on the user&#8217;s choices like the linear solver being used, to an
exact order in which the variables should be eliminated, and a variety
of possibilities in between.</p>
<p>Instances of the <a class="reference internal" href="#_CPPv2N5ceres22ParameterBlockOrderingE" title="ceres::ParameterBlockOrdering"><code class="xref cpp cpp-class docutils literal"><span class="pre">ParameterBlockOrdering</span></code></a> class are used to
communicate this information to Ceres.</p>
<p>Formally an ordering is an ordered partitioning of the parameter
blocks. Each parameter block belongs to exactly one group, and each
group has a unique integer associated with it that determines its
order in the set of groups. We call these groups <em>Elimination Groups</em>.</p>
<p>Given such an ordering, Ceres ensures that the parameter blocks in the
lowest numbered elimination group are eliminated first, and then the
parameter blocks in the next lowest numbered elimination group and so
on. Within each elimination group, Ceres is free to order the
parameter blocks as it chooses. For example, consider the linear system</p>
<div class="math">
\[\begin{split}x + y &amp;= 3\\
2x + 3y &amp;= 7\end{split}\]</div>
<p>There are two ways in which it can be solved: first eliminate
<span class="math">\(x\)</span> from the two equations, solve for <span class="math">\(y\)</span> and then back
substitute for <span class="math">\(x\)</span>; or first eliminate <span class="math">\(y\)</span>, solve
for <span class="math">\(x\)</span> and back substitute for <span class="math">\(y\)</span>. The user can
construct three orderings here.</p>
<ol class="arabic simple">
<li><span class="math">\(\{0: x\}, \{1: y\}\)</span> : Eliminate <span class="math">\(x\)</span> first.</li>
<li><span class="math">\(\{0: y\}, \{1: x\}\)</span> : Eliminate <span class="math">\(y\)</span> first.</li>
<li><span class="math">\(\{0: x, y\}\)</span>        : Solver gets to decide the elimination order.</li>
</ol>
<p>Thus, to have Ceres determine the ordering automatically using
heuristics, put all the variables in the same elimination group. The
identity of the group does not matter. This is the same as not
specifying an ordering at all. To control the ordering for every
variable, create an elimination group per variable, ordering them in
the desired order.</p>
<p>If the user is using one of the Schur solvers (<code class="docutils literal"><span class="pre">DENSE_SCHUR</span></code>,
<code class="docutils literal"><span class="pre">SPARSE_SCHUR</span></code>, <code class="docutils literal"><span class="pre">ITERATIVE_SCHUR</span></code>) and chooses to specify an
ordering, it must have one important property. The lowest numbered
elimination group must form an independent set in the graph
corresponding to the Hessian, or in other words, no two parameter
blocks in the first elimination group should co-occur in the same
residual block. For the best performance, this elimination group
should be as large as possible. For standard bundle adjustment
problems, this corresponds to the first elimination group containing
all the 3d points, and the second containing all the camera
parameter blocks.</p>
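<p>A minimal sketch of constructing such an ordering for a bundle adjustment
problem follows; <code class="docutils literal"><span class="pre">points</span></code> and <code class="docutils literal"><span class="pre">cameras</span></code> are hypothetical containers
holding pointers to the corresponding parameter blocks.</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>// Sketch: points go in group 0 (eliminated first), cameras in group 1.
std::vector&lt;double*&gt; points;   // hypothetical: point parameter blocks
std::vector&lt;double*&gt; cameras;  // hypothetical: camera parameter blocks
ceres::Solver::Options options;
ceres::ParameterBlockOrdering* ordering = new ceres::ParameterBlockOrdering;
for (double* point : points) {
  ordering-&gt;AddElementToGroup(point, 0);   // eliminated first
}
for (double* camera : cameras) {
  ordering-&gt;AddElementToGroup(camera, 1);  // eliminated second
}
options.linear_solver_ordering.reset(ordering);
</pre></div></div>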
<p>If the user leaves the choice to Ceres, then the solver uses an
approximate maximum independent set algorithm to identify the first
elimination group <a class="reference internal" href="bibliography.html#lisaad" id="id36">[LiSaad]</a>.</p>
</div>
</div>
<div class="section" id="solver-options">
<span id="section-solver-options"></span><h2><a class="reference internal" href="#_CPPv2N5ceres6Solver7OptionsE" title="ceres::Solver::Options"><code class="xref cpp cpp-class docutils literal"><span class="pre">Solver::Options</span></code></a><a class="headerlink" href="#solver-options" title="Permalink to this headline">¶</a></h2>
<dl class="class">
<dt id="_CPPv2N5ceres6Solver7OptionsE">
<span id="ceres::Solver::Options"></span><em class="property">class </em><code class="descclassname">Solver::</code><code class="descname">Options</code><a class="headerlink" href="#_CPPv2N5ceres6Solver7OptionsE" title="Permalink to this definition">¶</a></dt>
<dd><p><a class="reference internal" href="#_CPPv2N5ceres6Solver7OptionsE" title="ceres::Solver::Options"><code class="xref cpp cpp-class docutils literal"><span class="pre">Solver::Options</span></code></a> controls the overall behavior of the
solver. We list the various settings and their default values below.</p>
</dd></dl>

<dl class="function">
<dt id="_CPPv2NK5ceres6Solver7Options7IsValidEP6string">
<span id="ceres::Solver::Options::IsValid__stringPC"></span>bool <code class="descclassname">Solver::Options::</code><code class="descname">IsValid</code><span class="sig-paren">(</span>string *<em>error</em><span class="sig-paren">)</span> <em class="property">const</em><a class="headerlink" href="#_CPPv2NK5ceres6Solver7Options7IsValidEP6string" title="Permalink to this definition">¶</a></dt>
<dd><p>Validates the values in the options struct and returns true on
success. If there is a problem, the method returns false with
<code class="docutils literal"><span class="pre">error</span></code> containing a textual description of the cause.</p>
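<p>A small usage sketch, assuming the usual Ceres and standard headers are
included; the invalid value below is purely illustrative:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>// Sketch: validate options before calling Solve.
ceres::Solver::Options options;
options.function_tolerance = -1.0;  // deliberately suspicious value
std::string error;
if (!options.IsValid(&amp;error)) {
  std::cerr &lt;&lt; "Invalid solver options: " &lt;&lt; error &lt;&lt; "\n";
}
</pre></div></div>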
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options14minimizer_typeE">
<span id="ceres::Solver::Options::minimizer_type__MinimizerType"></span>MinimizerType <code class="descclassname">Solver::Options::</code><code class="descname">minimizer_type</code><a class="headerlink" href="#_CPPv2N6Solver7Options14minimizer_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">TRUST_REGION</span></code></p>
<p>Choose between <code class="docutils literal"><span class="pre">LINE_SEARCH</span></code> and <code class="docutils literal"><span class="pre">TRUST_REGION</span></code> algorithms. See
<a class="reference internal" href="#section-trust-region-methods"><span class="std std-ref">Trust Region Methods</span></a> and
<a class="reference internal" href="#section-line-search-methods"><span class="std std-ref">Line Search Methods</span></a> for more details.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options26line_search_direction_typeE">
<span id="ceres::Solver::Options::line_search_direction_type__LineSearchDirectionType"></span>LineSearchDirectionType <code class="descclassname">Solver::Options::</code><code class="descname">line_search_direction_type</code><a class="headerlink" href="#_CPPv2N6Solver7Options26line_search_direction_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">LBFGS</span></code></p>
<p>Choices are <code class="docutils literal"><span class="pre">STEEPEST_DESCENT</span></code>, <code class="docutils literal"><span class="pre">NONLINEAR_CONJUGATE_GRADIENT</span></code>,
<code class="docutils literal"><span class="pre">BFGS</span></code> and <code class="docutils literal"><span class="pre">LBFGS</span></code>.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options16line_search_typeE">
<span id="ceres::Solver::Options::line_search_type__LineSearchType"></span>LineSearchType <code class="descclassname">Solver::Options::</code><code class="descname">line_search_type</code><a class="headerlink" href="#_CPPv2N6Solver7Options16line_search_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">WOLFE</span></code></p>
<p>Choices are <code class="docutils literal"><span class="pre">ARMIJO</span></code> and <code class="docutils literal"><span class="pre">WOLFE</span></code> (strong Wolfe conditions).
Note that in order for the assumptions underlying the <code class="docutils literal"><span class="pre">BFGS</span></code> and
<code class="docutils literal"><span class="pre">LBFGS</span></code> line search direction algorithms to be guaranteed to be
satisfied, the <code class="docutils literal"><span class="pre">WOLFE</span></code> line search should be used.</p>
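<p>A minimal sketch of configuring a line search minimizer along these lines:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>// Sketch: L-BFGS with a strong Wolfe line search.
ceres::Solver::Options options;
options.minimizer_type = ceres::LINE_SEARCH;
options.line_search_direction_type = ceres::LBFGS;
options.line_search_type = ceres::WOLFE;
</pre></div></div>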
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options33nonlinear_conjugate_gradient_typeE">
<span id="ceres::Solver::Options::nonlinear_conjugate_gradient_type__NonlinearConjugateGradientType"></span>NonlinearConjugateGradientType <code class="descclassname">Solver::Options::</code><code class="descname">nonlinear_conjugate_gradient_type</code><a class="headerlink" href="#_CPPv2N6Solver7Options33nonlinear_conjugate_gradient_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">FLETCHER_REEVES</span></code></p>
<p>Choices are <code class="docutils literal"><span class="pre">FLETCHER_REEVES</span></code>, <code class="docutils literal"><span class="pre">POLAK_RIBIERE</span></code> and
<code class="docutils literal"><span class="pre">HESTENES_STIEFEL</span></code>.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options14max_lbfgs_rankE">
<span id="ceres::Solver::Options::max_lbfgs_rank__i"></span>int <code class="descclassname">Solver::Options::</code><code class="descname">max_lbfgs_rank</code><a class="headerlink" href="#_CPPv2N6Solver7Options14max_lbfgs_rankE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: 20</p>
<p>The L-BFGS hessian approximation is a low rank approximation to the
inverse of the Hessian matrix. The rank of the approximation
determines (linearly) the space and time complexity of using the
approximation. Higher the rank, the better is the quality of the
approximation. The increase in quality is however is bounded for a
number of reasons.</p>
<blockquote>
<div><ol class="arabic simple">
<li>The method only uses secant information and not actual
derivatives.</li>
<li>The Hessian approximation is constrained to be positive
definite.</li>
</ol>
</div></blockquote>
<p>So increasing this rank to a large number will increase the time and
space cost without a corresponding increase in solution
quality. There are no hard and fast rules for choosing the maximum
rank. The best choice usually requires some problem specific
experimentation.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options39use_approximate_eigenvalue_bfgs_scalingE">
<span id="ceres::Solver::Options::use_approximate_eigenvalue_bfgs_scaling__b"></span>bool <code class="descclassname">Solver::Options::</code><code class="descname">use_approximate_eigenvalue_bfgs_scaling</code><a class="headerlink" href="#_CPPv2N6Solver7Options39use_approximate_eigenvalue_bfgs_scalingE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">false</span></code></p>
<p>As part of the <code class="docutils literal"><span class="pre">BFGS</span></code> update step / <code class="docutils literal"><span class="pre">LBFGS</span></code> right-multiply
step, the initial inverse Hessian approximation is taken to be the
Identity.  However, <a class="reference internal" href="bibliography.html#oren" id="id37">[Oren]</a> showed that using instead <span class="math">\(I *
\gamma\)</span>, where <span class="math">\(\gamma\)</span> is a scalar chosen to approximate an
eigenvalue of the true inverse Hessian can result in improved
convergence in a wide variety of cases.  Setting
<code class="docutils literal"><span class="pre">use_approximate_eigenvalue_bfgs_scaling</span></code> to true enables this
scaling in <code class="docutils literal"><span class="pre">BFGS</span></code> (before first iteration) and <code class="docutils literal"><span class="pre">LBFGS</span></code> (at each
iteration).</p>
<p>Precisely, approximate eigenvalue scaling equates to</p>
<div class="math">
\[\gamma = \frac{y_k' s_k}{y_k' y_k}\]</div>
<p>With:</p>
<div class="math">
\[y_k = \nabla f_{k+1} - \nabla f_k\]</div>
<div class="math">
\[s_k = x_{k+1} - x_k\]</div>
<p>Where <span class="math">\(f()\)</span> is the line search objective and <span class="math">\(x\)</span> the
vector of parameter values <a class="reference internal" href="bibliography.html#nocedalwright" id="id38">[NocedalWright]</a>.</p>
<p>It is important to note that approximate eigenvalue scaling does
<strong>not</strong> <em>always</em> improve convergence, and that it can in fact
<em>significantly</em> degrade performance for certain classes of problem,
which is why it is disabled by default.  In particular it can
degrade performance when the sensitivity of the problem to different
parameters varies significantly, as in this case a single scalar
factor fails to capture this variation and detrimentally downscales
parts of the Jacobian approximation which correspond to
low-sensitivity parameters. It can also reduce the robustness of the
solution to errors in the Jacobians.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options30line_search_interpolation_typeE">
<span id="ceres::Solver::Options::line_search_interpolation_type__LineSearchIterpolationType"></span>LineSearchIterpolationType <code class="descclassname">Solver::Options::</code><code class="descname">line_search_interpolation_type</code><a class="headerlink" href="#_CPPv2N6Solver7Options30line_search_interpolation_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">CUBIC</span></code></p>
<p>Degree of the polynomial used to approximate the objective
function. Valid values are <code class="docutils literal"><span class="pre">BISECTION</span></code>, <code class="docutils literal"><span class="pre">QUADRATIC</span></code> and
<code class="docutils literal"><span class="pre">CUBIC</span></code>.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options25min_line_search_step_sizeE">
<span id="ceres::Solver::Options::min_line_search_step_size__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">min_line_search_step_size</code><a class="headerlink" href="#_CPPv2N6Solver7Options25min_line_search_step_sizeE" title="Permalink to this definition">¶</a></dt>
<dd><p>The line search terminates if:</p>
<div class="math">
\[\|\Delta x_k\|_\infty &lt; \text{min_line_search_step_size}\]</div>
<p>where <span class="math">\(\|\cdot\|_\infty\)</span> refers to the max norm, and
<span class="math">\(\Delta x_k\)</span> is the step change in the parameter values at
the <span class="math">\(k\)</span>-th iteration.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options40line_search_sufficient_function_decreaseE">
<span id="ceres::Solver::Options::line_search_sufficient_function_decrease__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">line_search_sufficient_function_decrease</code><a class="headerlink" href="#_CPPv2N6Solver7Options40line_search_sufficient_function_decreaseE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">1e-4</span></code></p>
<p>Solving the line search problem exactly is computationally
prohibitive. Fortunately, line search based optimization algorithms
can still guarantee convergence if instead of an exact solution,
the line search algorithm returns a solution which decreases the
value of the objective function sufficiently. More precisely, we
are looking for a step size s.t.</p>
<div class="math">
\[f(\text{step_size}) \le f(0) + \text{sufficient_decrease} * [f'(0) * \text{step_size}]\]</div>
<p>This condition is known as the Armijo condition.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options32max_line_search_step_contractionE">
<span id="ceres::Solver::Options::max_line_search_step_contraction__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">max_line_search_step_contraction</code><a class="headerlink" href="#_CPPv2N6Solver7Options32max_line_search_step_contractionE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">1e-3</span></code></p>
<p>In each iteration of the line search,</p>
<div class="math">
\[\text{new_step_size} &gt;= \text{max_line_search_step_contraction} * \text{step_size}\]</div>
<p>Note that by definition, for contraction:</p>
<div class="math">
\[0 &lt; \text{max_step_contraction} &lt; \text{min_step_contraction} &lt; 1\]</div>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options32min_line_search_step_contractionE">
<span id="ceres::Solver::Options::min_line_search_step_contraction__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">min_line_search_step_contraction</code><a class="headerlink" href="#_CPPv2N6Solver7Options32min_line_search_step_contractionE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">0.6</span></code></p>
<p>In each iteration of the line search,</p>
<div class="math">
\[\text{new_step_size} &lt;= \text{min_line_search_step_contraction} * \text{step_size}\]</div>
<p>Note that by definition, for contraction:</p>
<div class="math">
\[0 &lt; \text{max_step_contraction} &lt; \text{min_step_contraction} &lt; 1\]</div>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options40max_num_line_search_step_size_iterationsE">
<span id="ceres::Solver::Options::max_num_line_search_step_size_iterations__i"></span>int <code class="descclassname">Solver::Options::</code><code class="descname">max_num_line_search_step_size_iterations</code><a class="headerlink" href="#_CPPv2N6Solver7Options40max_num_line_search_step_size_iterationsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">20</span></code></p>
<p>Maximum number of trial step size iterations during each line
search. If a step size satisfying the search conditions cannot be
found within this number of trials, the line search will stop.</p>
<p>As this is an &#8216;artificial&#8217; constraint (one imposed by the user, not
the underlying math), if <code class="docutils literal"><span class="pre">WOLFE</span></code> line search is being used, <em>and</em>
points satisfying the Armijo sufficient (function) decrease
condition have been found during the current search (in <span class="math">\(&lt;=\)</span>
<code class="docutils literal"><span class="pre">max_num_line_search_step_size_iterations</span></code>), then the step size
with the lowest function value which satisfies the Armijo condition
will be returned as the new valid step, even though it does <em>not</em>
satisfy the strong Wolfe conditions. This behaviour protects
against early termination of the optimizer at a sub-optimal point.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options38max_num_line_search_direction_restartsE">
<span id="ceres::Solver::Options::max_num_line_search_direction_restarts__i"></span>int <code class="descclassname">Solver::Options::</code><code class="descname">max_num_line_search_direction_restarts</code><a class="headerlink" href="#_CPPv2N6Solver7Options38max_num_line_search_direction_restartsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">5</span></code></p>
<p>Maximum number of restarts of the line search direction algorithm
before terminating the optimization. Restarts of the line search
direction algorithm occur when the current algorithm fails to
produce a new descent direction. This typically indicates a
numerical failure, or a breakdown in the validity of the
approximations used.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options41line_search_sufficient_curvature_decreaseE">
<span id="ceres::Solver::Options::line_search_sufficient_curvature_decrease__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">line_search_sufficient_curvature_decrease</code><a class="headerlink" href="#_CPPv2N6Solver7Options41line_search_sufficient_curvature_decreaseE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">0.9</span></code></p>
<p>The strong Wolfe conditions consist of the Armijo sufficient
decrease condition, and an additional requirement that the
step size be chosen s.t. the <em>magnitude</em> (&#8216;strong&#8217; Wolfe
conditions) of the gradient along the search direction
decreases sufficiently. Precisely, this second condition
is that we seek a step size s.t.</p>
<div class="math">
\[\|f'(\text{step_size})\| &lt;= \text{sufficient_curvature_decrease} * \|f'(0)\|\]</div>
<p>Where <span class="math">\(f()\)</span> is the line search objective and <span class="math">\(f'()\)</span> is the derivative
of <span class="math">\(f\)</span> with respect to the step size: <span class="math">\(\frac{d f}{d~\text{step size}}\)</span>.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options30max_line_search_step_expansionE">
<span id="ceres::Solver::Options::max_line_search_step_expansion__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">max_line_search_step_expansion</code><a class="headerlink" href="#_CPPv2N6Solver7Options30max_line_search_step_expansionE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">10.0</span></code></p>
<p>During the bracketing phase of a Wolfe line search, the step size
is increased until either a point satisfying the Wolfe conditions
is found, or an upper bound for a bracket containing a point
satisfying the conditions is found.  Precisely, at each iteration
of the expansion:</p>
<div class="math">
\[\text{new_step_size} &lt;= \text{max_step_expansion} * \text{step_size}\]</div>
<p>By definition for expansion</p>
<div class="math">
\[\text{max_step_expansion} &gt; 1.0\]</div>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options26trust_region_strategy_typeE">
<span id="ceres::Solver::Options::trust_region_strategy_type__TrustRegionStrategyType"></span>TrustRegionStrategyType <code class="descclassname">Solver::Options::</code><code class="descname">trust_region_strategy_type</code><a class="headerlink" href="#_CPPv2N6Solver7Options26trust_region_strategy_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">LEVENBERG_MARQUARDT</span></code></p>
<p>The trust region step computation algorithm used by
Ceres. Currently <code class="docutils literal"><span class="pre">LEVENBERG_MARQUARDT</span></code> and <code class="docutils literal"><span class="pre">DOGLEG</span></code> are the two
valid choices. See <a class="reference internal" href="#section-levenberg-marquardt"><span class="std std-ref">Levenberg-Marquardt</span></a> and
<a class="reference internal" href="#section-dogleg"><span class="std std-ref">Dogleg</span></a> for more details.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options11dogleg_typeE">
<span id="ceres::Solver::Options::dogleg_type__DoglegType"></span>DoglegType <code class="descclassname">Solver::Options::</code><code class="descname">dogleg_type</code><a class="headerlink" href="#_CPPv2N6Solver7Options11dogleg_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">TRADITIONAL_DOGLEG</span></code></p>
<p>Ceres supports two different dogleg strategies: the
<code class="docutils literal"><span class="pre">TRADITIONAL_DOGLEG</span></code> method by Powell and the <code class="docutils literal"><span class="pre">SUBSPACE_DOGLEG</span></code>
method described by <a class="reference internal" href="bibliography.html#byrdschnabel" id="id39">[ByrdSchnabel]</a>. See <a class="reference internal" href="#section-dogleg"><span class="std std-ref">Dogleg</span></a>
for more details.</p>
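<p>A minimal sketch of selecting the subspace dogleg strategy:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>// Sketch: use the subspace dogleg trust region strategy.
ceres::Solver::Options options;
options.trust_region_strategy_type = ceres::DOGLEG;
options.dogleg_type = ceres::SUBSPACE_DOGLEG;
</pre></div></div>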
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options22use_nonmonotonic_stepsE">
<span id="ceres::Solver::Options::use_nonmonotonic_steps__b"></span>bool <code class="descclassname">Solver::Options::</code><code class="descname">use_nonmonotonic_steps</code><a class="headerlink" href="#_CPPv2N6Solver7Options22use_nonmonotonic_stepsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">false</span></code></p>
<p>Relax the requirement that the trust-region algorithm take strictly
decreasing steps. See <a class="reference internal" href="#section-non-monotonic-steps"><span class="std std-ref">Non-monotonic Steps</span></a> for more
details.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options34max_consecutive_nonmonotonic_stepsE">
<span id="ceres::Solver::Options::max_consecutive_nonmonotonic_steps__i"></span>int <code class="descclassname">Solver::Options::</code><code class="descname">max_consecutive_nonmonotonic_steps</code><a class="headerlink" href="#_CPPv2N6Solver7Options34max_consecutive_nonmonotonic_stepsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">5</span></code></p>
<p>The window size used by the step selection algorithm to accept
non-monotonic steps.</p>
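<p>A minimal sketch of enabling non-monotonic steps with the default window
size:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>// Sketch: allow the trust region algorithm to take non-monotonic steps.
ceres::Solver::Options options;
options.use_nonmonotonic_steps = true;
options.max_consecutive_nonmonotonic_steps = 5;  // the default window size
</pre></div></div>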
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options18max_num_iterationsE">
<span id="ceres::Solver::Options::max_num_iterations__i"></span>int <code class="descclassname">Solver::Options::</code><code class="descname">max_num_iterations</code><a class="headerlink" href="#_CPPv2N6Solver7Options18max_num_iterationsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">50</span></code></p>
<p>Maximum number of iterations for which the solver should run.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options26max_solver_time_in_secondsE">
<span id="ceres::Solver::Options::max_solver_time_in_seconds__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">max_solver_time_in_seconds</code><a class="headerlink" href="#_CPPv2N6Solver7Options26max_solver_time_in_secondsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">1e6</span></code></p>
<p>Maximum amount of time for which the solver should run.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options11num_threadsE">
<span id="ceres::Solver::Options::num_threads__i"></span>int <code class="descclassname">Solver::Options::</code><code class="descname">num_threads</code><a class="headerlink" href="#_CPPv2N6Solver7Options11num_threadsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">1</span></code></p>
<p>Number of threads used by Ceres to evaluate the Jacobian.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options27initial_trust_region_radiusE">
<span id="ceres::Solver::Options::initial_trust_region_radius__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">initial_trust_region_radius</code><a class="headerlink" href="#_CPPv2N6Solver7Options27initial_trust_region_radiusE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">1e4</span></code></p>
<p>The size of the initial trust region. When the
<code class="docutils literal"><span class="pre">LEVENBERG_MARQUARDT</span></code> strategy is used, the reciprocal of this
number is the initial regularization parameter.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options23max_trust_region_radiusE">
<span id="ceres::Solver::Options::max_trust_region_radius__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">max_trust_region_radius</code><a class="headerlink" href="#_CPPv2N6Solver7Options23max_trust_region_radiusE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">1e16</span></code></p>
<p>The trust region radius is not allowed to grow beyond this value.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options23min_trust_region_radiusE">
<span id="ceres::Solver::Options::min_trust_region_radius__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">min_trust_region_radius</code><a class="headerlink" href="#_CPPv2N6Solver7Options23min_trust_region_radiusE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">1e-32</span></code></p>
<p>The solver terminates, when the trust region becomes smaller than
this value.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options21min_relative_decreaseE">
<span id="ceres::Solver::Options::min_relative_decrease__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">min_relative_decrease</code><a class="headerlink" href="#_CPPv2N6Solver7Options21min_relative_decreaseE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">1e-3</span></code></p>
<p>Lower threshold for relative decrease before a trust-region step is
accepted.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options15min_lm_diagonalE">
<span id="ceres::Solver::Options::min_lm_diagonal__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">min_lm_diagonal</code><a class="headerlink" href="#_CPPv2N6Solver7Options15min_lm_diagonalE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">1e6</span></code></p>
<p>The <code class="docutils literal"><span class="pre">LEVENBERG_MARQUARDT</span></code> strategy uses a diagonal matrix to
regularize the trust region step. This is the lower bound on
the values of this diagonal matrix.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options15max_lm_diagonalE">
<span id="ceres::Solver::Options::max_lm_diagonal__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">max_lm_diagonal</code><a class="headerlink" href="#_CPPv2N6Solver7Options15max_lm_diagonalE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default:  <code class="docutils literal"><span class="pre">1e32</span></code></p>
<p>The <code class="docutils literal"><span class="pre">LEVENBERG_MARQUARDT</span></code> strategy uses a diagonal matrix to
regularize the trust region step. This is the upper bound on
the values of this diagonal matrix.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options33max_num_consecutive_invalid_stepsE">
<span id="ceres::Solver::Options::max_num_consecutive_invalid_steps__i"></span>int <code class="descclassname">Solver::Options::</code><code class="descname">max_num_consecutive_invalid_steps</code><a class="headerlink" href="#_CPPv2N6Solver7Options33max_num_consecutive_invalid_stepsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">5</span></code></p>
<p>The step returned by a trust region strategy can sometimes be
numerically invalid, usually because of conditioning
issues. Instead of crashing or stopping the optimization, the
optimizer can go ahead and try solving with a smaller trust
region/better conditioned problem. This parameter sets the number
of consecutive retries before the minimizer gives up.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options18function_toleranceE">
<span id="ceres::Solver::Options::function_tolerance__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">function_tolerance</code><a class="headerlink" href="#_CPPv2N6Solver7Options18function_toleranceE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">1e-6</span></code></p>
<p>Solver terminates if</p>
<div class="math">
\[\frac{|\Delta \text{cost}|}{\text{cost}} &lt;= \text{function_tolerance}\]</div>
<p>where, <span class="math">\(\Delta \text{cost}\)</span> is the change in objective
function value (up or down) in the current iteration of
Levenberg-Marquardt.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options18gradient_toleranceE">
<span id="ceres::Solver::Options::gradient_tolerance__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">gradient_tolerance</code><a class="headerlink" href="#_CPPv2N6Solver7Options18gradient_toleranceE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">1e-10</span></code></p>
<p>Solver terminates if</p>
<div class="math">
\[\|x - \Pi \boxplus(x, -g(x))\|_\infty &lt;= \text{gradient_tolerance}\]</div>
<p>where <span class="math">\(\|\cdot\|_\infty\)</span> refers to the max norm, <span class="math">\(\Pi\)</span>
is the projection onto the bound constraints and <span class="math">\(\boxplus\)</span> is
the Plus operation for the overall local parameterization associated
with the parameter vector.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options19parameter_toleranceE">
<span id="ceres::Solver::Options::parameter_tolerance__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">parameter_tolerance</code><a class="headerlink" href="#_CPPv2N6Solver7Options19parameter_toleranceE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">1e-8</span></code></p>
<p>Solver terminates if</p>
<div class="math">
\[\|\Delta x\| &lt;= (\|x\| + \text{parameter_tolerance}) * \text{parameter_tolerance}\]</div>
<p>where <span class="math">\(\Delta x\)</span> is the step computed by the linear solver in
the current iteration.</p>
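<p>Taken together, <code class="docutils literal"><span class="pre">function_tolerance</span></code>,
<code class="docutils literal"><span class="pre">gradient_tolerance</span></code> and <code class="docutils literal"><span class="pre">parameter_tolerance</span></code> control when the
solver declares convergence. A minimal sketch of tightening them (the values
below are purely illustrative):</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>// Sketch: tighten the convergence tolerances (illustrative values).
ceres::Solver::Options options;
options.function_tolerance = 1e-8;
options.gradient_tolerance = 1e-12;
options.parameter_tolerance = 1e-10;
</pre></div></div>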
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options18linear_solver_typeE">
<span id="ceres::Solver::Options::linear_solver_type__LinearSolverType"></span>LinearSolverType <code class="descclassname">Solver::Options::</code><code class="descname">linear_solver_type</code><a class="headerlink" href="#_CPPv2N6Solver7Options18linear_solver_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">SPARSE_NORMAL_CHOLESKY</span></code> / <code class="docutils literal"><span class="pre">DENSE_QR</span></code></p>
<p>Type of linear solver used to compute the solution to the linear
least squares problem in each iteration of the Levenberg-Marquardt
algorithm. If Ceres is built with support for <code class="docutils literal"><span class="pre">SuiteSparse</span></code> or
<code class="docutils literal"><span class="pre">CXSparse</span></code> or <code class="docutils literal"><span class="pre">Eigen</span></code>&#8216;s sparse Cholesky factorization, the
default is <code class="docutils literal"><span class="pre">SPARSE_NORMAL_CHOLESKY</span></code>; otherwise it is
<code class="docutils literal"><span class="pre">DENSE_QR</span></code>.</p>
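<p>A minimal sketch of choosing a linear solver explicitly:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>// Sketch: pick a linear solver suited to the problem structure.
ceres::Solver::Options options;
// Bundle adjustment style problems usually benefit from a Schur solver:
options.linear_solver_type = ceres::SPARSE_SCHUR;
// Small, dense problems can simply use:
// options.linear_solver_type = ceres::DENSE_QR;
</pre></div></div>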
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options19preconditioner_typeE">
<span id="ceres::Solver::Options::preconditioner_type__PreconditionerType"></span>PreconditionerType <code class="descclassname">Solver::Options::</code><code class="descname">preconditioner_type</code><a class="headerlink" href="#_CPPv2N6Solver7Options19preconditioner_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">JACOBI</span></code></p>
<p>The preconditioner used by the iterative linear solver. The default
is the block Jacobi preconditioner. Valid values are (in increasing
order of complexity) <code class="docutils literal"><span class="pre">IDENTITY</span></code>, <code class="docutils literal"><span class="pre">JACOBI</span></code>, <code class="docutils literal"><span class="pre">SCHUR_JACOBI</span></code>,
<code class="docutils literal"><span class="pre">CLUSTER_JACOBI</span></code> and <code class="docutils literal"><span class="pre">CLUSTER_TRIDIAGONAL</span></code>. See
<a class="reference internal" href="#section-preconditioner"><span class="std std-ref">Preconditioner</span></a> for more details.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options26visibility_clustering_typeE">
<span id="ceres::Solver::Options::visibility_clustering_type__VisibilityClusteringType"></span>VisibilityClusteringType <code class="descclassname">Solver::Options::</code><code class="descname">visibility_clustering_type</code><a class="headerlink" href="#_CPPv2N6Solver7Options26visibility_clustering_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">CANONICAL_VIEWS</span></code></p>
<p>Type of clustering algorithm to use when constructing a visibility
based preconditioner. The original visibility based preconditioning
paper and implementation only used the canonical views algorithm.</p>
<p>This algorithm gives high quality results but for large dense
graphs can be particularly expensive, as its worst case complexity
is cubic in the size of the graph.</p>
<p>Another option is to use <code class="docutils literal"><span class="pre">SINGLE_LINKAGE</span></code> which is a simple
thresholded single linkage clustering algorithm that only pays
attention to tightly coupled blocks in the Schur complement. This
is a fast algorithm that works well.</p>
<p>The optimal choice of the clustering algorithm depends on the
sparsity structure of the problem, but generally speaking we
recommend that you try <code class="docutils literal"><span class="pre">CANONICAL_VIEWS</span></code> first and if it is too
expensive try <code class="docutils literal"><span class="pre">SINGLE_LINKAGE</span></code>.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options33dense_linear_algebra_library_typeE">
<span id="ceres::Solver::Options::dense_linear_algebra_library_type__DenseLinearAlgebraLibrary"></span>DenseLinearAlgebraLibrary <code class="descclassname">Solver::Options::</code><code class="descname">dense_linear_algebra_library_type</code><a class="headerlink" href="#_CPPv2N6Solver7Options33dense_linear_algebra_library_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">EIGEN</span></code></p>
<p>Ceres supports using multiple dense linear algebra libraries for
dense matrix factorizations. Currently <code class="docutils literal"><span class="pre">EIGEN</span></code> and <code class="docutils literal"><span class="pre">LAPACK</span></code> are
the valid choices. <code class="docutils literal"><span class="pre">EIGEN</span></code> is always available, <code class="docutils literal"><span class="pre">LAPACK</span></code> refers
to the system <code class="docutils literal"><span class="pre">BLAS</span> <span class="pre">+</span> <span class="pre">LAPACK</span></code> library which may or may not be
available.</p>
<p>This setting affects the <code class="docutils literal"><span class="pre">DENSE_QR</span></code>, <code class="docutils literal"><span class="pre">DENSE_NORMAL_CHOLESKY</span></code>
and <code class="docutils literal"><span class="pre">DENSE_SCHUR</span></code> solvers. For small to moderate sized problems
<code class="docutils literal"><span class="pre">EIGEN</span></code> is a fine choice but for large problems, an optimized
<code class="docutils literal"><span class="pre">LAPACK</span> <span class="pre">+</span> <span class="pre">BLAS</span></code> implementation can make a substantial difference
in performance.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options34sparse_linear_algebra_library_typeE">
<span id="ceres::Solver::Options::sparse_linear_algebra_library_type__SparseLinearAlgebraLibrary"></span>SparseLinearAlgebraLibrary <code class="descclassname">Solver::Options::</code><code class="descname">sparse_linear_algebra_library_type</code><a class="headerlink" href="#_CPPv2N6Solver7Options34sparse_linear_algebra_library_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: The highest available according to: <code class="docutils literal"><span class="pre">SUITE_SPARSE</span></code> &gt;
<code class="docutils literal"><span class="pre">CX_SPARSE</span></code> &gt; <code class="docutils literal"><span class="pre">EIGEN_SPARSE</span></code> &gt; <code class="docutils literal"><span class="pre">NO_SPARSE</span></code></p>
<p>Ceres supports the use of three sparse linear algebra libraries,
<code class="docutils literal"><span class="pre">SuiteSparse</span></code>, which is enabled by setting this parameter to
<code class="docutils literal"><span class="pre">SUITE_SPARSE</span></code>, <code class="docutils literal"><span class="pre">CXSparse</span></code>, which can be selected by setting
this parameter to <code class="docutils literal"><span class="pre">CX_SPARSE</span></code> and <code class="docutils literal"><span class="pre">Eigen</span></code> which is enabled by
setting this parameter to <code class="docutils literal"><span class="pre">EIGEN_SPARSE</span></code>.  Lastly, <code class="docutils literal"><span class="pre">NO_SPARSE</span></code>
means that no sparse linear solver should be used; note that this is
irrespective of whether Ceres was compiled with support for one.</p>
<p><code class="docutils literal"><span class="pre">SuiteSparse</span></code> is a sophisticated and complex sparse linear
algebra library and should be used in general.</p>
<p>If your needs/platforms prevent you from using <code class="docutils literal"><span class="pre">SuiteSparse</span></code>,
consider using <code class="docutils literal"><span class="pre">CXSparse</span></code>, which is a much smaller, easier to
build library. As can be expected, its performance on large
problems is not comparable to that of <code class="docutils literal"><span class="pre">SuiteSparse</span></code>.</p>
<p>Last but not least, you can use the sparse linear algebra
routines in <code class="docutils literal"><span class="pre">Eigen</span></code>. Currently the performance of this library is
the poorest of the three, but this should change in the near
future.</p>
<p>Another thing to consider here is that the sparse Cholesky
factorization libraries in Eigen are licensed under <code class="docutils literal"><span class="pre">LGPL</span></code> and
building Ceres with support for <code class="docutils literal"><span class="pre">EIGEN_SPARSE</span></code> will result in an
LGPL licensed library (since the corresponding code from Eigen is
compiled into the library).</p>
<p>The upside is that you do not need to build and link to an external
library to use <code class="docutils literal"><span class="pre">EIGEN_SPARSE</span></code>.</p>
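<p>A minimal sketch of selecting the sparse backend explicitly, assuming Ceres
was built with SuiteSparse support:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>// Sketch: use SuiteSparse for the sparse Cholesky factorization.
ceres::Solver::Options options;
options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
options.sparse_linear_algebra_library_type = ceres::SUITE_SPARSE;
</pre></div></div>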
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options25num_linear_solver_threadsE">
<span id="ceres::Solver::Options::num_linear_solver_threads__i"></span>int <code class="descclassname">Solver::Options::</code><code class="descname">num_linear_solver_threads</code><a class="headerlink" href="#_CPPv2N6Solver7Options25num_linear_solver_threadsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">1</span></code></p>
<p>Number of threads used by the linear solver.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options22linear_solver_orderingE">
<span id="ceres::Solver::Options::linear_solver_ordering__shared_ptr:ParameterBlockOrdering:"></span>shared_ptr&lt;<a class="reference internal" href="#_CPPv2N5ceres22ParameterBlockOrderingE" title="ceres::ParameterBlockOrdering">ParameterBlockOrdering</a>&gt; <code class="descclassname">Solver::Options::</code><code class="descname">linear_solver_ordering</code><a class="headerlink" href="#_CPPv2N6Solver7Options22linear_solver_orderingE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">NULL</span></code></p>
<p>An instance of the ordering object informs the solver about the
desired order in which parameter blocks should be eliminated by the
linear solvers.</p>
<p>If <code class="docutils literal"><span class="pre">NULL</span></code>, the solver is free to choose an ordering that it
thinks is best.</p>
<p>See <a class="reference internal" href="#section-ordering"><span class="std std-ref">Ordering</span></a> for more details.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options29use_explicit_schur_complementE">
<span id="ceres::Solver::Options::use_explicit_schur_complement__b"></span>bool <code class="descclassname">Solver::Options::</code><code class="descname">use_explicit_schur_complement</code><a class="headerlink" href="#_CPPv2N6Solver7Options29use_explicit_schur_complementE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">false</span></code></p>
<p>Use an explicitly computed Schur complement matrix with
<code class="docutils literal"><span class="pre">ITERATIVE_SCHUR</span></code>.</p>
<p>By default this option is disabled and <code class="docutils literal"><span class="pre">ITERATIVE_SCHUR</span></code>
evaluates matrix-vector products between the Schur
complement and a vector implicitly by exploiting the algebraic
expression for the Schur complement.</p>
<p>The cost of this evaluation scales with the number of non-zeros in
the Jacobian.</p>
<p>For small to medium sized problems there is a sweet spot where
computing the Schur complement is cheap enough that it is much more
efficient to explicitly compute it and use it for evaluating the
matrix-vector products.</p>
<p>Enabling this option tells <code class="docutils literal"><span class="pre">ITERATIVE_SCHUR</span></code> to use an explicitly
computed Schur complement. This can improve the performance of the
<code class="docutils literal"><span class="pre">ITERATIVE_SCHUR</span></code> solver significantly.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options17use_post_orderingE">
<span id="ceres::Solver::Options::use_post_ordering__b"></span>bool <code class="descclassname">Solver::Options::</code><code class="descname">use_post_ordering</code><a class="headerlink" href="#_CPPv2N6Solver7Options17use_post_orderingE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">false</span></code></p>
<p>Sparse Cholesky factorization algorithms use a fill-reducing
ordering to permute the columns of the Jacobian matrix. There are
two ways of doing this.</p>
<ol class="arabic simple">
<li>Compute the Jacobian matrix in some order and then have the
factorization algorithm permute the columns of the Jacobian.</li>
<li>Compute the Jacobian with its columns already permuted.</li>
</ol>
<p>The first option incurs a significant memory penalty, as the
factorization algorithm has to make a copy of the permuted Jacobian
matrix. Thus Ceres pre-permutes the columns of the Jacobian matrix
and, generally speaking, there is no performance penalty for doing
so.</p>
<p>In some rare cases, it is worth using a more complicated reordering
algorithm which has slightly better runtime performance at the
expense of an extra copy of the Jacobian matrix. Setting
<code class="docutils literal"><span class="pre">use_postordering</span></code> to <code class="docutils literal"><span class="pre">true</span></code> enables this tradeoff.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options16dynamic_sparsityE">
<span id="ceres::Solver::Options::dynamic_sparsity__b"></span>bool <code class="descclassname">Solver::Options::</code><code class="descname">dynamic_sparsity</code><a class="headerlink" href="#_CPPv2N6Solver7Options16dynamic_sparsityE" title="Permalink to this definition">¶</a></dt>
<dd><p>Some non-linear least squares problems are symbolically dense but
numerically sparse, i.e., at any given state only a small number of
Jacobian entries are non-zero, but the position and number of
non-zeros is different depending on the state. For these problems
it can be useful to factorize the sparse Jacobian at each solver
iteration instead of including all of the zero entries in a single
general factorization.</p>
<p>If your problem does not have this property (or you do not know),
then it is probably best to keep this false, otherwise it will
likely lead to worse performance.</p>
<p>This setting only affects the <cite>SPARSE_NORMAL_CHOLESKY</cite> solver.</p>
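<p>A minimal sketch of enabling dynamic sparsity:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>// Sketch: enable dynamic sparsity for a numerically sparse problem.
ceres::Solver::Options options;
options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
options.dynamic_sparsity = true;
</pre></div></div>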
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options28min_linear_solver_iterationsE">
<span id="ceres::Solver::Options::min_linear_solver_iterations__i"></span>int <code class="descclassname">Solver::Options::</code><code class="descname">min_linear_solver_iterations</code><a class="headerlink" href="#_CPPv2N6Solver7Options28min_linear_solver_iterationsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">0</span></code></p>
<p>Minimum number of iterations used by the linear solver. This only
makes sense when the linear solver is an iterative solver, e.g.,
<code class="docutils literal"><span class="pre">ITERATIVE_SCHUR</span></code> or <code class="docutils literal"><span class="pre">CGNR</span></code>.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options28max_linear_solver_iterationsE">
<span id="ceres::Solver::Options::max_linear_solver_iterations__i"></span>int <code class="descclassname">Solver::Options::</code><code class="descname">max_linear_solver_iterations</code><a class="headerlink" href="#_CPPv2N6Solver7Options28max_linear_solver_iterationsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">500</span></code></p>
<p>Maximum number of iterations used by the linear solver. This only
makes sense when the linear solver is an iterative solver, e.g.,
<code class="docutils literal"><span class="pre">ITERATIVE_SCHUR</span></code> or <code class="docutils literal"><span class="pre">CGNR</span></code>.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options3etaE">
<span id="ceres::Solver::Options::eta__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">eta</code><a class="headerlink" href="#_CPPv2N6Solver7Options3etaE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">1e-1</span></code></p>
<p>Forcing sequence parameter. The truncated Newton solver uses this
number to control the relative accuracy with which the Newton step
is computed. This constant is passed to
<code class="docutils literal"><span class="pre">ConjugateGradientsSolver</span></code> which uses it to terminate the
iterations when</p>
<div class="math">
\[\frac{Q_i - Q_{i-1}}{Q_i} &lt; \frac{\eta}{i}\]</div>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options14jacobi_scalingE">
<span id="ceres::Solver::Options::jacobi_scaling__b"></span>bool <code class="descclassname">Solver::Options::</code><code class="descname">jacobi_scaling</code><a class="headerlink" href="#_CPPv2N6Solver7Options14jacobi_scalingE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">true</span></code></p>
<p><code class="docutils literal"><span class="pre">true</span></code> means that the Jacobian is scaled by the norm of its
columns before being passed to the linear solver. This improves the
numerical conditioning of the normal equations.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options20use_inner_iterationsE">
<span id="ceres::Solver::Options::use_inner_iterations__b"></span>bool <code class="descclassname">Solver::Options::</code><code class="descname">use_inner_iterations</code><a class="headerlink" href="#_CPPv2N6Solver7Options20use_inner_iterationsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">false</span></code></p>
<p>Use a non-linear version of a simplified variable projection
algorithm. Essentially this amounts to doing a further optimization
on each Newton/Trust region step using a coordinate descent
algorithm.  For more details, see <a class="reference internal" href="#section-inner-iterations"><span class="std std-ref">Inner Iterations</span></a>.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options25inner_iteration_toleranceE">
<span id="ceres::Solver::Options::inner_iteration_tolerance__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">inner_iteration_tolerance</code><a class="headerlink" href="#_CPPv2N6Solver7Options25inner_iteration_toleranceE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">1e-3</span></code></p>
<p>Generally speaking, inner iterations make significant progress in
the early stages of the solve and then their contribution drops
down sharply, at which point the time spent doing inner iterations
is not worth it.</p>
<p>Once the relative decrease in the objective function due to inner
iterations drops below <code class="docutils literal"><span class="pre">inner_iteration_tolerance</span></code>, the use of
inner iterations in subsequent trust region minimizer iterations is
disabled.</p>
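<p>A minimal sketch of enabling inner iterations:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>// Sketch: enable inner iterations with the default tolerance.
ceres::Solver::Options options;
options.use_inner_iterations = true;
options.inner_iteration_tolerance = 1e-3;  // the default, shown for illustration
</pre></div></div>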
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options24inner_iteration_orderingE">
<span id="ceres::Solver::Options::inner_iteration_ordering__shared_ptr:ParameterBlockOrdering:"></span>shared_ptr&lt;<a class="reference internal" href="#_CPPv2N5ceres22ParameterBlockOrderingE" title="ceres::ParameterBlockOrdering">ParameterBlockOrdering</a>&gt; <code class="descclassname">Solver::Options::</code><code class="descname">inner_iteration_ordering</code><a class="headerlink" href="#_CPPv2N6Solver7Options24inner_iteration_orderingE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">NULL</span></code></p>
<p>If <a class="reference internal" href="#_CPPv2N6Solver7Options20use_inner_iterationsE" title="ceres::Solver::Options::use_inner_iterations"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Options::use_inner_iterations</span></code></a> true, then the
user has two choices.</p>
<ol class="arabic simple">
<li>Let the solver heuristically decide which parameter blocks to
optimize in each inner iteration. To do this, set
<a class="reference internal" href="#_CPPv2N6Solver7Options24inner_iteration_orderingE" title="ceres::Solver::Options::inner_iteration_ordering"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Options::inner_iteration_ordering</span></code></a> to <code class="docutils literal"><span class="pre">NULL</span></code>.</li>
<li>Specify a collection of ordered independent sets. The lower
numbered groups are optimized before the higher number groups
during the inner optimization phase. Each group must be an
independent set. Not all parameter blocks need to be included in
the ordering.</li>
</ol>
<p>See <a class="reference internal" href="#section-ordering"><span class="std std-ref">Ordering</span></a> for more details.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options12logging_typeE">
<span id="ceres::Solver::Options::logging_type__LoggingType"></span>LoggingType <code class="descclassname">Solver::Options::</code><code class="descname">logging_type</code><a class="headerlink" href="#_CPPv2N6Solver7Options12logging_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">PER_MINIMIZER_ITERATION</span></code></p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options28minimizer_progress_to_stdoutE">
<span id="ceres::Solver::Options::minimizer_progress_to_stdout__b"></span>bool <code class="descclassname">Solver::Options::</code><code class="descname">minimizer_progress_to_stdout</code><a class="headerlink" href="#_CPPv2N6Solver7Options28minimizer_progress_to_stdoutE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">false</span></code></p>
<p>By default the <code class="xref cpp cpp-class docutils literal"><span class="pre">Minimizer</span></code> progress is logged to <code class="docutils literal"><span class="pre">STDERR</span></code>
depending on the <code class="docutils literal"><span class="pre">vlog</span></code> level. If this flag is set to true, and
<a class="reference internal" href="#_CPPv2N6Solver7Options12logging_typeE" title="ceres::Solver::Options::logging_type"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Options::logging_type</span></code></a> is not <code class="docutils literal"><span class="pre">SILENT</span></code>, the logging
output is sent to <code class="docutils literal"><span class="pre">STDOUT</span></code>.</p>
<p>For <code class="docutils literal"><span class="pre">TRUST_REGION_MINIMIZER</span></code> the progress display looks like</p>
<div class="highlight-bash"><div class="highlight"><pre><span></span>iter      cost      cost_change  <span class="p">|</span>gradient<span class="p">|</span>   <span class="p">|</span>step<span class="p">|</span>    tr_ratio  tr_radius  ls_iter  iter_time  total_time
   <span class="m">0</span>  4.185660e+06    0.00e+00    1.09e+08   0.00e+00   0.00e+00  1.00e+04       <span class="m">0</span>    7.59e-02    3.37e-01
   <span class="m">1</span>  1.062590e+05    4.08e+06    8.99e+06   5.36e+02   9.82e-01  3.00e+04       <span class="m">1</span>    1.65e-01    5.03e-01
   <span class="m">2</span>  4.992817e+04    5.63e+04    8.32e+06   3.19e+02   6.52e-01  3.09e+04       <span class="m">1</span>    1.45e-01    6.48e-01
</pre></div>
</div>
<p>Here</p>
<ol class="arabic simple">
<li><code class="docutils literal"><span class="pre">cost</span></code> is the value of the objective function.</li>
<li><code class="docutils literal"><span class="pre">cost_change</span></code> is the change in the value of the objective
function if the step computed in this iteration is accepted.</li>
<li><code class="docutils literal"><span class="pre">|gradient|</span></code> is the max norm of the gradient.</li>
<li><code class="docutils literal"><span class="pre">|step|</span></code> is the change in the parameter vector.</li>
<li><code class="docutils literal"><span class="pre">tr_ratio</span></code> is the ratio of the actual change in the objective
function value to the change in the value of the trust
region model.</li>
<li><code class="docutils literal"><span class="pre">tr_radius</span></code> is the size of the trust region radius.</li>
<li><code class="docutils literal"><span class="pre">ls_iter</span></code> is the number of linear solver iterations used to
compute the trust region step. For direct/factorization based
solvers it is always 1, for iterative solvers like
<code class="docutils literal"><span class="pre">ITERATIVE_SCHUR</span></code> it is the number of iterations of the
Conjugate Gradients algorithm.</li>
<li><code class="docutils literal"><span class="pre">iter_time</span></code> is the time take by the current iteration.</li>
<li><code class="docutils literal"><span class="pre">total_time</span></code> is the total time taken by the minimizer.</li>
</ol>
<p>For <code class="docutils literal"><span class="pre">LINE_SEARCH_MINIMIZER</span></code> the progress display looks like</p>
<div class="highlight-bash"><div class="highlight"><pre><span></span>0: f: 2.317806e+05 d: 0.00e+00 g: 3.19e-01 h: 0.00e+00 s: 0.00e+00 e:  <span class="m">0</span> it: 2.98e-02 tt: 8.50e-02
1: f: 2.312019e+05 d: 5.79e+02 g: 3.18e-01 h: 2.41e+01 s: 1.00e+00 e:  <span class="m">1</span> it: 4.54e-02 tt: 1.31e-01
2: f: 2.300462e+05 d: 1.16e+03 g: 3.17e-01 h: 4.90e+01 s: 2.54e-03 e:  <span class="m">1</span> it: 4.96e-02 tt: 1.81e-01
</pre></div>
</div>
<p>Here</p>
<ol class="arabic simple">
<li><code class="docutils literal"><span class="pre">f</span></code> is the value of the objective function.</li>
<li><code class="docutils literal"><span class="pre">d</span></code> is the change in the value of the objective function if
the step computed in this iteration is accepted.</li>
<li><code class="docutils literal"><span class="pre">g</span></code> is the max norm of the gradient.</li>
<li><code class="docutils literal"><span class="pre">h</span></code> is the change in the parameter vector.</li>
<li><code class="docutils literal"><span class="pre">s</span></code> is the optimal step length computed by the line search.</li>
<li><code class="docutils literal"><span class="pre">it</span></code> is the time take by the current iteration.</li>
<li><code class="docutils literal"><span class="pre">tt</span></code> is the total time taken by the minimizer.</li>
</ol>
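<p>A minimal sketch of routing this per iteration progress to <code class="docutils literal"><span class="pre">STDOUT</span></code>:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>#include "ceres/ceres.h"

ceres::Solver::Options options;
// Logging must not be silenced for any progress to be printed.
options.logging_type = ceres::PER_MINIMIZER_ITERATION;
// Send the per iteration progress table to STDOUT instead of the
// vlog controlled STDERR output.
options.minimizer_progress_to_stdout = true;
</pre></div>
</div>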
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options41trust_region_minimizer_iterations_to_dumpE">
<span id="ceres::Solver::Options::trust_region_minimizer_iterations_to_dump__vector:i:"></span>vector&lt;int&gt; <code class="descclassname">Solver::Options::</code><code class="descname">trust_region_minimizer_iterations_to_dump</code><a class="headerlink" href="#_CPPv2N6Solver7Options41trust_region_minimizer_iterations_to_dumpE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">empty</span></code></p>
<p>List of iterations at which the trust region minimizer should dump
the trust region problem. Useful for testing and benchmarking. If
<code class="docutils literal"><span class="pre">empty</span></code>, no problems are dumped.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options35trust_region_problem_dump_directoryE">
<span id="ceres::Solver::Options::trust_region_problem_dump_directory__string"></span>string <code class="descclassname">Solver::Options::</code><code class="descname">trust_region_problem_dump_directory</code><a class="headerlink" href="#_CPPv2N6Solver7Options35trust_region_problem_dump_directoryE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">/tmp</span></code></p>
<p>Directory to which the problems should be written. Should be
non-empty if
<a class="reference internal" href="#_CPPv2N6Solver7Options41trust_region_minimizer_iterations_to_dumpE" title="ceres::Solver::Options::trust_region_minimizer_iterations_to_dump"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Options::trust_region_minimizer_iterations_to_dump</span></code></a> is
non-empty and
<code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Options::trust_region_problem_dump_format_type</span></code> is not
<code class="docutils literal"><span class="pre">CONSOLE</span></code>.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options32trust_region_problem_dump_formatE">
<span id="ceres::Solver::Options::trust_region_problem_dump_format__DumpFormatType"></span>DumpFormatType <code class="descclassname">Solver::Options::</code><code class="descname">trust_region_problem_dump_format</code><a class="headerlink" href="#_CPPv2N6Solver7Options32trust_region_problem_dump_formatE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">TEXTFILE</span></code></p>
<p>The format in which trust region problems should be logged when
<a class="reference internal" href="#_CPPv2N6Solver7Options41trust_region_minimizer_iterations_to_dumpE" title="ceres::Solver::Options::trust_region_minimizer_iterations_to_dump"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Options::trust_region_minimizer_iterations_to_dump</span></code></a>
is non-empty. There are two options:</p>
<ul>
<li><dl class="first docutils">
<dt><code class="docutils literal"><span class="pre">CONSOLE</span></code> prints the linear least squares problem in a human</dt>
<dd><p class="first last">readable format to <code class="docutils literal"><span class="pre">stderr</span></code>. The Jacobian is printed as a
dense matrix. The vectors <span class="math">\(D\)</span>, <span class="math">\(x\)</span> and <span class="math">\(f\)</span> are
printed as dense vectors. This should only be used for small
problems.</p>
</dd>
</dl>
</li>
<li><p class="first"><code class="docutils literal"><span class="pre">TEXTFILE</span></code> Write out the linear least squares problem to the
directory pointed to by
<a class="reference internal" href="#_CPPv2N6Solver7Options35trust_region_problem_dump_directoryE" title="ceres::Solver::Options::trust_region_problem_dump_directory"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Options::trust_region_problem_dump_directory</span></code></a> as
text files which can be read into <code class="docutils literal"><span class="pre">MATLAB/Octave</span></code>. The Jacobian
is dumped as a text file containing <span class="math">\((i,j,s)\)</span> triplets, the
vectors <span class="math">\(D\)</span>, <cite>x</cite> and <cite>f</cite> are dumped as text files
containing a list of their values.</p>
<p>A <code class="docutils literal"><span class="pre">MATLAB/Octave</span></code> script called
<code class="docutils literal"><span class="pre">ceres_solver_iteration_???.m</span></code> is also output, which can be
used to parse and load the problem into memory.</p>
</li>
</ul>
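<p>A minimal sketch of dumping a couple of trust region problems to disk;
the iteration numbers and directory used here are illustrative:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>#include "ceres/ceres.h"

ceres::Solver::Options options;
// Dump the trust region problems solved in iterations 1 and 5.
options.trust_region_minimizer_iterations_to_dump.push_back(1);
options.trust_region_minimizer_iterations_to_dump.push_back(5);
// Write the resulting text files (the default TEXTFILE format) here.
options.trust_region_problem_dump_directory = "/tmp/ceres_dump";
</pre></div>
</div>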
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options15check_gradientsE">
<span id="ceres::Solver::Options::check_gradients__b"></span>bool <code class="descclassname">Solver::Options::</code><code class="descname">check_gradients</code><a class="headerlink" href="#_CPPv2N6Solver7Options15check_gradientsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">false</span></code></p>
<p>Check all Jacobians computed by each residual block with finite
differences. This is expensive since it involves computing the
derivative by normal means (e.g. user specified, autodiff, etc),
then also computing it using finite differences. The results are
compared, and if they differ substantially, the optimization fails
and the details are stored in the solver summary.</p>
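<p>A minimal sketch of enabling the gradient checker; the precision value
shown is illustrative:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>#include "ceres/ceres.h"

ceres::Solver::Options options;
// Compare every user supplied Jacobian against a finite difference
// estimate and fail the solve if they differ substantially.
options.check_gradients = true;
options.gradient_check_relative_precision = 1e-8;
</pre></div>
</div>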
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options33gradient_check_relative_precisionE">
<span id="ceres::Solver::Options::gradient_check_relative_precision__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">gradient_check_relative_precision</code><a class="headerlink" href="#_CPPv2N6Solver7Options33gradient_check_relative_precisionE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">1e08</span></code></p>
<p>Precision to check for in the gradient checker. If the relative
difference between an element in a Jacobian exceeds this number,
then the Jacobian for that cost term is dumped.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options52gradient_check_numeric_derivative_relative_step_sizeE">
<span id="ceres::Solver::Options::gradient_check_numeric_derivative_relative_step_size__double"></span>double <code class="descclassname">Solver::Options::</code><code class="descname">gradient_check_numeric_derivative_relative_step_size</code><a class="headerlink" href="#_CPPv2N6Solver7Options52gradient_check_numeric_derivative_relative_step_sizeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">1e-6</span></code></p>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p class="last">This option only applies to the numeric differentiation used for
checking the user provided derivatives when
<cite>Solver::Options::check_gradients</cite> is true. If you are using
<a class="reference internal" href="nnls_modeling.html#_CPPv2N5ceres23NumericDiffCostFunctionE" title="ceres::NumericDiffCostFunction"><code class="xref cpp cpp-class docutils literal"><span class="pre">NumericDiffCostFunction</span></code></a> and are interested in changing
the step size for numeric differentiation in your cost function,
please have a look at <code class="xref cpp cpp-class docutils literal"><span class="pre">NumericDiffOptions</span></code>.</p>
</div>
<p>Relative shift used for taking numeric derivatives when
<cite>Solver::Options::check_gradients</cite> is <cite>true</cite>.</p>
<p>For finite differencing, each dimension is evaluated at slightly
shifted values, e.g., for forward differences, the numerical
derivative is</p>
<div class="math">
\[\begin{split}\delta &amp;= gradient\_check\_numeric\_derivative\_relative\_step\_size\\
\Delta f &amp;= \frac{f((1 + \delta)  x) - f(x)}{\delta x}\end{split}\]</div>
<p>The finite differencing is done along each dimension. The reason to
use a relative (rather than absolute) step size is that this way,
numeric differentiation works for functions where the arguments are
typically large (e.g. <span class="math">\(10^9\)</span>) and when the values are small
(e.g. <span class="math">\(10^{-5}\)</span>). It is possible to construct <em>torture cases</em>
which break this finite difference heuristic, but they do not come
up often in practice.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options9callbacksE">
<span id="ceres::Solver::Options::callbacks__vector:IterationCallback:"></span>vector&lt;<a class="reference internal" href="#_CPPv2N5ceres17IterationCallbackE" title="ceres::IterationCallback">IterationCallback</a>&gt; <code class="descclassname">Solver::Options::</code><code class="descname">callbacks</code><a class="headerlink" href="#_CPPv2N6Solver7Options9callbacksE" title="Permalink to this definition">¶</a></dt>
<dd><p>Callbacks that are executed at the end of each iteration of the
<code class="xref cpp cpp-class docutils literal"><span class="pre">Minimizer</span></code>. They are executed in the order that they are
specified in this vector. By default, parameter blocks are updated
only at the end of the optimization, i.e., when the
<code class="xref cpp cpp-class docutils literal"><span class="pre">Minimizer</span></code> terminates. This behavior is controlled by
<code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Options::update_state_every_variable</span></code>. If the user
wishes to have access to the update parameter blocks when his/her
callbacks are executed, then set
<a class="reference internal" href="#_CPPv2N6Solver7Options28update_state_every_iterationE" title="ceres::Solver::Options::update_state_every_iteration"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Options::update_state_every_iteration</span></code></a> to true.</p>
<p>The solver does NOT take ownership of these pointers.</p>
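<p>A minimal sketch of registering a callback; <code class="docutils literal"><span class="pre">MyCallback</span></code> is a
hypothetical, do-nothing implementation of the <code class="docutils literal"><span class="pre">IterationCallback</span></code>
interface described below:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>#include "ceres/ceres.h"

// A hypothetical no-op callback used only for illustration.
class MyCallback : public ceres::IterationCallback {
 public:
  virtual ~MyCallback() {}
  virtual ceres::CallbackReturnType operator()(
      const ceres::IterationSummary&amp; summary) {
    return ceres::SOLVER_CONTINUE;
  }
};

void ConfigureCallbacks(MyCallback* callback,
                        ceres::Solver::Options* options) {
  // The options object stores raw pointers and does not take
  // ownership; the callback must outlive the call to Solve().
  options-&gt;callbacks.push_back(callback);
  // Make the updated parameter values visible inside the callback.
  options-&gt;update_state_every_iteration = true;
}
</pre></div>
</div>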
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Options28update_state_every_iterationE">
<span id="ceres::Solver::Options::update_state_every_iteration__b"></span>bool <code class="descclassname">Solver::Options::</code><code class="descname">update_state_every_iteration</code><a class="headerlink" href="#_CPPv2N6Solver7Options28update_state_every_iterationE" title="Permalink to this definition">¶</a></dt>
<dd><p>Default: <code class="docutils literal"><span class="pre">false</span></code></p>
<p>Normally the parameter blocks are only updated when the solver
terminates. Setting this to true updates them in every
iteration. This setting is useful when building an interactive
application using Ceres and using an <a class="reference internal" href="#_CPPv2N5ceres17IterationCallbackE" title="ceres::IterationCallback"><code class="xref cpp cpp-class docutils literal"><span class="pre">IterationCallback</span></code></a>.</p>
</dd></dl>

</div>
<div class="section" id="parameterblockordering">
<h2><a class="reference internal" href="#_CPPv2N5ceres22ParameterBlockOrderingE" title="ceres::ParameterBlockOrdering"><code class="xref cpp cpp-class docutils literal"><span class="pre">ParameterBlockOrdering</span></code></a><a class="headerlink" href="#parameterblockordering" title="Permalink to this headline">¶</a></h2>
<dl class="class">
<dt id="_CPPv2N5ceres22ParameterBlockOrderingE">
<span id="ceres::ParameterBlockOrdering"></span><em class="property">class </em><code class="descclassname"></code><code class="descname">ParameterBlockOrdering</code><a class="headerlink" href="#_CPPv2N5ceres22ParameterBlockOrderingE" title="Permalink to this definition">¶</a></dt>
<dd><p><code class="docutils literal"><span class="pre">ParameterBlockOrdering</span></code> is a class for storing and manipulating
an ordered collection of groups/sets with the following semantics:</p>
<p>Group IDs are non-negative integer values. Elements are any type
that can serve as a key in a map or an element of a set.</p>
<p>An element can only belong to one group at a time. A group may
contain an arbitrary number of elements.</p>
<p>Groups are ordered by their group id.</p>
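<p>A minimal sketch of these semantics, using two hypothetical parameter
blocks <code class="docutils literal"><span class="pre">x</span></code> and <code class="docutils literal"><span class="pre">y</span></code>:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>#include "ceres/ceres.h"

void OrderingExample() {
  double x[3];  // hypothetical parameter blocks
  double y[2];

  ceres::ParameterBlockOrdering ordering;
  ordering.AddElementToGroup(x, 0);  // x lives in group 0
  ordering.AddElementToGroup(y, 1);  // y lives in group 1
  ordering.AddElementToGroup(y, 0);  // moves y from group 1 to group 0

  // Both elements are now in group 0 and group 1 is empty, so:
  //   ordering.GroupId(x) == 0 and ordering.GroupId(y) == 0
  //   ordering.NumElements() == 2 and ordering.NumGroups() == 1
}
</pre></div>
</div>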
</dd></dl>

<dl class="function">
<dt id="_CPPv2N5ceres22ParameterBlockOrdering17AddElementToGroupEPKdKi">
<span id="ceres::ParameterBlockOrdering::AddElementToGroup__doubleCP.iC"></span>bool <code class="descclassname">ParameterBlockOrdering::</code><code class="descname">AddElementToGroup</code><span class="sig-paren">(</span><em class="property">const</em> double *<em>element</em>, <em class="property">const</em> int <em>group</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N5ceres22ParameterBlockOrdering17AddElementToGroupEPKdKi" title="Permalink to this definition">¶</a></dt>
<dd><p>Add an element to a group. If a group with this id does not exist,
one is created. This method can be called any number of times for
the same element. Group ids should be non-negative numbers.  Return
value indicates if adding the element was a success.</p>
</dd></dl>

<dl class="function">
<dt id="_CPPv2N5ceres22ParameterBlockOrdering5ClearEv">
<span id="ceres::ParameterBlockOrdering::Clear"></span>void <code class="descclassname">ParameterBlockOrdering::</code><code class="descname">Clear</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N5ceres22ParameterBlockOrdering5ClearEv" title="Permalink to this definition">¶</a></dt>
<dd><p>Clear the ordering.</p>
</dd></dl>

<dl class="function">
<dt id="_CPPv2N5ceres22ParameterBlockOrdering6RemoveEPKd">
<span id="ceres::ParameterBlockOrdering::Remove__doubleCP"></span>bool <code class="descclassname">ParameterBlockOrdering::</code><code class="descname">Remove</code><span class="sig-paren">(</span><em class="property">const</em> double *<em>element</em><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N5ceres22ParameterBlockOrdering6RemoveEPKd" title="Permalink to this definition">¶</a></dt>
<dd><p>Remove the element, no matter what group it is in. If the element
is not a member of any group, calling this method will result in a
crash.  Return value indicates if the element was actually removed.</p>
</dd></dl>

<dl class="function">
<dt id="_CPPv2N5ceres22ParameterBlockOrdering7ReverseEv">
<span id="ceres::ParameterBlockOrdering::Reverse"></span>void <code class="descclassname">ParameterBlockOrdering::</code><code class="descname">Reverse</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#_CPPv2N5ceres22ParameterBlockOrdering7ReverseEv" title="Permalink to this definition">¶</a></dt>
<dd><p>Reverse the order of the groups in place.</p>
</dd></dl>

<dl class="function">
<dt id="_CPPv2NK5ceres22ParameterBlockOrdering7GroupIdEPKd">
<span id="ceres::ParameterBlockOrdering::GroupId__doubleCPC"></span>int <code class="descclassname">ParameterBlockOrdering::</code><code class="descname">GroupId</code><span class="sig-paren">(</span><em class="property">const</em> double *<em>element</em><span class="sig-paren">)</span> <em class="property">const</em><a class="headerlink" href="#_CPPv2NK5ceres22ParameterBlockOrdering7GroupIdEPKd" title="Permalink to this definition">¶</a></dt>
<dd><p>Return the group id for the element. If the element is not a member
of any group, return -1.</p>
</dd></dl>

<dl class="function">
<dt id="_CPPv2NK5ceres22ParameterBlockOrdering8IsMemberEPKd">
<span id="ceres::ParameterBlockOrdering::IsMember__doubleCPC"></span>bool <code class="descclassname">ParameterBlockOrdering::</code><code class="descname">IsMember</code><span class="sig-paren">(</span><em class="property">const</em> double *<em>element</em><span class="sig-paren">)</span> <em class="property">const</em><a class="headerlink" href="#_CPPv2NK5ceres22ParameterBlockOrdering8IsMemberEPKd" title="Permalink to this definition">¶</a></dt>
<dd><p>True if there is a group containing the parameter block.</p>
</dd></dl>

<dl class="function">
<dt id="_CPPv2NK5ceres22ParameterBlockOrdering9GroupSizeEKi">
<span id="ceres::ParameterBlockOrdering::GroupSize__iCC"></span>int <code class="descclassname">ParameterBlockOrdering::</code><code class="descname">GroupSize</code><span class="sig-paren">(</span><em class="property">const</em> int <em>group</em><span class="sig-paren">)</span> <em class="property">const</em><a class="headerlink" href="#_CPPv2NK5ceres22ParameterBlockOrdering9GroupSizeEKi" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of elements in the group with the given id. This function
always succeeds, i.e., implicitly there exists a group for every
integer and groups with no elements assigned to them have size zero.</p>
</dd></dl>

<dl class="function">
<dt id="_CPPv2NK5ceres22ParameterBlockOrdering11NumElementsEv">
<span id="ceres::ParameterBlockOrdering::NumElementsC"></span>int <code class="descclassname">ParameterBlockOrdering::</code><code class="descname">NumElements</code><span class="sig-paren">(</span><span class="sig-paren">)</span> <em class="property">const</em><a class="headerlink" href="#_CPPv2NK5ceres22ParameterBlockOrdering11NumElementsEv" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of elements in the ordering.</p>
</dd></dl>

<dl class="function">
<dt id="_CPPv2NK5ceres22ParameterBlockOrdering9NumGroupsEv">
<span id="ceres::ParameterBlockOrdering::NumGroupsC"></span>int <code class="descclassname">ParameterBlockOrdering::</code><code class="descname">NumGroups</code><span class="sig-paren">(</span><span class="sig-paren">)</span> <em class="property">const</em><a class="headerlink" href="#_CPPv2NK5ceres22ParameterBlockOrdering9NumGroupsEv" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of groups with one or more elements.</p>
</dd></dl>

</div>
<div class="section" id="iterationcallback">
<h2><a class="reference internal" href="#_CPPv2N5ceres17IterationCallbackE" title="ceres::IterationCallback"><code class="xref cpp cpp-class docutils literal"><span class="pre">IterationCallback</span></code></a><a class="headerlink" href="#iterationcallback" title="Permalink to this headline">¶</a></h2>
<dl class="class">
<dt id="_CPPv2N5ceres16IterationSummaryE">
<span id="ceres::IterationSummary"></span><em class="property">class </em><code class="descclassname"></code><code class="descname">IterationSummary</code><a class="headerlink" href="#_CPPv2N5ceres16IterationSummaryE" title="Permalink to this definition">¶</a></dt>
<dd><p><a class="reference internal" href="#_CPPv2N5ceres16IterationSummaryE" title="ceres::IterationSummary"><code class="xref cpp cpp-class docutils literal"><span class="pre">IterationSummary</span></code></a> describes the state of the minimizer at
the end of each iteration.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary9iterationE">
<span id="ceres::IterationSummary::iteration__int32"></span>int32 <code class="descclassname">IterationSummary::</code><code class="descname">iteration</code><a class="headerlink" href="#_CPPv2N16IterationSummary9iterationE" title="Permalink to this definition">¶</a></dt>
<dd><p>Current iteration number.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary13step_is_validE">
<span id="ceres::IterationSummary::step_is_valid__b"></span>bool <code class="descclassname">IterationSummary::</code><code class="descname">step_is_valid</code><a class="headerlink" href="#_CPPv2N16IterationSummary13step_is_validE" title="Permalink to this definition">¶</a></dt>
<dd><p>Step was numerically valid, i.e., all values are finite and the
step reduces the value of the linearized model.</p>
<p><strong>Note</strong>: <a class="reference internal" href="#_CPPv2N16IterationSummary13step_is_validE" title="ceres::IterationSummary::step_is_valid"><code class="xref cpp cpp-member docutils literal"><span class="pre">IterationSummary::step_is_valid</span></code></a> is <cite>false</cite>
when <a class="reference internal" href="#_CPPv2N16IterationSummary9iterationE" title="ceres::IterationSummary::iteration"><code class="xref cpp cpp-member docutils literal"><span class="pre">IterationSummary::iteration</span></code></a> = 0.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary20step_is_nonmonotonicE">
<span id="ceres::IterationSummary::step_is_nonmonotonic__b"></span>bool <code class="descclassname">IterationSummary::</code><code class="descname">step_is_nonmonotonic</code><a class="headerlink" href="#_CPPv2N16IterationSummary20step_is_nonmonotonicE" title="Permalink to this definition">¶</a></dt>
<dd><p>Step did not reduce the value of the objective function
sufficiently, but it was accepted because of the relaxed
acceptance criterion used by the non-monotonic trust region
algorithm.</p>
<p><strong>Note</strong>: <a class="reference internal" href="#_CPPv2N16IterationSummary20step_is_nonmonotonicE" title="ceres::IterationSummary::step_is_nonmonotonic"><code class="xref cpp cpp-member docutils literal"><span class="pre">IterationSummary::step_is_nonmonotonic</span></code></a> is
<cite>false</cite> when <a class="reference internal" href="#_CPPv2N16IterationSummary9iterationE" title="ceres::IterationSummary::iteration"><code class="xref cpp cpp-member docutils literal"><span class="pre">IterationSummary::iteration</span></code></a> = 0.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary18step_is_successfulE">
<span id="ceres::IterationSummary::step_is_successful__b"></span>bool <code class="descclassname">IterationSummary::</code><code class="descname">step_is_successful</code><a class="headerlink" href="#_CPPv2N16IterationSummary18step_is_successfulE" title="Permalink to this definition">¶</a></dt>
<dd><p>Whether or not the minimizer accepted this step.</p>
<p>If the ordinary trust region algorithm is used, this means that the
relative reduction in the objective function value was greater than
<a class="reference internal" href="#_CPPv2N6Solver7Options21min_relative_decreaseE" title="ceres::Solver::Options::min_relative_decrease"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Options::min_relative_decrease</span></code></a>. However, if the
non-monotonic trust region algorithm is used
(<a class="reference internal" href="#_CPPv2N6Solver7Options22use_nonmonotonic_stepsE" title="ceres::Solver::Options::use_nonmonotonic_steps"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Options::use_nonmonotonic_steps</span></code></a> = <cite>true</cite>), then
even if the relative decrease is not sufficient, the algorithm may
accept the step and the step is declared successful.</p>
<p><strong>Note</strong>: <a class="reference internal" href="#_CPPv2N16IterationSummary18step_is_successfulE" title="ceres::IterationSummary::step_is_successful"><code class="xref cpp cpp-member docutils literal"><span class="pre">IterationSummary::step_is_successful</span></code></a> is <cite>false</cite>
when when <a class="reference internal" href="#_CPPv2N16IterationSummary9iterationE" title="ceres::IterationSummary::iteration"><code class="xref cpp cpp-member docutils literal"><span class="pre">IterationSummary::iteration</span></code></a> = 0.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary4costE">
<span id="ceres::IterationSummary::cost__double"></span>double <code class="descclassname">IterationSummary::</code><code class="descname">cost</code><a class="headerlink" href="#_CPPv2N16IterationSummary4costE" title="Permalink to this definition">¶</a></dt>
<dd><p>Value of the objective function.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary11cost_changeE">
<span id="ceres::IterationSummary::cost_change__double"></span>double <code class="descclassname">IterationSummary::</code><code class="descname">cost_change</code><a class="headerlink" href="#_CPPv2N16IterationSummary11cost_changeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Change in the value of the objective function in this
iteration. This can be positive or negative.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary17gradient_max_normE">
<span id="ceres::IterationSummary::gradient_max_norm__double"></span>double <code class="descclassname">IterationSummary::</code><code class="descname">gradient_max_norm</code><a class="headerlink" href="#_CPPv2N16IterationSummary17gradient_max_normE" title="Permalink to this definition">¶</a></dt>
<dd><p>Infinity norm of the gradient vector.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary13gradient_normE">
<span id="ceres::IterationSummary::gradient_norm__double"></span>double <code class="descclassname">IterationSummary::</code><code class="descname">gradient_norm</code><a class="headerlink" href="#_CPPv2N16IterationSummary13gradient_normE" title="Permalink to this definition">¶</a></dt>
<dd><p>2-norm of the gradient vector.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary9step_normE">
<span id="ceres::IterationSummary::step_norm__double"></span>double <code class="descclassname">IterationSummary::</code><code class="descname">step_norm</code><a class="headerlink" href="#_CPPv2N16IterationSummary9step_normE" title="Permalink to this definition">¶</a></dt>
<dd><p>2-norm of the size of the step computed in this iteration.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary17relative_decreaseE">
<span id="ceres::IterationSummary::relative_decrease__double"></span>double <code class="descclassname">IterationSummary::</code><code class="descname">relative_decrease</code><a class="headerlink" href="#_CPPv2N16IterationSummary17relative_decreaseE" title="Permalink to this definition">¶</a></dt>
<dd><p>For trust region algorithms, the ratio of the actual change in cost
and the change in the cost of the linearized approximation.</p>
<p>This field is not used when a line search minimizer is used.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary19trust_region_radiusE">
<span id="ceres::IterationSummary::trust_region_radius__double"></span>double <code class="descclassname">IterationSummary::</code><code class="descname">trust_region_radius</code><a class="headerlink" href="#_CPPv2N16IterationSummary19trust_region_radiusE" title="Permalink to this definition">¶</a></dt>
<dd><p>Size of the trust region at the end of the current iteration. For
the Levenberg-Marquardt algorithm, the regularization parameter is
1.0 / <cite>IterationSummary::trust_region_radius</cite>.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary3etaE">
<span id="ceres::IterationSummary::eta__double"></span>double <code class="descclassname">IterationSummary::</code><code class="descname">eta</code><a class="headerlink" href="#_CPPv2N16IterationSummary3etaE" title="Permalink to this definition">¶</a></dt>
<dd><p>For the inexact step Levenberg-Marquardt algorithm, this is the
relative accuracy with which the step is solved. This number is
only applicable to the iterative solvers capable of solving linear
systems inexactly. Factorization-based exact solvers always have an
eta of 0.0.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary9step_sizeE">
<span id="ceres::IterationSummary::step_size__double"></span>double <code class="descclassname">IterationSummary::</code><code class="descname">step_size</code><a class="headerlink" href="#_CPPv2N16IterationSummary9step_sizeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Step size computed by the line search algorithm.</p>
<p>This field is not used when a trust region minimizer is used.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary32line_search_function_evaluationsE">
<span id="ceres::IterationSummary::line_search_function_evaluations__i"></span>int <code class="descclassname">IterationSummary::</code><code class="descname">line_search_function_evaluations</code><a class="headerlink" href="#_CPPv2N16IterationSummary32line_search_function_evaluationsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of function evaluations used by the line search algorithm.</p>
<p>This field is not used when a trust region minimizer is used.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary24linear_solver_iterationsE">
<span id="ceres::IterationSummary::linear_solver_iterations__i"></span>int <code class="descclassname">IterationSummary::</code><code class="descname">linear_solver_iterations</code><a class="headerlink" href="#_CPPv2N16IterationSummary24linear_solver_iterationsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of iterations taken by the linear solver to solve for the
trust region step.</p>
<p>Currently this field is not used when a line search minimizer is
used.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary25iteration_time_in_secondsE">
<span id="ceres::IterationSummary::iteration_time_in_seconds__double"></span>double <code class="descclassname">IterationSummary::</code><code class="descname">iteration_time_in_seconds</code><a class="headerlink" href="#_CPPv2N16IterationSummary25iteration_time_in_secondsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Time (in seconds) spent inside the minimizer loop in the current
iteration.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary27step_solver_time_in_secondsE">
<span id="ceres::IterationSummary::step_solver_time_in_seconds__double"></span>double <code class="descclassname">IterationSummary::</code><code class="descname">step_solver_time_in_seconds</code><a class="headerlink" href="#_CPPv2N16IterationSummary27step_solver_time_in_secondsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Time (in seconds) spent inside the trust region step solver.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N16IterationSummary26cumulative_time_in_secondsE">
<span id="ceres::IterationSummary::cumulative_time_in_seconds__double"></span>double <code class="descclassname">IterationSummary::</code><code class="descname">cumulative_time_in_seconds</code><a class="headerlink" href="#_CPPv2N16IterationSummary26cumulative_time_in_secondsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Time (in seconds) since the user called Solve().</p>
</dd></dl>

<dl class="class">
<dt id="_CPPv2N5ceres17IterationCallbackE">
<span id="ceres::IterationCallback"></span><em class="property">class </em><code class="descclassname"></code><code class="descname">IterationCallback</code><a class="headerlink" href="#_CPPv2N5ceres17IterationCallbackE" title="Permalink to this definition">¶</a></dt>
<dd><p>Interface for specifying callbacks that are executed at the end of
each iteration of the minimizer.</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span><span class="k">class</span> <span class="nc">IterationCallback</span> <span class="p">{</span>
 <span class="k">public</span><span class="o">:</span>
  <span class="k">virtual</span> <span class="o">~</span><span class="n">IterationCallback</span><span class="p">()</span> <span class="p">{}</span>
  <span class="k">virtual</span> <span class="n">CallbackReturnType</span> <span class="k">operator</span><span class="p">()(</span><span class="k">const</span> <span class="n">IterationSummary</span><span class="o">&amp;</span> <span class="n">summary</span><span class="p">)</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span>
<span class="p">};</span>
</pre></div>
</div>
<p>The solver uses the return value of <code class="docutils literal"><span class="pre">operator()</span></code> to decide whether
to continue solving or to terminate. The user can return three
values.</p>
<ol class="arabic simple">
<li><code class="docutils literal"><span class="pre">SOLVER_ABORT</span></code> indicates that the callback detected an abnormal
situation. The solver returns without updating the parameter
blocks (unless <code class="docutils literal"><span class="pre">Solver::Options::update_state_every_iteration</span></code> is
set true). Solver returns with <code class="docutils literal"><span class="pre">Solver::Summary::termination_type</span></code>
set to <code class="docutils literal"><span class="pre">USER_FAILURE</span></code>.</li>
<li><code class="docutils literal"><span class="pre">SOLVER_TERMINATE_SUCCESSFULLY</span></code> indicates that there is no need
to optimize anymore (some user specified termination criterion
has been met). Solver returns with
<code class="docutils literal"><span class="pre">Solver::Summary::termination_type`</span></code> set to <code class="docutils literal"><span class="pre">USER_SUCCESS</span></code>.</li>
<li><code class="docutils literal"><span class="pre">SOLVER_CONTINUE</span></code> indicates that the solver should continue
optimizing.</li>
</ol>
<p>For example, the following <a class="reference internal" href="#_CPPv2N5ceres17IterationCallbackE" title="ceres::IterationCallback"><code class="xref cpp cpp-class docutils literal"><span class="pre">IterationCallback</span></code></a> is used
internally by Ceres to log the progress of the optimization.</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span><span class="k">class</span> <span class="nc">LoggingCallback</span> <span class="o">:</span> <span class="k">public</span> <span class="n">IterationCallback</span> <span class="p">{</span>
 <span class="k">public</span><span class="o">:</span>
  <span class="k">explicit</span> <span class="n">LoggingCallback</span><span class="p">(</span><span class="kt">bool</span> <span class="n">log_to_stdout</span><span class="p">)</span>
      <span class="o">:</span> <span class="n">log_to_stdout_</span><span class="p">(</span><span class="n">log_to_stdout</span><span class="p">)</span> <span class="p">{}</span>

  <span class="o">~</span><span class="n">LoggingCallback</span><span class="p">()</span> <span class="p">{}</span>

  <span class="n">CallbackReturnType</span> <span class="k">operator</span><span class="p">()(</span><span class="k">const</span> <span class="n">IterationSummary</span><span class="o">&amp;</span> <span class="n">summary</span><span class="p">)</span> <span class="p">{</span>
    <span class="k">const</span> <span class="kt">char</span><span class="o">*</span> <span class="n">kReportRowFormat</span> <span class="o">=</span>
        <span class="s">&quot;% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e &quot;</span>
        <span class="s">&quot;rho:% 3.2e mu:% 3.2e eta:% 3.2e li:% 3d&quot;</span><span class="p">;</span>
    <span class="n">string</span> <span class="n">output</span> <span class="o">=</span> <span class="n">StringPrintf</span><span class="p">(</span><span class="n">kReportRowFormat</span><span class="p">,</span>
                                 <span class="n">summary</span><span class="p">.</span><span class="n">iteration</span><span class="p">,</span>
                                 <span class="n">summary</span><span class="p">.</span><span class="n">cost</span><span class="p">,</span>
                                 <span class="n">summary</span><span class="p">.</span><span class="n">cost_change</span><span class="p">,</span>
                                 <span class="n">summary</span><span class="p">.</span><span class="n">gradient_max_norm</span><span class="p">,</span>
                                 <span class="n">summary</span><span class="p">.</span><span class="n">step_norm</span><span class="p">,</span>
                                 <span class="n">summary</span><span class="p">.</span><span class="n">relative_decrease</span><span class="p">,</span>
                                 <span class="n">summary</span><span class="p">.</span><span class="n">trust_region_radius</span><span class="p">,</span>
                                 <span class="n">summary</span><span class="p">.</span><span class="n">eta</span><span class="p">,</span>
                                 <span class="n">summary</span><span class="p">.</span><span class="n">linear_solver_iterations</span><span class="p">);</span>
    <span class="k">if</span> <span class="p">(</span><span class="n">log_to_stdout_</span><span class="p">)</span> <span class="p">{</span>
      <span class="n">cout</span> <span class="o">&lt;&lt;</span> <span class="n">output</span> <span class="o">&lt;&lt;</span> <span class="n">endl</span><span class="p">;</span>
    <span class="p">}</span> <span class="k">else</span> <span class="p">{</span>
      <span class="n">VLOG</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span> <span class="o">&lt;&lt;</span> <span class="n">output</span><span class="p">;</span>
    <span class="p">}</span>
    <span class="k">return</span> <span class="n">SOLVER_CONTINUE</span><span class="p">;</span>
  <span class="p">}</span>

 <span class="k">private</span><span class="o">:</span>
  <span class="k">const</span> <span class="kt">bool</span> <span class="n">log_to_stdout_</span><span class="p">;</span>
<span class="p">};</span>
</pre></div>
</div>
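<p>Another sketch, assuming the user wants to stop the solver early once
the per iteration change in the objective function becomes negligible;
the threshold used here is illustrative:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>#include &lt;cmath&gt;
#include "ceres/ceres.h"

class EarlyStoppingCallback : public ceres::IterationCallback {
 public:
  virtual ~EarlyStoppingCallback() {}
  virtual ceres::CallbackReturnType operator()(
      const ceres::IterationSummary&amp; summary) {
    // Request termination once an accepted step changes the cost by
    // less than the (illustrative) threshold.
    if (summary.iteration &gt; 0 &amp;&amp;
        summary.step_is_successful &amp;&amp;
        std::abs(summary.cost_change) &lt; 1e-12) {
      return ceres::SOLVER_TERMINATE_SUCCESSFULLY;
    }
    return ceres::SOLVER_CONTINUE;
  }
};
</pre></div>
</div>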
</dd></dl>

</div>
<div class="section" id="crsmatrix">
<h2><a class="reference internal" href="#_CPPv2N5ceres9CRSMatrixE" title="ceres::CRSMatrix"><code class="xref cpp cpp-class docutils literal"><span class="pre">CRSMatrix</span></code></a><a class="headerlink" href="#crsmatrix" title="Permalink to this headline">¶</a></h2>
<dl class="class">
<dt id="_CPPv2N5ceres9CRSMatrixE">
<span id="ceres::CRSMatrix"></span><em class="property">class </em><code class="descclassname"></code><code class="descname">CRSMatrix</code><a class="headerlink" href="#_CPPv2N5ceres9CRSMatrixE" title="Permalink to this definition">¶</a></dt>
<dd><p>A compressed row sparse matrix used primarily for communicating the
Jacobian matrix to the user.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N9CRSMatrix8num_rowsE">
<span id="ceres::CRSMatrix::num_rows__i"></span>int <code class="descclassname">CRSMatrix::</code><code class="descname">num_rows</code><a class="headerlink" href="#_CPPv2N9CRSMatrix8num_rowsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of rows.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N9CRSMatrix8num_colsE">
<span id="ceres::CRSMatrix::num_cols__i"></span>int <code class="descclassname">CRSMatrix::</code><code class="descname">num_cols</code><a class="headerlink" href="#_CPPv2N9CRSMatrix8num_colsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of columns.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N9CRSMatrix4rowsE">
<span id="ceres::CRSMatrix::rows__vector:i:"></span>vector&lt;int&gt; <code class="descclassname">CRSMatrix::</code><code class="descname">rows</code><a class="headerlink" href="#_CPPv2N9CRSMatrix4rowsE" title="Permalink to this definition">¶</a></dt>
<dd><p><a class="reference internal" href="#_CPPv2N9CRSMatrix4rowsE" title="ceres::CRSMatrix::rows"><code class="xref cpp cpp-member docutils literal"><span class="pre">CRSMatrix::rows</span></code></a> is a <a class="reference internal" href="#_CPPv2N9CRSMatrix8num_rowsE" title="ceres::CRSMatrix::num_rows"><code class="xref cpp cpp-member docutils literal"><span class="pre">CRSMatrix::num_rows</span></code></a> + 1
sized array that points into the <a class="reference internal" href="#_CPPv2N9CRSMatrix4colsE" title="ceres::CRSMatrix::cols"><code class="xref cpp cpp-member docutils literal"><span class="pre">CRSMatrix::cols</span></code></a> and
<a class="reference internal" href="#_CPPv2N9CRSMatrix6valuesE" title="ceres::CRSMatrix::values"><code class="xref cpp cpp-member docutils literal"><span class="pre">CRSMatrix::values</span></code></a> array.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N9CRSMatrix4colsE">
<span id="ceres::CRSMatrix::cols__vector:i:"></span>vector&lt;int&gt; <code class="descclassname">CRSMatrix::</code><code class="descname">cols</code><a class="headerlink" href="#_CPPv2N9CRSMatrix4colsE" title="Permalink to this definition">¶</a></dt>
<dd><p><a class="reference internal" href="#_CPPv2N9CRSMatrix4colsE" title="ceres::CRSMatrix::cols"><code class="xref cpp cpp-member docutils literal"><span class="pre">CRSMatrix::cols</span></code></a> contain as many entries as there are
non-zeros in the matrix.</p>
<p>For each row <code class="docutils literal"><span class="pre">i</span></code>, <code class="docutils literal"><span class="pre">cols[rows[i]]</span></code> ... <code class="docutils literal"><span class="pre">cols[rows[i</span> <span class="pre">+</span> <span class="pre">1]</span> <span class="pre">-</span> <span class="pre">1]</span></code>
are the indices of the non-zero columns of row <code class="docutils literal"><span class="pre">i</span></code>.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N9CRSMatrix6valuesE">
<span id="ceres::CRSMatrix::values__vector:i:"></span>vector&lt;int&gt; <code class="descclassname">CRSMatrix::</code><code class="descname">values</code><a class="headerlink" href="#_CPPv2N9CRSMatrix6valuesE" title="Permalink to this definition">¶</a></dt>
<dd><p><a class="reference internal" href="#_CPPv2N9CRSMatrix6valuesE" title="ceres::CRSMatrix::values"><code class="xref cpp cpp-member docutils literal"><span class="pre">CRSMatrix::values</span></code></a> contain as many entries as there are
non-zeros in the matrix.</p>
<p>For each row <code class="docutils literal"><span class="pre">i</span></code>,
<code class="docutils literal"><span class="pre">values[rows[i]]</span></code> ... <code class="docutils literal"><span class="pre">values[rows[i</span> <span class="pre">+</span> <span class="pre">1]</span> <span class="pre">-</span> <span class="pre">1]</span></code> are the values
of the non-zero entries of row <code class="docutils literal"><span class="pre">i</span></code>.</p>
</dd></dl>

<p>e.g., consider the 3x4 sparse matrix</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span><span class="mi">0</span> <span class="mi">10</span>  <span class="mi">0</span>  <span class="mi">4</span>
<span class="mi">0</span>  <span class="mi">2</span> <span class="o">-</span><span class="mi">3</span>  <span class="mi">2</span>
<span class="mi">1</span>  <span class="mi">2</span>  <span class="mi">0</span>  <span class="mi">0</span>
</pre></div>
</div>
<p>The three arrays will be:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>         <span class="o">-</span><span class="n">row0</span><span class="o">-</span>  <span class="o">---</span><span class="n">row1</span><span class="o">---</span>  <span class="o">-</span><span class="n">row2</span><span class="o">-</span>
<span class="n">rows</span>   <span class="o">=</span> <span class="p">[</span> <span class="mi">0</span><span class="p">,</span>      <span class="mi">2</span><span class="p">,</span>          <span class="mi">5</span><span class="p">,</span>     <span class="mi">7</span><span class="p">]</span>
<span class="n">cols</span>   <span class="o">=</span> <span class="p">[</span> <span class="mi">1</span><span class="p">,</span>  <span class="mi">3</span><span class="p">,</span>  <span class="mi">1</span><span class="p">,</span>  <span class="mi">2</span><span class="p">,</span>  <span class="mi">3</span><span class="p">,</span>  <span class="mi">0</span><span class="p">,</span>  <span class="mi">1</span><span class="p">]</span>
<span class="n">values</span> <span class="o">=</span> <span class="p">[</span><span class="mi">10</span><span class="p">,</span>  <span class="mi">4</span><span class="p">,</span>  <span class="mi">2</span><span class="p">,</span> <span class="o">-</span><span class="mi">3</span><span class="p">,</span>  <span class="mi">2</span><span class="p">,</span>  <span class="mi">1</span><span class="p">,</span>  <span class="mi">2</span><span class="p">]</span>
</pre></div>
</div>
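<p>A minimal sketch of walking over such a matrix and printing its
non-zero entries:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>#include &lt;cstdio&gt;
#include "ceres/ceres.h"

void PrintNonZeros(const ceres::CRSMatrix&amp; m) {
  for (int i = 0; i &lt; m.num_rows; ++i) {
    // The entries of row i live in the half open range
    // [rows[i], rows[i + 1]).
    for (int idx = m.rows[i]; idx &lt; m.rows[i + 1]; ++idx) {
      std::printf("(%d, %d) = %g\n", i, m.cols[idx], m.values[idx]);
    }
  }
}
</pre></div>
</div>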
</div>
<div class="section" id="solver-summary">
<h2><a class="reference internal" href="#_CPPv2N5ceres6Solver7SummaryE" title="ceres::Solver::Summary"><code class="xref cpp cpp-class docutils literal"><span class="pre">Solver::Summary</span></code></a><a class="headerlink" href="#solver-summary" title="Permalink to this headline">¶</a></h2>
<dl class="class">
<dt id="_CPPv2N5ceres6Solver7SummaryE">
<span id="ceres::Solver::Summary"></span><em class="property">class </em><code class="descclassname">Solver::</code><code class="descname">Summary</code><a class="headerlink" href="#_CPPv2N5ceres6Solver7SummaryE" title="Permalink to this definition">¶</a></dt>
<dd><p>Summary of the various stages of the solver after termination.</p>
</dd></dl>

<dl class="function">
<dt id="_CPPv2NK5ceres6Solver7Summary11BriefReportEv">
<span id="ceres::Solver::Summary::BriefReportC"></span>string <code class="descclassname">Solver::Summary::</code><code class="descname">BriefReport</code><span class="sig-paren">(</span><span class="sig-paren">)</span> <em class="property">const</em><a class="headerlink" href="#_CPPv2NK5ceres6Solver7Summary11BriefReportEv" title="Permalink to this definition">¶</a></dt>
<dd><p>A brief one line description of the state of the solver after
termination.</p>
</dd></dl>

<dl class="function">
<dt id="_CPPv2NK5ceres6Solver7Summary10FullReportEv">
<span id="ceres::Solver::Summary::FullReportC"></span>string <code class="descclassname">Solver::Summary::</code><code class="descname">FullReport</code><span class="sig-paren">(</span><span class="sig-paren">)</span> <em class="property">const</em><a class="headerlink" href="#_CPPv2NK5ceres6Solver7Summary10FullReportEv" title="Permalink to this definition">¶</a></dt>
<dd><p>A full multiline description of the state of the solver after
termination.</p>
</dd></dl>

<dl class="function">
<dt id="_CPPv2NK5ceres6Solver7Summary16IsSolutionUsableEv">
<span id="ceres::Solver::Summary::IsSolutionUsableC"></span>bool <code class="descclassname">Solver::Summary::</code><code class="descname">IsSolutionUsable</code><span class="sig-paren">(</span><span class="sig-paren">)</span> <em class="property">const</em><a class="headerlink" href="#_CPPv2NK5ceres6Solver7Summary16IsSolutionUsableEv" title="Permalink to this definition">¶</a></dt>
<dd><p>Whether the solution returned by the optimization algorithm can be
relied on to be numerically sane. This will be the case if
<cite>Solver::Summary::termination_type</cite> is set to <cite>CONVERGENCE</cite>,
<cite>USER_SUCCESS</cite> or <cite>NO_CONVERGENCE</cite>, i.e., either the solver
converged by meeting one of the convergence tolerances, the user
indicated that it had converged, or it ran to the maximum number of
iterations or time.</p>
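<p>A minimal sketch of running the solver and inspecting the summary
afterwards, assuming a <code class="docutils literal"><span class="pre">problem</span></code> that has been set up elsewhere:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>#include &lt;iostream&gt;
#include "ceres/ceres.h"

void SolveAndReport(ceres::Problem* problem) {
  ceres::Solver::Options options;
  ceres::Solver::Summary summary;
  ceres::Solve(options, problem, &amp;summary);

  // Print the full multiline report and check that the solution is
  // numerically sane before using it.
  std::cout &lt;&lt; summary.FullReport() &lt;&lt; "\n";
  if (!summary.IsSolutionUsable()) {
    std::cout &lt;&lt; "Solve failed: " &lt;&lt; summary.message &lt;&lt; "\n";
  }
}
</pre></div>
</div>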
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary14minimizer_typeE">
<span id="ceres::Solver::Summary::minimizer_type__MinimizerType"></span>MinimizerType <code class="descclassname">Solver::Summary::</code><code class="descname">minimizer_type</code><a class="headerlink" href="#_CPPv2N6Solver7Summary14minimizer_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Type of minimization algorithm used.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary16termination_typeE">
<span id="ceres::Solver::Summary::termination_type__TerminationType"></span>TerminationType <code class="descclassname">Solver::Summary::</code><code class="descname">termination_type</code><a class="headerlink" href="#_CPPv2N6Solver7Summary16termination_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>The cause of the minimizer terminating.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary7messageE">
<span id="ceres::Solver::Summary::message__string"></span>string <code class="descclassname">Solver::Summary::</code><code class="descname">message</code><a class="headerlink" href="#_CPPv2N6Solver7Summary7messageE" title="Permalink to this definition">¶</a></dt>
<dd><p>Reason why the solver terminated.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary12initial_costE">
<span id="ceres::Solver::Summary::initial_cost__double"></span>double <code class="descclassname">Solver::Summary::</code><code class="descname">initial_cost</code><a class="headerlink" href="#_CPPv2N6Solver7Summary12initial_costE" title="Permalink to this definition">¶</a></dt>
<dd><p>Cost of the problem (value of the objective function) before the
optimization.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary10final_costE">
<span id="ceres::Solver::Summary::final_cost__double"></span>double <code class="descclassname">Solver::Summary::</code><code class="descname">final_cost</code><a class="headerlink" href="#_CPPv2N6Solver7Summary10final_costE" title="Permalink to this definition">¶</a></dt>
<dd><p>Cost of the problem (value of the objective function) after the
optimization.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary10fixed_costE">
<span id="ceres::Solver::Summary::fixed_cost__double"></span>double <code class="descclassname">Solver::Summary::</code><code class="descname">fixed_cost</code><a class="headerlink" href="#_CPPv2N6Solver7Summary10fixed_costE" title="Permalink to this definition">¶</a></dt>
<dd><p>The part of the total cost that comes from residual blocks that
were held fixed by the preprocessor because all the parameter
blocks that they depend on were fixed.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary10iterationsE">
<span id="ceres::Solver::Summary::iterations__vector:IterationSummary:"></span>vector&lt;<a class="reference internal" href="#_CPPv2N5ceres16IterationSummaryE" title="ceres::IterationSummary">IterationSummary</a>&gt; <code class="descclassname">Solver::Summary::</code><code class="descname">iterations</code><a class="headerlink" href="#_CPPv2N6Solver7Summary10iterationsE" title="Permalink to this definition">¶</a></dt>
<dd><p><a class="reference internal" href="#_CPPv2N5ceres16IterationSummaryE" title="ceres::IterationSummary"><code class="xref cpp cpp-class docutils literal"><span class="pre">IterationSummary</span></code></a> for each minimizer iteration in order.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary20num_successful_stepsE">
<span id="ceres::Solver::Summary::num_successful_steps__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_successful_steps</code><a class="headerlink" href="#_CPPv2N6Solver7Summary20num_successful_stepsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of minimizer iterations in which the step was
accepted. Unless <code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Options::use_nonmonotonic_steps</span></code>
is <cite>true</cite> this is also the number of steps in which the objective
function value/cost went down.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary22num_unsuccessful_stepsE">
<span id="ceres::Solver::Summary::num_unsuccessful_steps__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_unsuccessful_steps</code><a class="headerlink" href="#_CPPv2N6Solver7Summary22num_unsuccessful_stepsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of minimizer iterations in which the step was rejected
either because it did not reduce the cost enough or the step was
not numerically valid.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary25num_inner_iteration_stepsE">
<span id="ceres::Solver::Summary::num_inner_iteration_steps__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_inner_iteration_steps</code><a class="headerlink" href="#_CPPv2N6Solver7Summary25num_inner_iteration_stepsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of times inner iterations were performed.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary21num_line_search_stepsE">
<span id="ceres::Solver::Summary::num_line_search_steps__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_line_search_steps</code><a class="headerlink" href="#_CPPv2N6Solver7Summary21num_line_search_stepsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Total number of iterations inside the line search algorithm across
all invocations. We call these iterations &#8220;steps&#8221; to distinguish
them from the outer iterations of the line search and trust region
minimizer algorithms which call the line search algorithm as a
subroutine.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary28preprocessor_time_in_secondsE">
<span id="ceres::Solver::Summary::preprocessor_time_in_seconds__double"></span>double <code class="descclassname">Solver::Summary::</code><code class="descname">preprocessor_time_in_seconds</code><a class="headerlink" href="#_CPPv2N6Solver7Summary28preprocessor_time_in_secondsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Time (in seconds) spent in the preprocessor.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary25minimizer_time_in_secondsE">
<span id="ceres::Solver::Summary::minimizer_time_in_seconds__double"></span>double <code class="descclassname">Solver::Summary::</code><code class="descname">minimizer_time_in_seconds</code><a class="headerlink" href="#_CPPv2N6Solver7Summary25minimizer_time_in_secondsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Time (in seconds) spent in the Minimizer.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary29postprocessor_time_in_secondsE">
<span id="ceres::Solver::Summary::postprocessor_time_in_seconds__double"></span>double <code class="descclassname">Solver::Summary::</code><code class="descname">postprocessor_time_in_seconds</code><a class="headerlink" href="#_CPPv2N6Solver7Summary29postprocessor_time_in_secondsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Time (in seconds) spent in the post processor.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary21total_time_in_secondsE">
<span id="ceres::Solver::Summary::total_time_in_seconds__double"></span>double <code class="descclassname">Solver::Summary::</code><code class="descname">total_time_in_seconds</code><a class="headerlink" href="#_CPPv2N6Solver7Summary21total_time_in_secondsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Time (in seconds) spent in the solver.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary29linear_solver_time_in_secondsE">
<span id="ceres::Solver::Summary::linear_solver_time_in_seconds__double"></span>double <code class="descclassname">Solver::Summary::</code><code class="descname">linear_solver_time_in_seconds</code><a class="headerlink" href="#_CPPv2N6Solver7Summary29linear_solver_time_in_secondsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Time (in seconds) spent in the linear solver computing the trust
region step.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary35residual_evaluation_time_in_secondsE">
<span id="ceres::Solver::Summary::residual_evaluation_time_in_seconds__double"></span>double <code class="descclassname">Solver::Summary::</code><code class="descname">residual_evaluation_time_in_seconds</code><a class="headerlink" href="#_CPPv2N6Solver7Summary35residual_evaluation_time_in_secondsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Time (in seconds) spent evaluating the residual vector.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary35jacobian_evaluation_time_in_secondsE">
<span id="ceres::Solver::Summary::jacobian_evaluation_time_in_seconds__double"></span>double <code class="descclassname">Solver::Summary::</code><code class="descname">jacobian_evaluation_time_in_seconds</code><a class="headerlink" href="#_CPPv2N6Solver7Summary35jacobian_evaluation_time_in_secondsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Time (in seconds) spent evaluating the Jacobian matrix.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary31inner_iteration_time_in_secondsE">
<span id="ceres::Solver::Summary::inner_iteration_time_in_seconds__double"></span>double <code class="descclassname">Solver::Summary::</code><code class="descname">inner_iteration_time_in_seconds</code><a class="headerlink" href="#_CPPv2N6Solver7Summary31inner_iteration_time_in_secondsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Time (in seconds) spent doing inner iterations.</p>
</dd></dl>
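<p>A small sketch that prints a coarse breakdown built from the timing members above (the helper name <code class="docutils literal"><span class="pre">PrintTimingBreakdown</span></code> is arbitrary); it assumes <code class="docutils literal"><span class="pre">summary</span></code> comes from a completed call to <code class="docutils literal"><span class="pre">ceres::Solve</span></code>:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>#include &lt;ceres/ceres.h&gt;
#include &lt;cstdio&gt;

// All values are in seconds; the indented entries are components of the
// minimizer time.
void PrintTimingBreakdown(const ceres::Solver::Summary&amp; summary) {
  std::printf("preprocessor     %8.4f\n", summary.preprocessor_time_in_seconds);
  std::printf("minimizer        %8.4f\n", summary.minimizer_time_in_seconds);
  std::printf("  linear solver  %8.4f\n", summary.linear_solver_time_in_seconds);
  std::printf("  residuals      %8.4f\n",
              summary.residual_evaluation_time_in_seconds);
  std::printf("  jacobians      %8.4f\n",
              summary.jacobian_evaluation_time_in_seconds);
  std::printf("postprocessor    %8.4f\n", summary.postprocessor_time_in_seconds);
  std::printf("total            %8.4f\n", summary.total_time_in_seconds);
}
</pre></div></div>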

<dl class="member">
<dt id="_CPPv2N6Solver7Summary20num_parameter_blocksE">
<span id="ceres::Solver::Summary::num_parameter_blocks__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_parameter_blocks</code><a class="headerlink" href="#_CPPv2N6Solver7Summary20num_parameter_blocksE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of parameter blocks in the problem.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary14num_parametersE">
<span id="ceres::Solver::Summary::num_parameters__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_parameters</code><a class="headerlink" href="#_CPPv2N6Solver7Summary14num_parametersE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of parameters in the problem.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary24num_effective_parametersE">
<span id="ceres::Solver::Summary::num_effective_parameters__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_effective_parameters</code><a class="headerlink" href="#_CPPv2N6Solver7Summary24num_effective_parametersE" title="Permalink to this definition">¶</a></dt>
<dd><p>Dimension of the tangent space of the problem (or the number of
columns in the Jacobian for the problem). This is different from
<a class="reference internal" href="#_CPPv2N6Solver7Summary14num_parametersE" title="ceres::Solver::Summary::num_parameters"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Summary::num_parameters</span></code></a> if a parameter block is
associated with a <a class="reference internal" href="nnls_modeling.html#_CPPv2N5ceres21LocalParameterizationE" title="ceres::LocalParameterization"><code class="xref cpp cpp-class docutils literal"><span class="pre">LocalParameterization</span></code></a>.</p>
</dd></dl>
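<p>For example, a parameter block holding a unit quaternion stores four values but has a three dimensional tangent space. A minimal, purely illustrative sketch (the functor <code class="docutils literal"><span class="pre">QuaternionResidual</span></code> is hypothetical):</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>#include &lt;ceres/ceres.h&gt;
#include &lt;iostream&gt;

// Hypothetical residual penalizing the deviation of q from the identity
// rotation; it exists only so the problem has something to minimize.
struct QuaternionResidual {
  template &lt;typename T&gt;
  bool operator()(const T* const q, T* residual) const {
    residual[0] = T(1.0) - q[0];
    residual[1] = q[1];
    residual[2] = q[2];
    residual[3] = q[3];
    return true;
  }
};

int main() {
  double q[4] = {1.0, 0.0, 0.0, 0.0};
  ceres::Problem problem;
  problem.AddResidualBlock(
      new ceres::AutoDiffCostFunction&lt;QuaternionResidual, 4, 4&gt;(
          new QuaternionResidual),
      NULL, q);
  // The block stores 4 values, but its tangent space is 3 dimensional.
  problem.SetParameterization(q, new ceres::QuaternionParameterization);

  ceres::Solver::Options options;
  ceres::Solver::Summary summary;
  ceres::Solve(options, &amp;problem, &amp;summary);

  // Expected: num_parameters == 4, num_effective_parameters == 3.
  std::cout &lt;&lt; summary.num_parameters &lt;&lt; " "
            &lt;&lt; summary.num_effective_parameters &lt;&lt; "\n";
  return 0;
}
</pre></div></div>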

<dl class="member">
<dt id="_CPPv2N6Solver7Summary19num_residual_blocksE">
<span id="ceres::Solver::Summary::num_residual_blocks__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_residual_blocks</code><a class="headerlink" href="#_CPPv2N6Solver7Summary19num_residual_blocksE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of residual blocks in the problem.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary13num_residualsE">
<span id="ceres::Solver::Summary::num_residuals__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_residuals</code><a class="headerlink" href="#_CPPv2N6Solver7Summary13num_residualsE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of residuals in the problem.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary28num_parameter_blocks_reducedE">
<span id="ceres::Solver::Summary::num_parameter_blocks_reduced__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_parameter_blocks_reduced</code><a class="headerlink" href="#_CPPv2N6Solver7Summary28num_parameter_blocks_reducedE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of parameter blocks in the problem after the inactive and
constant parameter blocks have been removed. A parameter block is
inactive if no residual block refers to it.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary22num_parameters_reducedE">
<span id="ceres::Solver::Summary::num_parameters_reduced__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_parameters_reduced</code><a class="headerlink" href="#_CPPv2N6Solver7Summary22num_parameters_reducedE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of parameters in the reduced problem.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary32num_effective_parameters_reducedE">
<span id="ceres::Solver::Summary::num_effective_parameters_reduced__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_effective_parameters_reduced</code><a class="headerlink" href="#_CPPv2N6Solver7Summary32num_effective_parameters_reducedE" title="Permalink to this definition">¶</a></dt>
<dd><p>Dimension of the tangent space of the reduced problem (or the
number of columns in the Jacobian for the reduced problem). This is
different from <a class="reference internal" href="#_CPPv2N6Solver7Summary22num_parameters_reducedE" title="ceres::Solver::Summary::num_parameters_reduced"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Summary::num_parameters_reduced</span></code></a> if
a parameter block in the reduced problem is associated with a
<a class="reference internal" href="nnls_modeling.html#_CPPv2N5ceres21LocalParameterizationE" title="ceres::LocalParameterization"><code class="xref cpp cpp-class docutils literal"><span class="pre">LocalParameterization</span></code></a>.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary27num_residual_blocks_reducedE">
<span id="ceres::Solver::Summary::num_residual_blocks_reduced__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_residual_blocks_reduced</code><a class="headerlink" href="#_CPPv2N6Solver7Summary27num_residual_blocks_reducedE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of residual blocks in the reduced problem.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary21num_residuals_reducedE">
<span id="ceres::Solver::Summary::num_residuals_reduced__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_residuals_reduced</code><a class="headerlink" href="#_CPPv2N6Solver7Summary21num_residuals_reducedE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of residuals in the reduced problem.</p>
</dd></dl>
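<p>A small sketch that contrasts the original and reduced problem sizes (the helper name <code class="docutils literal"><span class="pre">PrintProblemSizes</span></code> is arbitrary), assuming <code class="docutils literal"><span class="pre">summary</span></code> comes from a completed solve. In the hypothetical <code class="docutils literal"><span class="pre">fixed_cost</span></code> sketch earlier on this page, the constant block <code class="docutils literal"><span class="pre">x</span></code> and the residual block that depends only on it would be expected to disappear from the reduced counts:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>#include &lt;ceres/ceres.h&gt;
#include &lt;cstdio&gt;

// Prints the problem sizes before and after the preprocessor removed
// constant and inactive parameter blocks.
void PrintProblemSizes(const ceres::Solver::Summary&amp; summary) {
  std::printf("parameter blocks: %d (reduced: %d)\n",
              summary.num_parameter_blocks,
              summary.num_parameter_blocks_reduced);
  std::printf("parameters:       %d (reduced: %d)\n",
              summary.num_parameters, summary.num_parameters_reduced);
  std::printf("residual blocks:  %d (reduced: %d)\n",
              summary.num_residual_blocks,
              summary.num_residual_blocks_reduced);
  std::printf("residuals:        %d (reduced: %d)\n",
              summary.num_residuals, summary.num_residuals_reduced);
}
</pre></div></div>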

<dl class="member">
<dt id="_CPPv2N6Solver7Summary17num_threads_givenE">
<span id="ceres::Solver::Summary::num_threads_given__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_threads_given</code><a class="headerlink" href="#_CPPv2N6Solver7Summary17num_threads_givenE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of threads specified by the user for Jacobian and residual
evaluation.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary16num_threads_usedE">
<span id="ceres::Solver::Summary::num_threads_used__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_threads_used</code><a class="headerlink" href="#_CPPv2N6Solver7Summary16num_threads_usedE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of threads actually used by the solver for Jacobian and
residual evaluation. This number may differ from
<a class="reference internal" href="#_CPPv2N6Solver7Summary17num_threads_givenE" title="ceres::Solver::Summary::num_threads_given"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Summary::num_threads_given</span></code></a> if <cite>OpenMP</cite> is not
available, in which case evaluation falls back to a single thread.</p>
</dd></dl>
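<p>A minimal sketch of requesting multiple evaluation threads and checking how many were actually used (the helper name <code class="docutils literal"><span class="pre">SolveWithFourThreads</span></code> is arbitrary); <code class="docutils literal"><span class="pre">problem</span></code> is assumed to have been populated elsewhere:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>#include &lt;ceres/ceres.h&gt;
#include &lt;iostream&gt;

void SolveWithFourThreads(ceres::Problem* problem) {
  ceres::Solver::Options options;
  options.num_threads = 4;  // Threads for residual and Jacobian evaluation.

  ceres::Solver::Summary summary;
  ceres::Solve(options, problem, &amp;summary);

  // Without OpenMP support, evaluation falls back to a single thread.
  std::cout &lt;&lt; "threads given: " &lt;&lt; summary.num_threads_given
            &lt;&lt; ", used: " &lt;&lt; summary.num_threads_used &lt;&lt; "\n";
}
</pre></div></div>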

<dl class="member">
<dt id="_CPPv2N6Solver7Summary31num_linear_solver_threads_givenE">
<span id="ceres::Solver::Summary::num_linear_solver_threads_given__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_linear_solver_threads_given</code><a class="headerlink" href="#_CPPv2N6Solver7Summary31num_linear_solver_threads_givenE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of threads specified by the user for solving the trust
region problem.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary30num_linear_solver_threads_usedE">
<span id="ceres::Solver::Summary::num_linear_solver_threads_used__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">num_linear_solver_threads_used</code><a class="headerlink" href="#_CPPv2N6Solver7Summary30num_linear_solver_threads_usedE" title="Permalink to this definition">¶</a></dt>
<dd><p>Number of threads actually used by the solver for solving the trust
region problem. This number may differ from
<a class="reference internal" href="#_CPPv2N6Solver7Summary31num_linear_solver_threads_givenE" title="ceres::Solver::Summary::num_linear_solver_threads_given"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Summary::num_linear_solver_threads_given</span></code></a> if
<cite>OpenMP</cite> is not available, in which case a single thread is used.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary24linear_solver_type_givenE">
<span id="ceres::Solver::Summary::linear_solver_type_given__LinearSolverType"></span>LinearSolverType <code class="descclassname">Solver::Summary::</code><code class="descname">linear_solver_type_given</code><a class="headerlink" href="#_CPPv2N6Solver7Summary24linear_solver_type_givenE" title="Permalink to this definition">¶</a></dt>
<dd><p>Type of the linear solver requested by the user.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary23linear_solver_type_usedE">
<span id="ceres::Solver::Summary::linear_solver_type_used__LinearSolverType"></span>LinearSolverType <code class="descclassname">Solver::Summary::</code><code class="descname">linear_solver_type_used</code><a class="headerlink" href="#_CPPv2N6Solver7Summary23linear_solver_type_usedE" title="Permalink to this definition">¶</a></dt>
<dd><p>Type of the linear solver actually used. This may differ from
<a class="reference internal" href="#_CPPv2N6Solver7Summary24linear_solver_type_givenE" title="ceres::Solver::Summary::linear_solver_type_given"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Summary::linear_solver_type_given</span></code></a> if Ceres
determines that the problem structure is not compatible with the
requested linear solver, or if the requested linear solver is not
available, e.g., the user requested
<cite>SPARSE_NORMAL_CHOLESKY</cite> but no sparse linear algebra library was
available.</p>
</dd></dl>
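<p>A minimal sketch of detecting such a fallback after the solve (the helper name <code class="docutils literal"><span class="pre">SolveWithSparseCholesky</span></code> is arbitrary); <code class="docutils literal"><span class="pre">problem</span></code> is assumed to have been populated elsewhere, and <code class="docutils literal"><span class="pre">LinearSolverTypeToString</span></code> is the string-conversion helper from <code class="docutils literal"><span class="pre">ceres/types.h</span></code>:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>#include &lt;ceres/ceres.h&gt;
#include &lt;iostream&gt;

void SolveWithSparseCholesky(ceres::Problem* problem) {
  ceres::Solver::Options options;
  options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;

  ceres::Solver::Summary summary;
  ceres::Solve(options, problem, &amp;summary);

  if (summary.linear_solver_type_used != summary.linear_solver_type_given) {
    // E.g. no sparse linear algebra library was available at build time.
    std::cout &lt;&lt; "Fell back to "
              &lt;&lt; ceres::LinearSolverTypeToString(
                     summary.linear_solver_type_used)
              &lt;&lt; "\n";
  }
}
</pre></div></div>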

<dl class="member">
<dt id="_CPPv2N6Solver7Summary28linear_solver_ordering_givenE">
<span id="ceres::Solver::Summary::linear_solver_ordering_given__vector:i:"></span>vector&lt;int&gt; <code class="descclassname">Solver::Summary::</code><code class="descname">linear_solver_ordering_given</code><a class="headerlink" href="#_CPPv2N6Solver7Summary28linear_solver_ordering_givenE" title="Permalink to this definition">¶</a></dt>
<dd><p>Size of the elimination groups given by the user as hints to the
linear solver.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary27linear_solver_ordering_usedE">
<span id="ceres::Solver::Summary::linear_solver_ordering_used__vector:i:"></span>vector&lt;int&gt; <code class="descclassname">Solver::Summary::</code><code class="descname">linear_solver_ordering_used</code><a class="headerlink" href="#_CPPv2N6Solver7Summary27linear_solver_ordering_usedE" title="Permalink to this definition">¶</a></dt>
<dd><p>Size of the parameter groups used by the solver when ordering the
columns of the Jacobian. This may differ from
<a class="reference internal" href="#_CPPv2N6Solver7Summary28linear_solver_ordering_givenE" title="ceres::Solver::Summary::linear_solver_ordering_given"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Summary::linear_solver_ordering_given</span></code></a> if the user
left <a class="reference internal" href="#_CPPv2N6Solver7Summary28linear_solver_ordering_givenE" title="ceres::Solver::Summary::linear_solver_ordering_given"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Summary::linear_solver_ordering_given</span></code></a> blank
and asked for an automatic ordering, or if the problem contains
some constant or inactive parameter blocks.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary21schur_structure_givenE">
<span id="ceres::Solver::Summary::schur_structure_given__ss"></span>std::string <code class="descclassname">Solver::Summary::</code><code class="descname">schur_structure_given</code><a class="headerlink" href="#_CPPv2N6Solver7Summary21schur_structure_givenE" title="Permalink to this definition">¶</a></dt>
<dd><p>For Schur type linear solvers, this string describes the template
specialization which was detected in the problem and should be
used.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary20schur_structure_usedE">
<span id="ceres::Solver::Summary::schur_structure_used__ss"></span>std::string <code class="descclassname">Solver::Summary::</code><code class="descname">schur_structure_used</code><a class="headerlink" href="#_CPPv2N6Solver7Summary20schur_structure_usedE" title="Permalink to this definition">¶</a></dt>
<dd><p>For Schur type linear solvers, this string describes the template
specialization that was actually instantiated and used. It differs
from <a class="reference internal" href="#_CPPv2N6Solver7Summary21schur_structure_givenE" title="ceres::Solver::Summary::schur_structure_given"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Summary::schur_structure_given</span></code></a> when the
corresponding template specialization does not exist.</p>
<p>Template specializations can be added to Ceres by editing
<code class="docutils literal"><span class="pre">internal/ceres/generate_template_specializations.py</span></code>.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary22inner_iterations_givenE">
<span id="ceres::Solver::Summary::inner_iterations_given__b"></span>bool <code class="descclassname">Solver::Summary::</code><code class="descname">inner_iterations_given</code><a class="headerlink" href="#_CPPv2N6Solver7Summary22inner_iterations_givenE" title="Permalink to this definition">¶</a></dt>
<dd><p><cite>True</cite> if the user asked for inner iterations to be used as part of
the optimization.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary21inner_iterations_usedE">
<span id="ceres::Solver::Summary::inner_iterations_used__b"></span>bool <code class="descclassname">Solver::Summary::</code><code class="descname">inner_iterations_used</code><a class="headerlink" href="#_CPPv2N6Solver7Summary21inner_iterations_usedE" title="Permalink to this definition">¶</a></dt>
<dd><p><cite>True</cite> if the user asked for inner iterations to be used as part of
the optimization and the problem structure was such that they were
actually performed. For example, in a problem with just one parameter
block, inner iterations are not performed.</p>
</dd></dl>
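<p>A minimal sketch of requesting inner iterations and checking whether they were actually used (the helper name <code class="docutils literal"><span class="pre">SolveWithInnerIterations</span></code> is arbitrary); <code class="docutils literal"><span class="pre">problem</span></code> is assumed to have been populated elsewhere:</p>
<div class="highlight-c++"><div class="highlight"><pre><span></span>#include &lt;ceres/ceres.h&gt;
#include &lt;iostream&gt;

void SolveWithInnerIterations(ceres::Problem* problem) {
  ceres::Solver::Options options;
  options.use_inner_iterations = true;

  ceres::Solver::Summary summary;
  ceres::Solve(options, problem, &amp;summary);

  // inner_iterations_used can be false even when requested, e.g. when the
  // problem has only one parameter block.
  std::cout &lt;&lt; "inner iterations given: " &lt;&lt; summary.inner_iterations_given
            &lt;&lt; ", used: " &lt;&lt; summary.inner_iterations_used &lt;&lt; "\n";
}
</pre></div></div>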

<dl class="member">
<dt id="_CPPv230inner_iteration_ordering_given">
<span id="ceres::inner_iteration_ordering_given__vector:i:"></span>vector&lt;int&gt; <code class="descclassname"></code><code class="descname">inner_iteration_ordering_given</code><a class="headerlink" href="#_CPPv230inner_iteration_ordering_given" title="Permalink to this definition">¶</a></dt>
<dd><p>Size of the parameter groups given by the user for performing inner
iterations.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv229inner_iteration_ordering_used">
<span id="ceres::inner_iteration_ordering_used__vector:i:"></span>vector&lt;int&gt; <code class="descclassname"></code><code class="descname">inner_iteration_ordering_used</code><a class="headerlink" href="#_CPPv229inner_iteration_ordering_used" title="Permalink to this definition">¶</a></dt>
<dd><p>Size of the parameter groups given used by the solver for
performing inner iterations. This maybe different from
<code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Summary::inner_iteration_ordering_given</span></code> if the
user left <code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Summary::inner_iteration_ordering_given</span></code>
blank and asked for an automatic ordering, or if the problem
contains some constant or inactive parameter blocks.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary25preconditioner_type_givenE">
<span id="ceres::Solver::Summary::preconditioner_type_given__PreconditionerType"></span>PreconditionerType <code class="descclassname">Solver::Summary::</code><code class="descname">preconditioner_type_given</code><a class="headerlink" href="#_CPPv2N6Solver7Summary25preconditioner_type_givenE" title="Permalink to this definition">¶</a></dt>
<dd><p>Type of the preconditioner requested by the user.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary24preconditioner_type_usedE">
<span id="ceres::Solver::Summary::preconditioner_type_used__PreconditionerType"></span>PreconditionerType <code class="descclassname">Solver::Summary::</code><code class="descname">preconditioner_type_used</code><a class="headerlink" href="#_CPPv2N6Solver7Summary24preconditioner_type_usedE" title="Permalink to this definition">¶</a></dt>
<dd><p>Type of the preconditioner actually used. This may differ
from <a class="reference internal" href="#_CPPv2N6Solver7Summary25preconditioner_type_givenE" title="ceres::Solver::Summary::preconditioner_type_given"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Summary::preconditioner_type_given</span></code></a> if Ceres
determines that the problem structure is not compatible with the
requested preconditioner or if the requested preconditioner is not
available.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary26visibility_clustering_typeE">
<span id="ceres::Solver::Summary::visibility_clustering_type__VisibilityClusteringType"></span>VisibilityClusteringType <code class="descclassname">Solver::Summary::</code><code class="descname">visibility_clustering_type</code><a class="headerlink" href="#_CPPv2N6Solver7Summary26visibility_clustering_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Type of clustering algorithm used for visibility based
preconditioning. Only meaningful when
<a class="reference internal" href="#_CPPv2N6Solver7Summary24preconditioner_type_usedE" title="ceres::Solver::Summary::preconditioner_type_used"><code class="xref cpp cpp-member docutils literal"><span class="pre">Solver::Summary::preconditioner_type_used</span></code></a> is
<code class="docutils literal"><span class="pre">CLUSTER_JACOBI</span></code> or <code class="docutils literal"><span class="pre">CLUSTER_TRIDIAGONAL</span></code>.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary26trust_region_strategy_typeE">
<span id="ceres::Solver::Summary::trust_region_strategy_type__TrustRegionStrategyType"></span>TrustRegionStrategyType <code class="descclassname">Solver::Summary::</code><code class="descname">trust_region_strategy_type</code><a class="headerlink" href="#_CPPv2N6Solver7Summary26trust_region_strategy_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Type of trust region strategy.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary11dogleg_typeE">
<span id="ceres::Solver::Summary::dogleg_type__DoglegType"></span>DoglegType <code class="descclassname">Solver::Summary::</code><code class="descname">dogleg_type</code><a class="headerlink" href="#_CPPv2N6Solver7Summary11dogleg_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Type of dogleg strategy used for solving the trust region problem.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary33dense_linear_algebra_library_typeE">
<span id="ceres::Solver::Summary::dense_linear_algebra_library_type__DenseLinearAlgebraLibraryType"></span>DenseLinearAlgebraLibraryType <code class="descclassname">Solver::Summary::</code><code class="descname">dense_linear_algebra_library_type</code><a class="headerlink" href="#_CPPv2N6Solver7Summary33dense_linear_algebra_library_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Type of the dense linear algebra library used.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary34sparse_linear_algebra_library_typeE">
<span id="ceres::Solver::Summary::sparse_linear_algebra_library_type__SparseLinearAlgebraLibraryType"></span>SparseLinearAlgebraLibraryType <code class="descclassname">Solver::Summary::</code><code class="descname">sparse_linear_algebra_library_type</code><a class="headerlink" href="#_CPPv2N6Solver7Summary34sparse_linear_algebra_library_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Type of the sparse linear algebra library used.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary26line_search_direction_typeE">
<span id="ceres::Solver::Summary::line_search_direction_type__LineSearchDirectionType"></span>LineSearchDirectionType <code class="descclassname">Solver::Summary::</code><code class="descname">line_search_direction_type</code><a class="headerlink" href="#_CPPv2N6Solver7Summary26line_search_direction_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Type of line search direction used.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary16line_search_typeE">
<span id="ceres::Solver::Summary::line_search_type__LineSearchType"></span>LineSearchType <code class="descclassname">Solver::Summary::</code><code class="descname">line_search_type</code><a class="headerlink" href="#_CPPv2N6Solver7Summary16line_search_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Type of the line search algorithm used.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary30line_search_interpolation_typeE">
<span id="ceres::Solver::Summary::line_search_interpolation_type__LineSearchInterpolationType"></span>LineSearchInterpolationType <code class="descclassname">Solver::Summary::</code><code class="descname">line_search_interpolation_type</code><a class="headerlink" href="#_CPPv2N6Solver7Summary30line_search_interpolation_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>Degree of the polynomial used to approximate the objective
function during line search.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary33nonlinear_conjugate_gradient_typeE">
<span id="ceres::Solver::Summary::nonlinear_conjugate_gradient_type__NonlinearConjugateGradientType"></span>NonlinearConjugateGradientType <code class="descclassname">Solver::Summary::</code><code class="descname">nonlinear_conjugate_gradient_type</code><a class="headerlink" href="#_CPPv2N6Solver7Summary33nonlinear_conjugate_gradient_typeE" title="Permalink to this definition">¶</a></dt>
<dd><p>If the line search direction is <cite>NONLINEAR_CONJUGATE_GRADIENT</cite>,
then this indicates the particular variant of non-linear conjugate
gradient used.</p>
</dd></dl>

<dl class="member">
<dt id="_CPPv2N6Solver7Summary14max_lbfgs_rankE">
<span id="ceres::Solver::Summary::max_lbfgs_rank__i"></span>int <code class="descclassname">Solver::Summary::</code><code class="descname">max_lbfgs_rank</code><a class="headerlink" href="#_CPPv2N6Solver7Summary14max_lbfgs_rankE" title="Permalink to this definition">¶</a></dt>
<dd><p>If the type of the line search direction is <cite>LBFGS</cite>, then this
indicates the rank of the Hessian approximation.</p>
</dd></dl>

</div>
</div>


           </div>
          </div>
          <footer>
  
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="nnls_covariance.html" class="btn btn-neutral float-right" title="Covariance Estimation" accesskey="n">Next <span class="fa fa-arrow-circle-right"></span></a>
      
      
        <a href="nnls_modeling.html" class="btn btn-neutral" title="Modeling Non-linear Least Squares" accesskey="p"><span class="fa fa-arrow-circle-left"></span> Previous</a>
      
    </div>
  

  <hr/>

  <div role="contentinfo">
    <p>
        &copy; Copyright 2016 Google Inc.

    </p>
  </div> 

</footer>

        </div>
      </div>

    </section>

  </div>
  


  

    <script type="text/javascript">
        var DOCUMENTATION_OPTIONS = {
            URL_ROOT:'./',
            VERSION:'1.13.0',
            COLLAPSE_INDEX:false,
            FILE_SUFFIX:'.html',
            HAS_SOURCE:  true
        };
    </script>
      <script type="text/javascript" src="_static/jquery.js"></script>
      <script type="text/javascript" src="_static/underscore.js"></script>
      <script type="text/javascript" src="_static/doctools.js"></script>
      <script type="text/javascript" src="https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML">
      MathJax.Hub.Config({
          "HTML-CSS": {
            availableFonts: ["TeX"]
          }
        });
      </script>

  

  
  
    <script type="text/javascript" src="_static/js/theme.js"></script>
  

  
  
  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.StickyNav.enable();
      });
  </script>
  
 


</body>
</html>