// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/corrector.h"

#include <cstddef>
#include <cmath>
#include "ceres/internal/eigen.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {
Packit |
ea1746 |
|
|
Packit |
ea1746 |
Corrector::Corrector(const double sq_norm, const double rho[3]) {
|
|
Packit |
ea1746 |
CHECK_GE(sq_norm, 0.0);
|
|
Packit |
ea1746 |
sqrt_rho1_ = sqrt(rho[1]);
|
|
Packit |
ea1746 |
|
|
Packit |
ea1746 |
// If sq_norm = 0.0, the correction becomes trivial, the residual
|
|
Packit |
ea1746 |
// and the jacobian are scaled by the squareroot of the derivative
|
|
Packit |
ea1746 |
// of rho. Handling this case explicitly avoids the divide by zero
|
|
Packit |
ea1746 |
// error that would occur below.
|
|
Packit |
ea1746 |
//
|
|
Packit |
ea1746 |
// The case where rho'' < 0 also gets special handling. Technically
|
|
Packit |
ea1746 |
// it shouldn't, and the computation of the scaling should proceed
|
|
Packit |
ea1746 |
// as below, however we found in experiments that applying the
|
|
Packit |
ea1746 |
// curvature correction when rho'' < 0, which is the case when we
|
|
Packit |
ea1746 |
// are in the outlier region slows down the convergence of the
|
|
Packit |
ea1746 |
// algorithm significantly.
|
|
Packit |
ea1746 |
//
|
|
Packit |
ea1746 |
// Thus, we have divided the action of the robustifier into two
|
|
Packit |
ea1746 |
// parts. In the inliner region, we do the full second order
|
|
Packit |
ea1746 |
// correction which re-wights the gradient of the function by the
|
|
Packit |
ea1746 |
// square root of the derivative of rho, and the Gauss-Newton
|
|
Packit |
ea1746 |
// Hessian gets both the scaling and the rank-1 curvature
|
|
Packit |
ea1746 |
// correction. Normaly, alpha is upper bounded by one, but with this
|
|
Packit |
ea1746 |
// change, alpha is bounded above by zero.
|
|
Packit |
ea1746 |
//
|
|
Packit |
ea1746 |
// Empirically we have observed that the full Triggs correction and
|
|
Packit |
ea1746 |
// the clamped correction both start out as very good approximations
|
|
Packit |
ea1746 |
// to the loss function when we are in the convex part of the
|
|
Packit |
ea1746 |
// function, but as the function starts transitioning from convex to
|
|
Packit |
ea1746 |
// concave, the Triggs approximation diverges more and more and
|
|
Packit |
ea1746 |
// ultimately becomes linear. The clamped Triggs model however
|
|
Packit |
ea1746 |
// remains quadratic.
|
|
Packit |
ea1746 |
//
|
|
Packit |
ea1746 |
// The reason why the Triggs approximation becomes so poor is
|
|
Packit |
ea1746 |
// because the curvature correction that it applies to the gauss
|
|
Packit |
ea1746 |
// newton hessian goes from being a full rank correction to a rank
|
|
Packit |
ea1746 |
// deficient correction making the inversion of the Hessian fraught
|
|
Packit |
ea1746 |
// with all sorts of misery and suffering.
|
|
Packit |
ea1746 |
//
|
|
Packit |
ea1746 |
// The clamped correction retains its quadratic nature and inverting it
|
|
Packit |
ea1746 |
// is always well formed.
|
|
Packit |
ea1746 |
if ((sq_norm == 0.0) || (rho[2] <= 0.0)) {
|
|
Packit |
ea1746 |
residual_scaling_ = sqrt_rho1_;
|
|
Packit |
ea1746 |
alpha_sq_norm_ = 0.0;
|
|
Packit |
ea1746 |
return;
|
|
Packit |
ea1746 |
}
|
|
Packit |
ea1746 |
|
|
Packit |
ea1746 |
// We now require that the first derivative of the loss function be
|
|
Packit |
ea1746 |
// positive only if the second derivative is positive. This is
|
|
Packit |
ea1746 |
// because when the second derivative is non-positive, we do not use
|
|
Packit |
ea1746 |
// the second order correction suggested by BANS and instead use a
|
|
Packit |
ea1746 |
// simpler first order strategy which does not use a division by the
|
|
Packit |
ea1746 |
// gradient of the loss function.
|
|
Packit |
ea1746 |
CHECK_GT(rho[1], 0.0);
|
|
Packit |
ea1746 |
|
|
Packit |
ea1746 |
// Calculate the smaller of the two solutions to the equation
|
|
Packit |
ea1746 |
//
|
|
Packit |
ea1746 |
// 0.5 * alpha^2 - alpha - rho'' / rho' * z'z = 0.
|
|
Packit |
ea1746 |
//
|
|
Packit |
ea1746 |
// Start by calculating the discriminant D.
|
|
Packit |
ea1746 |
const double D = 1.0 + 2.0 * sq_norm * rho[2] / rho[1];
|
|
Packit |
ea1746 |
|
|
Packit |
ea1746 |
// Since both rho[1] and rho[2] are guaranteed to be positive at
|
|
Packit |
ea1746 |
// this point, we know that D > 1.0.
|
|
Packit |
ea1746 |
|
|
Packit |
ea1746 |
const double alpha = 1.0 - sqrt(D);
|
|
Packit |
ea1746 |
|
|
Packit |
ea1746 |
// Calculate the constants needed by the correction routines.
|
|
Packit |
ea1746 |
residual_scaling_ = sqrt_rho1_ / (1 - alpha);
|
|
Packit |
ea1746 |
alpha_sq_norm_ = alpha / sq_norm;
|
|
Packit |
ea1746 |
}
|
|
Packit |
ea1746 |
|
|
Packit |
ea1746 |
void Corrector::CorrectResiduals(const int num_rows, double* residuals) {
|
|
Packit |
ea1746 |
DCHECK(residuals != NULL);
|
|
Packit |
ea1746 |
// Equation 11 in BANS.
|
|
Packit |
ea1746 |
VectorRef(residuals, num_rows) *= residual_scaling_;
|
|
Packit |
ea1746 |
}
|
|
Packit |
ea1746 |
|
|
Packit |
ea1746 |
void Corrector::CorrectJacobian(const int num_rows,
|
|
Packit |
ea1746 |
const int num_cols,
|
|
Packit |
ea1746 |
double* residuals,
|
|
Packit |
ea1746 |
double* jacobian) {
|
|
Packit |
ea1746 |
DCHECK(residuals != NULL);
|
|
Packit |
ea1746 |
DCHECK(jacobian != NULL);
|
|
Packit |
ea1746 |
|
|
Packit |
ea1746 |
// The common case (rho[2] <= 0).
|
|
Packit |
ea1746 |
if (alpha_sq_norm_ == 0.0) {
|
|
Packit |
ea1746 |
VectorRef(jacobian, num_rows * num_cols) *= sqrt_rho1_;
|
|
Packit |
ea1746 |
return;
|
|
Packit |
ea1746 |
}
|
|
Packit |
ea1746 |
|
|
Packit |
ea1746 |
// Equation 11 in BANS.
|
|
Packit |
ea1746 |
//
|
|
Packit |
ea1746 |
// J = sqrt(rho) * (J - alpha^2 r * r' J)
|
|
Packit |
ea1746 |
//
|
|
Packit |
ea1746 |
// In days gone by this loop used to be a single Eigen expression of
|
|
Packit |
ea1746 |
// the form
|
|
Packit |
ea1746 |
//
|
|
Packit |
ea1746 |
// J = sqrt_rho1_ * (J - alpha_sq_norm_ * r* (r.transpose() * J));
|
|
Packit |
ea1746 |
//
|
|
Packit |
ea1746 |
// Which turns out to about 17x slower on bal problems. The reason
|
|
Packit |
ea1746 |
// is that Eigen is unable to figure out that this expression can be
|
|
Packit |
ea1746 |
// evaluated columnwise and ends up creating a temporary.
|
|
Packit |
ea1746 |
for (int c = 0; c < num_cols; ++c) {
|
|
Packit |
ea1746 |
double r_transpose_j = 0.0;
|
|
Packit |
ea1746 |
for (int r = 0; r < num_rows; ++r) {
|
|
Packit |
ea1746 |
r_transpose_j += jacobian[r * num_cols + c] * residuals[r];
|
|
Packit |
ea1746 |
}
|
|
Packit |
ea1746 |
|
|
Packit |
ea1746 |
for (int r = 0; r < num_rows; ++r) {
|
|
Packit |
ea1746 |
jacobian[r * num_cols + c] = sqrt_rho1_ *
|
|
Packit |
ea1746 |
(jacobian[r * num_cols + c] -
|
|
Packit |
ea1746 |
alpha_sq_norm_ * residuals[r] * r_transpose_j);
|
|
Packit |
ea1746 |
}
|
|
Packit |
ea1746 |
}
|
|
Packit |
ea1746 |
}
|
|

}  // namespace internal
}  // namespace ceres