// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//         mierle@gmail.com (Keir Mierle)
//
// Finite differencing routine used by NumericDiffCostFunction.

#ifndef CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_
#define CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_

#include <cstring>

#include "Eigen/Dense"
#include "ceres/cost_function.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/internal/variadic_evaluate.h"
#include "ceres/types.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {

// Helper templates that allow evaluation of a variadic functor or a
// CostFunction object.
template <typename CostFunctor,
          int N0, int N1, int N2, int N3, int N4,
          int N5, int N6, int N7, int N8, int N9>
bool EvaluateImpl(const CostFunctor* functor,
                  double const* const* parameters,
                  double* residuals,
                  const void* /* NOT USED */) {
  return VariadicEvaluate<CostFunctor, double,
                          N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Call(
      *functor, parameters, residuals);
}

template <typename CostFunctor,
          int N0, int N1, int N2, int N3, int N4,
          int N5, int N6, int N7, int N8, int N9>
bool EvaluateImpl(const CostFunctor* functor,
                  double const* const* parameters,
                  double* residuals,
                  const CostFunction* /* NOT USED */) {
  return functor->Evaluate(parameters, residuals, NULL);
}
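
// A brief sketch of the differencing scheme implemented below. For a residual
// function f, parameter j of the active block, unit vector e_j, and the
// per-parameter step size delta chosen in EvaluateJacobianForParameterBlock,
// the Jacobian column is approximated as:
//
//   FORWARD: J.col(j) ~= (f(x + delta * e_j) - f(x)) / delta
//   CENTRAL: J.col(j) ~= (f(x + delta * e_j) - f(x - delta * e_j)) / (2 * delta)
//
// Forward differencing reuses the residuals already evaluated at x
// (residuals_at_eval_point), costing one extra functor evaluation per
// parameter; central differencing costs two evaluations per parameter but has
// O(delta^2) truncation error instead of O(delta).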

// This is split from the main class because C++ doesn't allow partial template
// specializations for member functions. The alternative is to repeat the main
// class for differing numbers of parameters, which is also unfortunate.
template <typename CostFunctor,
          NumericDiffMethod kMethod,
          int kNumResiduals,
          int N0, int N1, int N2, int N3, int N4,
          int N5, int N6, int N7, int N8, int N9,
          int kParameterBlock,
          int kParameterBlockSize>
struct NumericDiff {
  // Mutates parameters but must restore them before return.
  static bool EvaluateJacobianForParameterBlock(
      const CostFunctor* functor,
      double const* residuals_at_eval_point,
      const double relative_step_size,
      double **parameters,
      double *jacobian) {
    using Eigen::Map;
    using Eigen::Matrix;
    using Eigen::RowMajor;
    using Eigen::ColMajor;

    typedef Matrix<double, kNumResiduals, 1> ResidualVector;
    typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
    typedef Matrix<double, kNumResiduals, kParameterBlockSize,
                   (kParameterBlockSize == 1 &&
                    kNumResiduals > 1) ? ColMajor : RowMajor> JacobianMatrix;

    Map<JacobianMatrix> parameter_jacobian(jacobian,
                                           kNumResiduals,
                                           kParameterBlockSize);

    // Mutate 1 element at a time and then restore.
    Map<ParameterVector> x_plus_delta(parameters[kParameterBlock],
                                      kParameterBlockSize);
    ParameterVector x(x_plus_delta);
    ParameterVector step_size = x.array().abs() * relative_step_size;

    // To handle cases where a parameter is exactly zero, instead use
    // the mean step_size for the other dimensions. If all the
    // parameters are zero, there's no good answer. Take
    // relative_step_size as a guess and hope for the best.
    const double fallback_step_size =
        (step_size.sum() == 0)
        ? relative_step_size
        : step_size.sum() / step_size.rows();

    // For each parameter in the parameter block, use finite differences to
    // compute the derivative for that parameter.
    for (int j = 0; j < kParameterBlockSize; ++j) {
      const double delta =
          (step_size(j) == 0.0) ? fallback_step_size : step_size(j);

      x_plus_delta(j) = x(j) + delta;

      double residuals[kNumResiduals];  // NOLINT

      if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
              functor, parameters, residuals, functor)) {
        return false;
      }

      // Compute this column of the jacobian in 3 steps:
      // 1. Store residuals for the forward part.
      // 2. Subtract residuals for the backward (or 0) part.
      // 3. Divide out the run.
      parameter_jacobian.col(j) =
          Map<const ResidualVector>(residuals, kNumResiduals);

      double one_over_delta = 1.0 / delta;
      if (kMethod == CENTRAL) {
        // Compute the function on the other side of x(j).
        x_plus_delta(j) = x(j) - delta;

        if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
                functor, parameters, residuals, functor)) {
          return false;
        }

        parameter_jacobian.col(j) -=
            Map<const ResidualVector>(residuals, kNumResiduals);

        one_over_delta /= 2;
      } else {
        // Forward difference only; reuse existing residuals evaluation.
        parameter_jacobian.col(j) -=
            Map<const ResidualVector>(residuals_at_eval_point, kNumResiduals);
      }
      x_plus_delta(j) = x(j);  // Restore x_plus_delta.

      // Divide out the run to get slope.
      parameter_jacobian.col(j) *= one_over_delta;
    }
    return true;
  }
};

template <typename CostFunctor,
          NumericDiffMethod kMethod,
          int kNumResiduals,
          int N0, int N1, int N2, int N3, int N4,
          int N5, int N6, int N7, int N8, int N9,
          int kParameterBlock>
struct NumericDiff<CostFunctor, kMethod, kNumResiduals,
                   N0, N1, N2, N3, N4, N5, N6, N7, N8, N9,
                   kParameterBlock, 0> {
  // Mutates parameters but must restore them before return.
  static bool EvaluateJacobianForParameterBlock(
      const CostFunctor* functor,
      double const* residuals_at_eval_point,
      const double relative_step_size,
      double **parameters,
      double *jacobian) {
    LOG(FATAL) << "Control should never reach here.";
    return true;
  }
};

}  // namespace internal
}  // namespace ceres

#endif  // CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_