path: root/include
author     Angus Kong <shkong@google.com>  2013-02-13 14:56:04 -0800
committer  Angus Kong <shkong@google.com>  2013-02-14 13:33:05 -0800
commit     0ae28bd5885b5daa526898fcf7c323dc2c3e1963 (patch)
tree       487c4897ae7509fb4e05751b0a673a8090e26d41 /include
parent     e9e9be9383b479ac1573370ab221ffbac4de8c90 (diff)
download   ceres-solver-0ae28bd5885b5daa526898fcf7c323dc2c3e1963.tar.gz
Added a NOTICE and a MODULE_LICENSE_BSD file.
Added Android.mk for master build and unbundled build.
Added CleanSpec.mk to optimize Android build.

Change-Id: I6cd82bcabc1a94b10239f9fca017de0afd20e769
Diffstat (limited to 'include')
-rw-r--r--  include/ceres/autodiff_cost_function.h        217
-rw-r--r--  include/ceres/ceres.h                           52
-rw-r--r--  include/ceres/conditioned_cost_function.h       97
-rw-r--r--  include/ceres/cost_function.h                  127
-rw-r--r--  include/ceres/crs_matrix.h                      65
-rw-r--r--  include/ceres/fpclassify.h                      88
-rw-r--r--  include/ceres/gradient_checker.h               221
-rw-r--r--  include/ceres/internal/autodiff.h              450
-rw-r--r--  include/ceres/internal/eigen.h                  80
-rw-r--r--  include/ceres/internal/fixed_array.h           195
-rw-r--r--  include/ceres/internal/macros.h                171
-rw-r--r--  include/ceres/internal/manual_constructor.h    208
-rw-r--r--  include/ceres/internal/port.h                   50
-rw-r--r--  include/ceres/internal/scoped_ptr.h            311
-rw-r--r--  include/ceres/iteration_callback.h             200
-rw-r--r--  include/ceres/jet.h                            678
-rw-r--r--  include/ceres/local_parameterization.h         189
-rw-r--r--  include/ceres/loss_function.h                  397
-rw-r--r--  include/ceres/normal_prior.h                    75
-rw-r--r--  include/ceres/numeric_diff_cost_function.h     285
-rw-r--r--  include/ceres/ordered_groups.h                 176
-rw-r--r--  include/ceres/problem.h                        286
-rw-r--r--  include/ceres/rotation.h                       534
-rw-r--r--  include/ceres/sized_cost_function.h             96
-rw-r--r--  include/ceres/solver.h                         654
-rw-r--r--  include/ceres/types.h                          320
26 files changed, 6222 insertions, 0 deletions
diff --git a/include/ceres/autodiff_cost_function.h b/include/ceres/autodiff_cost_function.h
new file mode 100644
index 0000000..bb08d64
--- /dev/null
+++ b/include/ceres/autodiff_cost_function.h
@@ -0,0 +1,217 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Helpers for making CostFunctions as needed by the least squares framework,
+// with Jacobians computed via automatic differentiation. For more information
+// on automatic differentiation, see the Wikipedia article at
+// http://en.wikipedia.org/wiki/Automatic_differentiation
+//
+// To get an auto differentiated cost function, you must define a class with a
+// templated operator() (a functor) that computes the cost function in terms of
+// the template parameter T. The autodiff framework substitutes appropriate
+// "jet" objects for T in order to compute the derivative when necessary, but
+// this is hidden, and you should write the function as if T were a scalar type
+// (e.g. a double-precision floating point number).
+//
+// The function must write the computed value in the last argument (the only
+// non-const one) and return true to indicate success.
+//
+// For example, consider a scalar error e = k - x'y, where both x and y are
+// two-dimensional column vector parameters, the prime sign indicates
+// transposition, and k is a constant. The form of this error, which is the
+// difference between a constant and an expression, is a common pattern in least
+// squares problems. For example, the value x'y might be the model expectation
+// for a series of measurements, where there is an instance of the cost function
+// for each measurement k.
+//
+// The actual cost added to the total problem is e^2, or (k - x'y)^2; however,
+// the squaring is implicitly done by the optimization framework.
+//
+// To write an auto-differentiable cost function for the above model, first
+// define the object
+//
+// class MyScalarCostFunction {
+// MyScalarCostFunction(double k): k_(k) {}
+//
+// template <typename T>
+//     bool operator()(const T* const x, const T* const y, T* e) const {
+//       e[0] = T(k_) - x[0] * y[0] - x[1] * y[1];
+// return true;
+// }
+//
+// private:
+// double k_;
+// };
+//
+// Note that in the declaration of operator() the input parameters x and y come
+// first, and are passed as const pointers to arrays of T. If there were three
+// input parameters, then the third input parameter would come after y. The
+// output is always the last parameter, and is also a pointer to an array. In
+// the example above, e is a scalar, so only e[0] is set.
+//
+// Then given this class definition, the auto differentiated cost function for
+// it can be constructed as follows.
+//
+// CostFunction* cost_function
+// = new AutoDiffCostFunction<MyScalarCostFunction, 1, 2, 2>(
+// new MyScalarCostFunction(1.0)); ^ ^ ^
+// | | |
+// Dimension of residual ------+ | |
+// Dimension of x ----------------+ |
+// Dimension of y -------------------+
+//
+// In this example, there is usually an instance for each measurement of k.
+//
+// In the instantiation above, the template parameters following
+// "MyScalarCostFunction", "1, 2, 2", describe the functor as computing a
+// 1-dimensional output from two arguments, both 2-dimensional.
+//
+// The autodiff cost function also supports cost functions with a
+// runtime-determined number of residuals. For example:
+//
+// CostFunction* cost_function
+//     = new AutoDiffCostFunction<CostFunctionWithDynamicNumResiduals, DYNAMIC, 2, 2>(
+//         new CostFunctionWithDynamicNumResiduals(1.0),    ^     ^  ^
+// runtime_number_of_residuals); <----+ | | |
+// | | | |
+// | | | |
+// Actual number of residuals ------+ | | |
+// Indicate dynamic number of residuals ---------+ | |
+// Dimension of x -------------------------------------+ |
+// Dimension of y ----------------------------------------+
+//
+// The framework can currently accommodate cost functions of up to 10 independent
+// variables, and there is no limit on the dimensionality of each of them.
+//
+// WARNING #1: Since the functor will get instantiated with different types for
+// T, you must convert from other numeric types to T before mixing
+// computations with other variables of type T. In the example above, this is
+// seen where instead of using k_ directly, k_ is wrapped with T(k_).
+//
+// WARNING #2: A common beginner's error when first using autodiff cost
+// functions is to get the sizing wrong. In particular, there is a tendency to
+// set the template parameters to (dimension of residual, number of parameters)
+// instead of passing a dimension parameter for *every parameter*. In the
+// example above, that would be <MyScalarCostFunction, 1, 2>, which is missing
+// the last '2' argument. Please be careful when setting the size parameters.
+
+#ifndef CERES_PUBLIC_AUTODIFF_COST_FUNCTION_H_
+#define CERES_PUBLIC_AUTODIFF_COST_FUNCTION_H_
+
+#include <glog/logging.h>
+#include "ceres/internal/autodiff.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/types.h"
+
+namespace ceres {
+
+// A cost function which computes the derivative of the cost with respect to
+// the parameters (a.k.a. the jacobian) using an autodifferentiation framework.
+// The first template argument is the functor object, described in the header
+// comment. The second argument is the dimension of the residual (or
+// ceres::DYNAMIC to indicate it will be set at runtime), and subsequent
+// arguments describe the size of the Nth parameter, one per parameter.
+//
+// The constructors take ownership of the cost functor.
+//
+// If the number of residuals (argument "M" below) is ceres::DYNAMIC, then the
+// two-argument constructor must be used. The second constructor takes a number
+// of residuals (in addition to the templated number of residuals). This allows
+// for varying the number of residuals for a single autodiff cost function at
+// runtime.
+template <typename CostFunctor,
+ int M, // Number of residuals, or ceres::DYNAMIC.
+ int N0, // Number of parameters in block 0.
+ int N1 = 0, // Number of parameters in block 1.
+ int N2 = 0, // Number of parameters in block 2.
+ int N3 = 0, // Number of parameters in block 3.
+ int N4 = 0, // Number of parameters in block 4.
+ int N5 = 0, // Number of parameters in block 5.
+ int N6 = 0, // Number of parameters in block 6.
+ int N7 = 0, // Number of parameters in block 7.
+ int N8 = 0, // Number of parameters in block 8.
+ int N9 = 0> // Number of parameters in block 9.
+class AutoDiffCostFunction :
+ public SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9> {
+ public:
+ // Takes ownership of functor. Uses the template-provided value for the
+ // number of residuals ("M").
+ explicit AutoDiffCostFunction(CostFunctor* functor)
+ : functor_(functor) {
+ CHECK_NE(M, DYNAMIC) << "Can't run the fixed-size constructor if the "
+ << "number of residuals is set to ceres::DYNAMIC.";
+ }
+
+ // Takes ownership of functor. Ignores the template-provided number of
+ // residuals ("M") in favor of the "num_residuals" argument provided.
+ //
+ // This allows for having autodiff cost functions which return varying
+ // numbers of residuals at runtime.
+ AutoDiffCostFunction(CostFunctor* functor, int num_residuals)
+ : functor_(functor) {
+ CHECK_EQ(M, DYNAMIC) << "Can't run the dynamic-size constructor if the "
+ << "number of residuals is not ceres::DYNAMIC.";
+ SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>
+ ::set_num_residuals(num_residuals);
+ }
+
+ virtual ~AutoDiffCostFunction() {}
+
+  // Implementation details follow; clients of the autodiff cost function
+  // should not need to read beyond this point.
+  //
+  // To handle variadic cost functions, some template magic is needed. It's
+ // mostly hidden inside autodiff.h.
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ if (!jacobians) {
+ return internal::VariadicEvaluate<
+ CostFunctor, double, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>
+ ::Call(*functor_, parameters, residuals);
+ }
+ return internal::AutoDiff<CostFunctor, double,
+ N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Differentiate(
+ *functor_,
+ parameters,
+ SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>
+ ::num_residuals(),
+ residuals,
+ jacobians);
+ }
+
+ private:
+ internal::scoped_ptr<CostFunctor> functor_;
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_AUTODIFF_COST_FUNCTION_H_
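For reference, a minimal end-to-end sketch of the usage pattern described in
the header comment above. The functor, starting values, and default solver
options are illustrative and are not part of this change:

  #include "ceres/ceres.h"

  class MyScalarCostFunction {
   public:
    explicit MyScalarCostFunction(double k) : k_(k) {}

    template <typename T>
    bool operator()(const T* const x, const T* const y, T* e) const {
      // e = k - x'y for two-dimensional x and y.
      e[0] = T(k_) - x[0] * y[0] - x[1] * y[1];
      return true;
    }

   private:
    double k_;
  };

  int main() {
    double x[2] = {0.5, 0.5};  // Illustrative starting values.
    double y[2] = {0.5, 0.5};

    ceres::Problem problem;
    problem.AddResidualBlock(
        new ceres::AutoDiffCostFunction<MyScalarCostFunction, 1, 2, 2>(
            new MyScalarCostFunction(1.0)),
        NULL,  // No loss function; the squared residual is minimized as-is.
        x, y);

    ceres::Solver::Options options;
    ceres::Solver::Summary summary;
    ceres::Solve(options, &problem, &summary);
    return 0;
  }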
diff --git a/include/ceres/ceres.h b/include/ceres/ceres.h
new file mode 100644
index 0000000..9b8f212
--- /dev/null
+++ b/include/ceres/ceres.h
@@ -0,0 +1,52 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// This is a forwarding header containing the public symbols exported from
+// Ceres. Anything in the "ceres" namespace is available for use.
+
+#ifndef CERES_PUBLIC_CERES_H_
+#define CERES_PUBLIC_CERES_H_
+
+#define CERES_VERSION 1.4.0
+#define CERES_ABI_VERSION 1.4.0
+
+#include "ceres/autodiff_cost_function.h"
+#include "ceres/cost_function.h"
+#include "ceres/iteration_callback.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/loss_function.h"
+#include "ceres/numeric_diff_cost_function.h"
+#include "ceres/ordered_groups.h"
+#include "ceres/problem.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/solver.h"
+#include "ceres/types.h"
+
+#endif // CERES_PUBLIC_CERES_H_
diff --git a/include/ceres/conditioned_cost_function.h b/include/ceres/conditioned_cost_function.h
new file mode 100644
index 0000000..498d36e
--- /dev/null
+++ b/include/ceres/conditioned_cost_function.h
@@ -0,0 +1,97 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wjr@google.com (William Rucklidge)
+//
+// This file contains a cost function that can apply a transformation to
+// each residual value before they are square-summed.
+
+#ifndef CERES_PUBLIC_CONDITIONED_COST_FUNCTION_H_
+#define CERES_PUBLIC_CONDITIONED_COST_FUNCTION_H_
+
+#include <vector>
+
+#include "ceres/cost_function.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+
+namespace ceres {
+
+// This class allows you to apply different conditioning to the residual
+// values of a wrapped cost function. An example where this is useful is
+// where you have an existing cost function that produces N values, but you
+// want the total cost to be something other than just the sum of these
+// squared values - maybe you want to apply a different scaling to some
+// values, to change their contribution to the cost.
+//
+// Usage:
+//
+// // my_cost_function produces N residuals
+// CostFunction* my_cost_function = ...
+// CHECK_EQ(N, my_cost_function->num_residuals());
+// vector<CostFunction*> conditioners;
+//
+// // Make N 1x1 cost functions (1 parameter, 1 residual)
+// CostFunction* f_1 = ...
+// conditioners.push_back(f_1);
+// ...
+// CostFunction* f_N = ...
+// conditioners.push_back(f_N);
+// ConditionedCostFunction* ccf =
+// new ConditionedCostFunction(my_cost_function, conditioners);
+//
+// Now ccf's residual i (i=0..N-1) will be passed through the i'th conditioner.
+//
+// ccf_residual[i] = f_i(my_cost_function_residual[i])
+//
+// and the Jacobian will be affected appropriately.
+class ConditionedCostFunction : public CostFunction {
+ public:
+ // Builds a cost function based on a wrapped cost function, and a
+ // per-residual conditioner. Takes ownership of all of the wrapped cost
+ // functions, or not, depending on the ownership parameter. Conditioners
+ // may be NULL, in which case the corresponding residual is not modified.
+ ConditionedCostFunction(CostFunction* wrapped_cost_function,
+ const vector<CostFunction*>& conditioners,
+ Ownership ownership);
+ virtual ~ConditionedCostFunction();
+
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const;
+
+ private:
+ internal::scoped_ptr<CostFunction> wrapped_cost_function_;
+ vector<CostFunction*> conditioners_;
+ Ownership ownership_;
+};
+
+} // namespace ceres
+
+
+#endif // CERES_PUBLIC_CONDITIONED_COST_FUNCTION_H_
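A sketch of a per-residual conditioner matching the usage above. The
ScaleConditioner class, its scale value, and the MakeConditioned helper are
hypothetical, shown only to make the shape of a 1-parameter, 1-residual
conditioner concrete:

  #include <vector>
  #include "ceres/conditioned_cost_function.h"
  #include "ceres/sized_cost_function.h"

  // Rescales a single residual value: f_i(r) = scale * r.
  class ScaleConditioner : public ceres::SizedCostFunction<1, 1> {
   public:
    explicit ScaleConditioner(double scale) : scale_(scale) {}

    virtual bool Evaluate(double const* const* parameters,
                          double* residuals,
                          double** jacobians) const {
      residuals[0] = scale_ * parameters[0][0];
      if (jacobians != NULL && jacobians[0] != NULL) {
        jacobians[0][0] = scale_;
      }
      return true;
    }

   private:
    double scale_;
  };

  ceres::CostFunction* MakeConditioned(ceres::CostFunction* my_cost_function) {
    std::vector<ceres::CostFunction*> conditioners;
    for (int i = 0; i < my_cost_function->num_residuals(); ++i) {
      conditioners.push_back(new ScaleConditioner(2.0));
    }
    return new ceres::ConditionedCostFunction(
        my_cost_function, conditioners, ceres::TAKE_OWNERSHIP);
  }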
diff --git a/include/ceres/cost_function.h b/include/ceres/cost_function.h
new file mode 100644
index 0000000..9b010f7
--- /dev/null
+++ b/include/ceres/cost_function.h
@@ -0,0 +1,127 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//         keir@google.com (Keir Mierle)
+//
+// This is the interface through which the least squares solver accesses the
+// residual and Jacobian of the least squares problem. Users are expected to
+// subclass CostFunction to define their own terms in the least squares problem.
+//
+// It is recommended that users define templated residual functors for use as
+// arguments for AutoDiffCostFunction (see autodiff_cost_function.h), instead of
+// directly implementing the CostFunction interface. This often results in both
+// shorter code and faster execution than hand-coded derivatives. However,
+// specialized cases may demand direct implementation of the lower-level
+// CostFunction interface; for example, this is true when calling legacy code
+// which is not templated on numeric types.
+
+#ifndef CERES_PUBLIC_COST_FUNCTION_H_
+#define CERES_PUBLIC_COST_FUNCTION_H_
+
+#include <vector>
+#include "ceres/internal/macros.h"
+#include "ceres/internal/port.h"
+#include "ceres/types.h"
+
+namespace ceres {
+
+// This class implements the computation of the cost (a.k.a. residual) terms as
+// a function of the input (control) variables, and is the interface for users
+// to describe their least squares problem to Ceres. In other words, this is the
+// modelling layer between users and the Ceres optimizer. The signature of the
+// function (number and sizes of input parameter blocks and number of outputs)
+// is stored in parameter_block_sizes_ and num_residuals_ respectively. User
+// code inheriting from this class is expected to set these two members with the
+// corresponding accessors. This information will be verified by the Problem
+// when added with AddResidualBlock().
+class CostFunction {
+ public:
+ CostFunction() : num_residuals_(0) {}
+
+ virtual ~CostFunction() {}
+
+ // Inputs:
+ //
+ // parameters is an array of pointers to arrays containing the
+ // various parameter blocks. parameters has the same number of
+ // elements as parameter_block_sizes_. Parameter blocks are in the
+  // same order as parameter_block_sizes_, i.e.,
+  //
+  //   parameters[i] = double[parameter_block_sizes_[i]]
+ //
+ // Outputs:
+ //
+ // residuals is an array of size num_residuals_.
+ //
+ // jacobians is an array of size parameter_block_sizes_ containing
+ // pointers to storage for jacobian blocks corresponding to each
+ // parameter block. Jacobian blocks are in the same order as
+  // parameter_block_sizes_, i.e., jacobians[i] is an
+  // array that contains num_residuals_ * parameter_block_sizes_[i]
+  // elements. Each jacobian block is stored in row-major order, i.e.,
+  //
+  //   jacobians[i][r * parameter_block_sizes_[i] + c] =
+ // d residual[r] / d parameters[i][c]
+ //
+ // If jacobians is NULL, then no derivatives are returned; this is
+ // the case when computing cost only. If jacobians[i] is NULL, then
+ // the jacobian block corresponding to the i'th parameter block must
+  // not be returned.
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const = 0;
+
+ const vector<int16>& parameter_block_sizes() const {
+ return parameter_block_sizes_;
+ }
+
+ int num_residuals() const {
+ return num_residuals_;
+ }
+
+ protected:
+ vector<int16>* mutable_parameter_block_sizes() {
+ return &parameter_block_sizes_;
+ }
+
+ void set_num_residuals(int num_residuals) {
+ num_residuals_ = num_residuals;
+ }
+
+ private:
+ // Cost function signature metadata: number of inputs & their sizes,
+ // number of outputs (residuals).
+ vector<int16> parameter_block_sizes_;
+ int num_residuals_;
+ CERES_DISALLOW_COPY_AND_ASSIGN(CostFunction);
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_COST_FUNCTION_H_
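To make the Evaluate() contract concrete, here is a small hand-coded
CostFunction sketch: two residuals, one parameter block of size two, with the
Jacobian block filled in row-major order as described above. The residual
definition itself is illustrative:

  #include "ceres/cost_function.h"

  class ExampleCostFunction : public ceres::CostFunction {
   public:
    ExampleCostFunction() {
      set_num_residuals(2);
      mutable_parameter_block_sizes()->push_back(2);  // One block of size 2.
    }

    virtual bool Evaluate(double const* const* parameters,
                          double* residuals,
                          double** jacobians) const {
      const double* x = parameters[0];
      residuals[0] = x[0] - 1.0;
      residuals[1] = x[1] * x[1];
      if (jacobians != NULL && jacobians[0] != NULL) {
        // jacobians[0][r * 2 + c] = d residual[r] / d x[c].
        jacobians[0][0] = 1.0;         // d r0 / d x0
        jacobians[0][1] = 0.0;         // d r0 / d x1
        jacobians[0][2] = 0.0;         // d r1 / d x0
        jacobians[0][3] = 2.0 * x[1];  // d r1 / d x1
      }
      return true;
    }
  };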
diff --git a/include/ceres/crs_matrix.h b/include/ceres/crs_matrix.h
new file mode 100644
index 0000000..c9fe8f7
--- /dev/null
+++ b/include/ceres/crs_matrix.h
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_PUBLIC_CRS_MATRIX_H_
+#define CERES_PUBLIC_CRS_MATRIX_H_
+
+#include <vector>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+
+// A compressed row sparse matrix used primarily for communicating the
+// Jacobian matrix to the user.
+struct CRSMatrix {
+ CRSMatrix() : num_rows(0), num_cols(0) {}
+
+ int num_rows;
+ int num_cols;
+
+ // A compressed row matrix stores its contents in three arrays.
+  // The non-zero pattern of the i^th row is given by
+  //
+  //   cols[rows[i] ... rows[i + 1] - 1]
+  //
+  // and the corresponding values by
+  //
+  //   values[rows[i] ... rows[i + 1] - 1]
+  //
+  // Thus, rows is a vector of size num_rows + 1, and cols and values
+  // have as many entries as the number of non-zeros in the matrix.
+ vector<int> cols;
+ vector<int> rows;
+ vector<double> values;
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_CRS_MATRIX_H_
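A small worked example of this storage scheme (the matrix and its values are
chosen for illustration). The 3x3 matrix

  [ 1 0 2 ]
  [ 0 0 0 ]
  [ 3 4 0 ]

is represented as:

  #include "ceres/crs_matrix.h"

  ceres::CRSMatrix MakeExampleMatrix() {
    ceres::CRSMatrix m;
    m.num_rows = 3;
    m.num_cols = 3;
    // rows has num_rows + 1 entries; the non-zeros of row i live in
    // cols[rows[i] .. rows[i + 1] - 1] and values[rows[i] .. rows[i + 1] - 1].
    const int rows[] = {0, 2, 2, 4};
    const int cols[] = {0, 2, 0, 1};
    const double values[] = {1.0, 2.0, 3.0, 4.0};
    m.rows.assign(rows, rows + 4);
    m.cols.assign(cols, cols + 4);
    m.values.assign(values, values + 4);
    return m;
  }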
diff --git a/include/ceres/fpclassify.h b/include/ceres/fpclassify.h
new file mode 100644
index 0000000..5a9ea15
--- /dev/null
+++ b/include/ceres/fpclassify.h
@@ -0,0 +1,88 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Portable floating point classification. The names are picked such that they
+// do not collide with macros. For example, "isnan" in C99 is a macro and hence
+// does not respect namespaces.
+//
+// TODO(keir): Finish porting!
+
+#ifndef CERES_PUBLIC_FPCLASSIFY_H_
+#define CERES_PUBLIC_FPCLASSIFY_H_
+
+#if defined(_MSC_VER)
+#include <float.h>
+#endif
+
+namespace ceres {
+
+#if defined(_MSC_VER)
+inline bool IsFinite (double x) { return _finite(x); }
+inline bool IsInfinite(double x) { return !_finite(x) && !_isnan(x); }
+inline bool IsNaN (double x) { return _isnan(x); }
+inline bool IsNormal (double x) {
+ int classification = _fpclass(x);
+ return classification == _FPCLASS_NN ||
+ classification == _FPCLASS_PN;
+}
+#elif defined(ANDROID)
+
+// On Android when using the GNU STL, the C++ fpclassify functions are not
+// available. Strictly speaking, the std functions are not standard until
+// C++11. Instead use the C99 macros on Android.
+inline bool IsNaN (double x) { return isnan(x); }
+inline bool IsNormal (double x) { return isnormal(x); }
+
+// On Android NDK r6, when using STLPort, the isinf and isfinite functions are
+// not available, so reimplement them.
+# if defined(_STLPORT_VERSION)
+inline bool IsInfinite(double x) {
+ return x == std::numeric_limits<double>::infinity() ||
+ x == -std::numeric_limits<double>::infinity();
+}
+inline bool IsFinite(double x) {
+ return !isnan(x) && !IsInfinite(x);
+}
+# else
+inline bool IsFinite (double x) { return isfinite(x); }
+inline bool IsInfinite(double x) { return isinf(x); }
+# endif // defined(_STLPORT_VERSION)
+#else
+// These definitions are for the normal Unix suspects.
+// TODO(keir): Test the "else" with more platforms.
+inline bool IsFinite (double x) { return std::isfinite(x); }
+inline bool IsInfinite(double x) { return std::isinf(x); }
+inline bool IsNaN (double x) { return std::isnan(x); }
+inline bool IsNormal (double x) { return std::isnormal(x); }
+#endif
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_FPCLASSIFY_H_
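A minimal usage sketch (the AllFinite helper is hypothetical); these wrappers
exist so callers can write namespaced checks instead of relying on the C99
macros:

  #include "ceres/fpclassify.h"

  // Returns true if every entry of the array is finite.
  bool AllFinite(const double* values, int n) {
    for (int i = 0; i < n; ++i) {
      if (!ceres::IsFinite(values[i])) {
        return false;
      }
    }
    return true;
  }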
diff --git a/include/ceres/gradient_checker.h b/include/ceres/gradient_checker.h
new file mode 100644
index 0000000..2ce605d
--- /dev/null
+++ b/include/ceres/gradient_checker.h
@@ -0,0 +1,221 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+// Copyright 2007 Google Inc. All Rights Reserved.
+//
+// Author: wjr@google.com (William Rucklidge)
+//
+// This file contains a class that exercises a cost function, to make sure
+// that it is computing reasonable derivatives. It compares the Jacobians
+// computed by the cost function with those obtained by finite
+// differences.
+
+#ifndef CERES_PUBLIC_GRADIENT_CHECKER_H_
+#define CERES_PUBLIC_GRADIENT_CHECKER_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <vector>
+
+#include <glog/logging.h>
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/fixed_array.h"
+#include "ceres/internal/macros.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/numeric_diff_cost_function.h"
+
+namespace ceres {
+
+// An object that exercises a cost function, to compare the answers that it
+// gives with derivatives estimated using finite differencing.
+//
+// The only likely usage of this is for testing.
+//
+// How to use: Fill in an array of pointers to parameter blocks for your
+// CostFunction, and then call Probe(). Check that the return value is
+// 'true'. See prober_test.cc for an example.
+//
+// This is templated similarly to NumericDiffCostFunction, as it internally
+// uses that.
+template <typename CostFunctionToProbe,
+ int M = 0, int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0>
+class GradientChecker {
+ public:
+ // Here we stash some results from the probe, for later
+ // inspection.
+ struct GradientCheckResults {
+ // Computed cost.
+ Vector cost;
+
+ // The sizes of these matrices are dictated by the cost function's
+ // parameter and residual block sizes. Each vector's length will
+    // parameter and residual block sizes. Each vector's length will be
+    // term->parameter_block_sizes().size(), and each matrix is the
+ // block.
+
+ // Derivatives as computed by the cost function.
+ vector<Matrix> term_jacobians;
+
+ // Derivatives as computed by finite differencing.
+ vector<Matrix> finite_difference_jacobians;
+
+ // Infinity-norm of term_jacobians - finite_difference_jacobians.
+ double error_jacobians;
+ };
+
+ // Checks the Jacobian computed by a cost function.
+ //
+ // probe_point: The parameter values at which to probe.
+ // error_tolerance: A threshold for the infinity-norm difference
+ // between the Jacobians. If the Jacobians differ by more than
+ // this amount, then the probe fails.
+ //
+ // term: The cost function to test. Not retained after this call returns.
+ //
+ // results: On return, the two Jacobians (and other information)
+ // will be stored here. May be NULL.
+ //
+ // Returns true if no problems are detected and the difference between the
+ // Jacobians is less than error_tolerance.
+ static bool Probe(double const* const* probe_point,
+ double error_tolerance,
+ CostFunctionToProbe *term,
+ GradientCheckResults* results) {
+ CHECK_NOTNULL(probe_point);
+ CHECK_NOTNULL(term);
+ LOG(INFO) << "-------------------- Starting Probe() --------------------";
+
+    // We need a GradientCheckResults, whether or not the caller supplied one.
+ internal::scoped_ptr<GradientCheckResults> owned_results;
+ if (results == NULL) {
+ owned_results.reset(new GradientCheckResults);
+ results = owned_results.get();
+ }
+
+ // Do a consistency check between the term and the template parameters.
+ CHECK_EQ(M, term->num_residuals());
+ const int num_residuals = M;
+ const vector<int16>& block_sizes = term->parameter_block_sizes();
+ const int num_blocks = block_sizes.size();
+
+ CHECK_LE(num_blocks, 5) << "Unable to test functions that take more "
+ << "than 5 parameter blocks";
+ if (N0) {
+ CHECK_EQ(N0, block_sizes[0]);
+ CHECK_GE(num_blocks, 1);
+ } else {
+ CHECK_LT(num_blocks, 1);
+ }
+ if (N1) {
+ CHECK_EQ(N1, block_sizes[1]);
+ CHECK_GE(num_blocks, 2);
+ } else {
+ CHECK_LT(num_blocks, 2);
+ }
+ if (N2) {
+ CHECK_EQ(N2, block_sizes[2]);
+ CHECK_GE(num_blocks, 3);
+ } else {
+ CHECK_LT(num_blocks, 3);
+ }
+ if (N3) {
+ CHECK_EQ(N3, block_sizes[3]);
+ CHECK_GE(num_blocks, 4);
+ } else {
+ CHECK_LT(num_blocks, 4);
+ }
+ if (N4) {
+ CHECK_EQ(N4, block_sizes[4]);
+ CHECK_GE(num_blocks, 5);
+ } else {
+ CHECK_LT(num_blocks, 5);
+ }
+
+ results->term_jacobians.clear();
+ results->term_jacobians.resize(num_blocks);
+ results->finite_difference_jacobians.clear();
+ results->finite_difference_jacobians.resize(num_blocks);
+
+ internal::FixedArray<double*> term_jacobian_pointers(num_blocks);
+ internal::FixedArray<double*> finite_difference_jacobian_pointers(num_blocks);
+ for (int i = 0; i < num_blocks; i++) {
+ results->term_jacobians[i].resize(num_residuals, block_sizes[i]);
+ term_jacobian_pointers[i] = results->term_jacobians[i].data();
+ results->finite_difference_jacobians[i].resize(
+ num_residuals, block_sizes[i]);
+ finite_difference_jacobian_pointers[i] =
+ results->finite_difference_jacobians[i].data();
+ }
+ results->cost.resize(num_residuals, 1);
+
+ CHECK(term->Evaluate(probe_point, results->cost.data(),
+ term_jacobian_pointers.get()));
+ NumericDiffCostFunction<CostFunctionToProbe, CENTRAL, M, N0, N1, N2, N3, N4>
+ numeric_term(term, DO_NOT_TAKE_OWNERSHIP);
+ CHECK(numeric_term.Evaluate(probe_point, results->cost.data(),
+ finite_difference_jacobian_pointers.get()));
+
+ results->error_jacobians = 0;
+ for (int i = 0; i < num_blocks; i++) {
+ Matrix jacobian_difference = results->term_jacobians[i] -
+ results->finite_difference_jacobians[i];
+ results->error_jacobians =
+ std::max(results->error_jacobians,
+ jacobian_difference.lpNorm<Eigen::Infinity>());
+ }
+
+ LOG(INFO) << "========== term-computed derivatives ==========";
+ for (int i = 0; i < num_blocks; i++) {
+ LOG(INFO) << "term_computed block " << i;
+ LOG(INFO) << "\n" << results->term_jacobians[i];
+ }
+
+ LOG(INFO) << "========== finite-difference derivatives ==========";
+ for (int i = 0; i < num_blocks; i++) {
+ LOG(INFO) << "finite_difference block " << i;
+ LOG(INFO) << "\n" << results->finite_difference_jacobians[i];
+ }
+
+ LOG(INFO) << "========== difference ==========";
+ for (int i = 0; i < num_blocks; i++) {
+ LOG(INFO) << "difference block " << i;
+ LOG(INFO) << (results->term_jacobians[i] -
+ results->finite_difference_jacobians[i]);
+ }
+
+ LOG(INFO) << "||difference|| = " << results->error_jacobians;
+
+ return results->error_jacobians < error_tolerance;
+ }
+
+ private:
+ CERES_DISALLOW_IMPLICIT_CONSTRUCTORS(GradientChecker);
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_GRADIENT_CHECKER_H_
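A sketch of probing a hand-written cost function with GradientChecker. The
QuadraticCostFunction class and the probe values are hypothetical; the
template arguments are the cost function type, the number of residuals, and
the parameter block sizes, mirroring the checks in Probe() above:

  #include <glog/logging.h>
  #include "ceres/gradient_checker.h"
  #include "ceres/sized_cost_function.h"

  class QuadraticCostFunction : public ceres::SizedCostFunction<1, 2> {
   public:
    virtual bool Evaluate(double const* const* parameters,
                          double* residuals,
                          double** jacobians) const {
      const double* x = parameters[0];
      residuals[0] = x[0] * x[0] + 3.0 * x[1];
      if (jacobians != NULL && jacobians[0] != NULL) {
        jacobians[0][0] = 2.0 * x[0];
        jacobians[0][1] = 3.0;
      }
      return true;
    }
  };

  void CheckDerivatives() {
    QuadraticCostFunction term;
    double x[2] = {1.0, 2.0};
    const double* probe_point[] = {x};
    CHECK(ceres::GradientChecker<QuadraticCostFunction, 1, 2>::Probe(
        probe_point, 1e-6, &term, NULL));
  }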
diff --git a/include/ceres/internal/autodiff.h b/include/ceres/internal/autodiff.h
new file mode 100644
index 0000000..581e881
--- /dev/null
+++ b/include/ceres/internal/autodiff.h
@@ -0,0 +1,450 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Computation of the Jacobian matrix for vector-valued functions of multiple
+// variables, using automatic differentiation based on the implementation of
+// dual numbers in jet.h. Before reading the rest of this file, it is advisable
+// to read jet.h's header comment in detail.
+//
+// The helper wrapper AutoDiff::Differentiate() computes the jacobian of
+// functors with templated operator() taking this form:
+//
+// struct F {
+// template<typename T>
+//     bool operator()(const T *x, const T *y, ..., T *z) {
+// // Compute z[] based on x[], y[], ...
+// // return true if computation succeeded, false otherwise.
+// }
+// };
+//
+// All inputs and outputs may be vector-valued.
+//
+// To understand how jets are used to compute the jacobian, a
+// picture may help. Consider a vector-valued function, F, returning 3
+// dimensions and taking a vector-valued parameter of 4 dimensions:
+//
+// y x
+// [ * ] F [ * ]
+// [ * ] <--- [ * ]
+// [ * ] [ * ]
+// [ * ]
+//
+// Similar to the 2-parameter example for f described in jet.h, computing the
+// jacobian dy/dx is done by substituting a suitable jet object for x and all
+// intermediate steps of the computation of F. Since x has 4 dimensions, use
+// a Jet<double, 4>.
+//
+// Before substituting a jet object for x, the dual components are set
+// appropriately for each dimension of x:
+//
+// y x
+// [ * | * * * * ] f [ * | 1 0 0 0 ] x0
+// [ * | * * * * ] <--- [ * | 0 1 0 0 ] x1
+// [ * | * * * * ] [ * | 0 0 1 0 ] x2
+// ---+--- [ * | 0 0 0 1 ] x3
+// | ^ ^ ^ ^
+// dy/dx | | | +----- infinitesimal for x3
+// | | +------- infinitesimal for x2
+// | +--------- infinitesimal for x1
+// +----------- infinitesimal for x0
+//
+// The reason to set the internal 4x4 submatrix to the identity is that we wish
+// to take the derivative of y separately with respect to each dimension of x.
+// Each column of the 4x4 identity is therefore for a single component of the
+// independent variable x.
+//
+// Then the jacobian of the mapping, dy/dx, is the 3x4 sub-matrix of the
+// extended y vector, indicated in the above diagram.
+//
+// Functors with multiple parameters
+// ---------------------------------
+// In practice, it is often convenient to use a function f of two or more
+// vector-valued parameters, for example, x[3] and z[6]. Unfortunately, the jet
+// framework is designed for a single-parameter vector-valued input. The wrapper
+// in this file addresses this issue adding support for functions with one or
+// more parameter vectors.
+//
+// To support multiple parameters, all the parameter vectors are concatenated
+// into one and treated as a single parameter vector, except that since the
+// functor expects different inputs, we need to construct the jets as if they
+// were part of a single parameter vector. The extended jets are passed
+// separately for each parameter.
+//
+// For example, consider a functor F taking two vector parameters, p[2] and
+// q[3], and producing an output y[4]:
+//
+// struct F {
+// template<typename T>
+//     bool operator()(const T *p, const T *q, T *z) {
+// // ...
+// }
+// };
+//
+// In this case, the necessary jet type is Jet<double, 5>. Here is a
+// visualization of the jet objects in this case:
+//
+// Dual components for p ----+
+// |
+// -+-
+// y [ * | 1 0 | 0 0 0 ] --- p[0]
+// [ * | 0 1 | 0 0 0 ] --- p[1]
+// [ * | . . | + + + ] |
+// [ * | . . | + + + ] v
+// [ * | . . | + + + ] <--- F(p, q)
+// [ * | . . | + + + ] ^
+// ^^^ ^^^^^ |
+// dy/dp dy/dq [ * | 0 0 | 1 0 0 ] --- q[0]
+// [ * | 0 0 | 0 1 0 ] --- q[1]
+// [ * | 0 0 | 0 0 1 ] --- q[2]
+// --+--
+// |
+// Dual components for q --------------+
+//
+// where the 4x2 submatrix (marked with ".") and 4x3 submatrix (marked with "+")
+// of y in the above diagram are the derivatives of y with respect to p and q
+// respectively. This is how autodiff works for functors taking multiple vector
+// valued arguments (up to 6).
+//
+// Jacobian NULL pointers
+// ----------------------
+// In general, the functions below will accept NULL pointers for all or some of
+// the Jacobian parameters, meaning that those Jacobians will not be computed.
+
+#ifndef CERES_PUBLIC_INTERNAL_AUTODIFF_H_
+#define CERES_PUBLIC_INTERNAL_AUTODIFF_H_
+
+#include <stddef.h>
+
+#include <glog/logging.h>
+#include "ceres/jet.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/fixed_array.h"
+
+namespace ceres {
+namespace internal {
+
+// Extends src by a 1st order perturbation for every dimension and puts it in
+// dst. The size of src is N. Since this is also used for perturbations in
+// blocked arrays, offset is used to shift where in the jet the perturbation
+// occurs. This is used to set up the extended x augmented by an
+// identity matrix. The JetT type should be a Jet type, and T should be a
+// numeric type (e.g. double). For example,
+//
+// 0 1 2 3 4 5 6 7 8
+// dst[0] [ * | . . | 1 0 0 | . . . ]
+// dst[1] [ * | . . | 0 1 0 | . . . ]
+// dst[2] [ * | . . | 0 0 1 | . . . ]
+//
+// is what would get put in dst if N was 3, offset was 3, and the jet type JetT
+// was 8-dimensional.
+template <typename JetT, typename T>
+inline void Make1stOrderPerturbation(int offset, int N, const T *src,
+ JetT *dst) {
+ DCHECK(src);
+ DCHECK(dst);
+ for (int j = 0; j < N; ++j) {
+ dst[j] = JetT(src[j], offset + j);
+ }
+}
+
+// Takes the 0th order part of src, assumed to be a Jet type, and puts it in
+// dst. This is used to pick out the "vector" part of the extended y.
+template <typename JetT, typename T>
+inline void Take0thOrderPart(int M, const JetT *src, T dst) {
+ DCHECK(src);
+ for (int i = 0; i < M; ++i) {
+ dst[i] = src[i].a;
+ }
+}
+
+// Takes N 1st order parts, starting at index N0, and puts them in the M x N
+// matrix 'dst'. This is used to pick out the "matrix" parts of the extended y.
+template <typename JetT, typename T, int N0, int N>
+inline void Take1stOrderPart(const int M, const JetT *src, T *dst) {
+ DCHECK(src);
+ DCHECK(dst);
+ for (int i = 0; i < M; ++i) {
+ Eigen::Map<Eigen::Matrix<T, N, 1> >(dst + N * i, N) = src[i].v.template segment<N>(N0);
+ }
+}
+
+// This block of quasi-repeated code calls the user-supplied functor, which may
+// take a variable number of arguments. This is accomplished by specializing the
+// struct based on the size of the trailing parameters; parameters with 0 size
+// are assumed missing.
+//
+// Supporting variadic functions is the primary source of complexity in the
+// autodiff implementation.
+
+template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
+ int N5, int N6, int N7, int N8, int N9>
+struct VariadicEvaluate {
+ static bool Call(const Functor& functor, T const *const *input, T* output) {
+ return functor(input[0],
+ input[1],
+ input[2],
+ input[3],
+ input[4],
+ input[5],
+ input[6],
+ input[7],
+ input[8],
+ input[9],
+ output);
+ }
+};
+
+template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
+ int N5, int N6, int N7, int N8>
+struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, N6, N7, N8, 0> {
+ static bool Call(const Functor& functor, T const *const *input, T* output) {
+ return functor(input[0],
+ input[1],
+ input[2],
+ input[3],
+ input[4],
+ input[5],
+ input[6],
+ input[7],
+ input[8],
+ output);
+ }
+};
+
+template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
+ int N5, int N6, int N7>
+struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, N6, N7, 0, 0> {
+ static bool Call(const Functor& functor, T const *const *input, T* output) {
+ return functor(input[0],
+ input[1],
+ input[2],
+ input[3],
+ input[4],
+ input[5],
+ input[6],
+ input[7],
+ output);
+ }
+};
+
+template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
+ int N5, int N6>
+struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, N6, 0, 0, 0> {
+ static bool Call(const Functor& functor, T const *const *input, T* output) {
+ return functor(input[0],
+ input[1],
+ input[2],
+ input[3],
+ input[4],
+ input[5],
+ input[6],
+ output);
+ }
+};
+
+template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
+ int N5>
+struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, 0, 0, 0, 0> {
+ static bool Call(const Functor& functor, T const *const *input, T* output) {
+ return functor(input[0],
+ input[1],
+ input[2],
+ input[3],
+ input[4],
+ input[5],
+ output);
+ }
+};
+
+template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4>
+struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, 0, 0, 0, 0, 0> {
+ static bool Call(const Functor& functor, T const *const *input, T* output) {
+ return functor(input[0],
+ input[1],
+ input[2],
+ input[3],
+ input[4],
+ output);
+ }
+};
+
+template<typename Functor, typename T, int N0, int N1, int N2, int N3>
+struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, 0, 0, 0, 0, 0, 0> {
+ static bool Call(const Functor& functor, T const *const *input, T* output) {
+ return functor(input[0],
+ input[1],
+ input[2],
+ input[3],
+ output);
+ }
+};
+
+template<typename Functor, typename T, int N0, int N1, int N2>
+struct VariadicEvaluate<Functor, T, N0, N1, N2, 0, 0, 0, 0, 0, 0, 0> {
+ static bool Call(const Functor& functor, T const *const *input, T* output) {
+ return functor(input[0],
+ input[1],
+ input[2],
+ output);
+ }
+};
+
+template<typename Functor, typename T, int N0, int N1>
+struct VariadicEvaluate<Functor, T, N0, N1, 0, 0, 0, 0, 0, 0, 0, 0> {
+ static bool Call(const Functor& functor, T const *const *input, T* output) {
+ return functor(input[0],
+ input[1],
+ output);
+ }
+};
+
+template<typename Functor, typename T, int N0>
+struct VariadicEvaluate<Functor, T, N0, 0, 0, 0, 0, 0, 0, 0, 0, 0> {
+ static bool Call(const Functor& functor, T const *const *input, T* output) {
+ return functor(input[0],
+ output);
+ }
+};
+
+// This is in a struct because default template parameters on a function are not
+// supported in C++03 (though they are available in C++0x). N0 through N9 are
+// the dimensions of the input arguments to the user-supplied functor.
+template <typename Functor, typename T,
+ int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0,
+ int N5 = 0, int N6 = 0, int N7 = 0, int N8 = 0, int N9 = 0>
+struct AutoDiff {
+ static bool Differentiate(const Functor& functor,
+ T const *const *parameters,
+ int num_outputs,
+ T *function_value,
+ T **jacobians) {
+ // This block breaks the 80 column rule to keep it somewhat readable.
+ DCHECK_GT(num_outputs, 0);
+ CHECK((!N1 && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
+ ((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
+ ((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
+ ((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
+ ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) ||
+ ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) ||
+ ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) ||
+ ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) ||
+ ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) ||
+ ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0)))
+ << "Zero block cannot precede a non-zero block. Block sizes are "
+ << "(ignore trailing 0s): " << N0 << ", " << N1 << ", " << N2 << ", "
+ << N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", "
+ << N8 << ", " << N9;
+
+ typedef Jet<T, N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9> JetT;
+ FixedArray<JetT, (256 * 7) / sizeof(JetT)> x(
+ N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9 + num_outputs);
+
+ // These are the positions of the respective jets in the fixed array x.
+ const int jet0 = 0;
+ const int jet1 = N0;
+ const int jet2 = N0 + N1;
+ const int jet3 = N0 + N1 + N2;
+ const int jet4 = N0 + N1 + N2 + N3;
+ const int jet5 = N0 + N1 + N2 + N3 + N4;
+ const int jet6 = N0 + N1 + N2 + N3 + N4 + N5;
+ const int jet7 = N0 + N1 + N2 + N3 + N4 + N5 + N6;
+ const int jet8 = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7;
+ const int jet9 = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8;
+
+ const JetT *unpacked_parameters[10] = {
+ x.get() + jet0,
+ x.get() + jet1,
+ x.get() + jet2,
+ x.get() + jet3,
+ x.get() + jet4,
+ x.get() + jet5,
+ x.get() + jet6,
+ x.get() + jet7,
+ x.get() + jet8,
+ x.get() + jet9,
+ };
+    JetT *output = x.get() + N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9;
+
+#define CERES_MAKE_1ST_ORDER_PERTURBATION(i) \
+ if (N ## i) { \
+ internal::Make1stOrderPerturbation(jet ## i, \
+ N ## i, \
+ parameters[i], \
+ x.get() + jet ## i); \
+ }
+ CERES_MAKE_1ST_ORDER_PERTURBATION(0);
+ CERES_MAKE_1ST_ORDER_PERTURBATION(1);
+ CERES_MAKE_1ST_ORDER_PERTURBATION(2);
+ CERES_MAKE_1ST_ORDER_PERTURBATION(3);
+ CERES_MAKE_1ST_ORDER_PERTURBATION(4);
+ CERES_MAKE_1ST_ORDER_PERTURBATION(5);
+ CERES_MAKE_1ST_ORDER_PERTURBATION(6);
+ CERES_MAKE_1ST_ORDER_PERTURBATION(7);
+ CERES_MAKE_1ST_ORDER_PERTURBATION(8);
+ CERES_MAKE_1ST_ORDER_PERTURBATION(9);
+#undef CERES_MAKE_1ST_ORDER_PERTURBATION
+
+ if (!VariadicEvaluate<Functor, JetT,
+ N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Call(
+ functor, unpacked_parameters, output)) {
+ return false;
+ }
+
+ internal::Take0thOrderPart(num_outputs, output, function_value);
+
+#define CERES_TAKE_1ST_ORDER_PERTURBATION(i) \
+ if (N ## i) { \
+ if (jacobians[i]) { \
+ internal::Take1stOrderPart<JetT, T, \
+ jet ## i, \
+ N ## i>(num_outputs, \
+ output, \
+ jacobians[i]); \
+ } \
+ }
+ CERES_TAKE_1ST_ORDER_PERTURBATION(0);
+ CERES_TAKE_1ST_ORDER_PERTURBATION(1);
+ CERES_TAKE_1ST_ORDER_PERTURBATION(2);
+ CERES_TAKE_1ST_ORDER_PERTURBATION(3);
+ CERES_TAKE_1ST_ORDER_PERTURBATION(4);
+ CERES_TAKE_1ST_ORDER_PERTURBATION(5);
+ CERES_TAKE_1ST_ORDER_PERTURBATION(6);
+ CERES_TAKE_1ST_ORDER_PERTURBATION(7);
+ CERES_TAKE_1ST_ORDER_PERTURBATION(8);
+ CERES_TAKE_1ST_ORDER_PERTURBATION(9);
+#undef CERES_TAKE_1ST_ORDER_PERTURBATION
+ return true;
+ }
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_PUBLIC_INTERNAL_AUTODIFF_H_
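A sketch of invoking this helper directly, which AutoDiffCostFunction normally
does on the caller's behalf. The DotProductFunctor and the input values are
illustrative:

  #include "ceres/internal/autodiff.h"

  // y = p . q for two-dimensional p and q.
  struct DotProductFunctor {
    template <typename T>
    bool operator()(const T* p, const T* q, T* y) const {
      y[0] = p[0] * q[0] + p[1] * q[1];
      return true;
    }
  };

  bool ComputeValueAndJacobians() {
    double p[2] = {1.0, 2.0};
    double q[2] = {3.0, 4.0};
    const double* parameters[] = {p, q};

    double y[1];
    double dydp[2];  // Row-major 1x2 Jacobian d y / d p.
    double dydq[2];  // Row-major 1x2 Jacobian d y / d q.
    double* jacobians[] = {dydp, dydq};

    // The trailing template arguments are the parameter block sizes.
    return ceres::internal::AutoDiff<DotProductFunctor, double, 2, 2>::
        Differentiate(DotProductFunctor(), parameters, 1, y, jacobians);
  }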
diff --git a/include/ceres/internal/eigen.h b/include/ceres/internal/eigen.h
new file mode 100644
index 0000000..be76f9e
--- /dev/null
+++ b/include/ceres/internal/eigen.h
@@ -0,0 +1,80 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_EIGEN_H_
+#define CERES_INTERNAL_EIGEN_H_
+
+#include "Eigen/Core"
+
+namespace ceres {
+
+using Eigen::Dynamic;
+using Eigen::RowMajor;
+
+typedef Eigen::Matrix<double, Dynamic, 1> Vector;
+typedef Eigen::Matrix<double, Dynamic, Dynamic, RowMajor> Matrix;
+typedef Eigen::Map<Vector> VectorRef;
+typedef Eigen::Map<Matrix> MatrixRef;
+typedef Eigen::Map<Matrix, Eigen::Aligned> AlignedMatrixRef;
+typedef Eigen::Map<const Vector> ConstVectorRef;
+typedef Eigen::Map<const Matrix, Eigen::Aligned> ConstAlignedMatrixRef;
+typedef Eigen::Map<const Matrix> ConstMatrixRef;
+
+// C++ does not support templated typedefs, thus the need for this
+// struct so that we can support statically sized Matrix and Maps.
+template <int num_rows = Eigen::Dynamic, int num_cols = Eigen::Dynamic>
+struct EigenTypes {
+ typedef Eigen::Matrix <double, num_rows, num_cols, RowMajor>
+ Matrix;
+
+ typedef Eigen::Map<
+ Eigen::Matrix<double, num_rows, num_cols, RowMajor> >
+ MatrixRef;
+
+ typedef Eigen::Matrix <double, num_rows, 1>
+ Vector;
+
+ typedef Eigen::Map <
+ Eigen::Matrix<double, num_rows, 1> >
+ VectorRef;
+
+ typedef Eigen::Map<
+ const Eigen::Matrix<double, num_rows, num_cols, RowMajor> >
+ ConstMatrixRef;
+
+ typedef Eigen::Map <
+ const Eigen::Matrix<double, num_rows, 1> >
+ ConstVectorRef;
+};
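+
+// For illustration only (not part of the original header), a minimal sketch
+// of how these aliases might be used; "buffer" is a hypothetical
+// caller-owned array:
+//
+//   EigenTypes<3, 3>::Matrix m;                  // owns its own 3x3 storage
+//   double buffer[9];
+//   EigenTypes<3, 3>::MatrixRef m_ref(buffer);   // row-major view of buffer
+//   m.setZero();
+//   m_ref.setZero();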
+
+} // namespace ceres
+
+#endif // CERES_INTERNAL_EIGEN_H_
diff --git a/include/ceres/internal/fixed_array.h b/include/ceres/internal/fixed_array.h
new file mode 100644
index 0000000..fa4a339
--- /dev/null
+++ b/include/ceres/internal/fixed_array.h
@@ -0,0 +1,195 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: rennie@google.com (Jeffrey Rennie)
+// Author: sanjay@google.com (Sanjay Ghemawat) -- renamed to FixedArray
+
+#ifndef CERES_PUBLIC_INTERNAL_FIXED_ARRAY_H_
+#define CERES_PUBLIC_INTERNAL_FIXED_ARRAY_H_
+
+#include <cstddef>
+#include <glog/logging.h>
+#include "Eigen/Core"
+#include "ceres/internal/macros.h"
+#include "ceres/internal/manual_constructor.h"
+
+namespace ceres {
+namespace internal {
+
+// A FixedArray<T> represents a non-resizable array of T where the
+// length of the array does not need to be a compile time constant.
+//
+// FixedArray allocates small arrays inline, and large arrays on
+// the heap. It is a good replacement for non-standard and deprecated
+// uses of alloca() and variable length arrays (a GCC extension).
+//
+// FixedArray keeps performance fast for small arrays, because it
+// avoids heap operations. It also helps reduce the chances of
+// accidentally overflowing your stack if large input is passed to
+// your function.
+//
+// Also, FixedArray is useful for writing portable code. Not all
+// compilers support arrays of dynamic size.
+
+// Most users should not specify an inline_elements argument and let
+// FixedArray<> automatically determine the number of elements
+// to store inline based on sizeof(T).
+//
+// If inline_elements is specified, the FixedArray<> implementation
+// will store arrays of length <= inline_elements inline.
+//
+// Finally, note that unlike vector<T>, FixedArray<T> will not zero-initialize
+// simple types like int, double, bool, etc.
+//
+// Non-POD types will be default-initialized just like regular vectors or
+// arrays.
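+//
+// For illustration only (not part of the original header), a minimal usage
+// sketch; "ScaleInPlace" and "n" are hypothetical:
+//
+//   void ScaleInPlace(double* values, int n) {
+//     FixedArray<double> scaled(n);          // inline storage if n is small
+//     for (int i = 0; i < n; ++i) {
+//       scaled[i] = 2.0 * values[i];
+//     }
+//     // scaled.get() exposes the underlying contiguous array.
+//   }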
+
+#if defined(_WIN64)
+ typedef __int64 ssize_t;
+#elif defined(_WIN32)
+ typedef __int32 ssize_t;
+#endif
+
+template <typename T, ssize_t inline_elements = -1>
+class FixedArray {
+ public:
+ // For playing nicely with stl:
+ typedef T value_type;
+ typedef T* iterator;
+ typedef T const* const_iterator;
+ typedef T& reference;
+ typedef T const& const_reference;
+ typedef T* pointer;
+ typedef std::ptrdiff_t difference_type;
+ typedef size_t size_type;
+
+ // REQUIRES: n >= 0
+ // Creates an array object that can store "n" elements.
+ //
+  // FixedArray<T> will not zero-initialize POD (simple) types like int,
+ // double, bool, etc.
+ // Non-POD types will be default-initialized just like regular vectors or
+ // arrays.
+ explicit FixedArray(size_type n);
+
+ // Releases any resources.
+ ~FixedArray();
+
+ // Returns the length of the array.
+ inline size_type size() const { return size_; }
+
+ // Returns the memory size of the array in bytes.
+ inline size_t memsize() const { return size_ * sizeof(T); }
+
+ // Returns a pointer to the underlying element array.
+ inline const T* get() const { return &array_[0].element; }
+ inline T* get() { return &array_[0].element; }
+
+ // REQUIRES: 0 <= i < size()
+ // Returns a reference to the "i"th element.
+ inline T& operator[](size_type i) {
+ DCHECK_GE(i, 0);
+ DCHECK_LT(i, size_);
+ return array_[i].element;
+ }
+
+ // REQUIRES: 0 <= i < size()
+ // Returns a reference to the "i"th element.
+ inline const T& operator[](size_type i) const {
+ DCHECK_GE(i, 0);
+ DCHECK_LT(i, size_);
+ return array_[i].element;
+ }
+
+ inline iterator begin() { return &array_[0].element; }
+ inline iterator end() { return &array_[size_].element; }
+
+ inline const_iterator begin() const { return &array_[0].element; }
+ inline const_iterator end() const { return &array_[size_].element; }
+
+ private:
+ // Container to hold elements of type T. This is necessary to handle
+  // the case where T is a (C-style) array. The size of InnerContainer
+ // and T must be the same, otherwise callers' assumptions about use
+ // of this code will be broken.
+ struct InnerContainer {
+ T element;
+ };
+
+  // How many elements should we store inline?
+  //   a. If not specified, use a default of 256 bytes (256 bytes
+  //      seems small enough to not cause stack overflow or unnecessary
+  //      stack pollution, while still allowing stack allocation for
+  //      reasonably long character arrays).
+  //   b. Never use 0 length arrays (not ISO C++).
+ static const size_type S1 = ((inline_elements < 0)
+ ? (256/sizeof(T)) : inline_elements);
+ static const size_type S2 = (S1 <= 0) ? 1 : S1;
+ static const size_type kInlineElements = S2;
+
+ size_type const size_;
+ InnerContainer* const array_;
+
+ // Allocate some space, not an array of elements of type T, so that we can
+ // skip calling the T constructors and destructors for space we never use.
+ ManualConstructor<InnerContainer> inline_space_[kInlineElements];
+};
+
+// Implementation details follow
+
+template <class T, ssize_t S>
+inline FixedArray<T, S>::FixedArray(typename FixedArray<T, S>::size_type n)
+ : size_(n),
+ array_((n <= kInlineElements
+ ? reinterpret_cast<InnerContainer*>(inline_space_)
+ : new InnerContainer[n])) {
+ DCHECK_GE(n, size_t(0));
+
+ // Construct only the elements actually used.
+ if (array_ == reinterpret_cast<InnerContainer*>(inline_space_)) {
+ for (size_t i = 0; i != size_; ++i) {
+ inline_space_[i].Init();
+ }
+ }
+}
+
+template <class T, ssize_t S>
+inline FixedArray<T, S>::~FixedArray() {
+ if (array_ != reinterpret_cast<InnerContainer*>(inline_space_)) {
+ delete[] array_;
+ } else {
+ for (size_t i = 0; i != size_; ++i) {
+ inline_space_[i].Destroy();
+ }
+ }
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_PUBLIC_INTERNAL_FIXED_ARRAY_H_
diff --git a/include/ceres/internal/macros.h b/include/ceres/internal/macros.h
new file mode 100644
index 0000000..83ec311
--- /dev/null
+++ b/include/ceres/internal/macros.h
@@ -0,0 +1,171 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+//
+// Various Google-specific macros.
+//
+// This code is compiled directly on many platforms, including client
+// platforms like Windows, Mac, and embedded systems. Before making
+// any changes here, make sure that you're not breaking any platforms.
+
+#ifndef CERES_PUBLIC_INTERNAL_MACROS_H_
+#define CERES_PUBLIC_INTERNAL_MACROS_H_
+
+#include <cstddef> // For size_t.
+
+// A macro to disallow the copy constructor and operator= functions
+// This should be used in the private: declarations for a class
+//
+// For disallowing only assign or copy, write the code directly, but declare
+// the intent in a comment, for example:
+//
+// void operator=(const TypeName&); // _DISALLOW_ASSIGN
+
+// Note that most uses of CERES_DISALLOW_ASSIGN and CERES_DISALLOW_COPY
+// are semantically broken: one should disallow both or neither. Try to
+// avoid these in new code.
+#define CERES_DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&); \
+ void operator=(const TypeName&)
+
+// A macro to disallow all the implicit constructors, namely the
+// default constructor, copy constructor and operator= functions.
+//
+// This should be used in the private: declarations for a class
+// that wants to prevent anyone from instantiating it. This is
+// especially useful for classes containing only static methods.
+#define CERES_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+ TypeName(); \
+ CERES_DISALLOW_COPY_AND_ASSIGN(TypeName)
+
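+// For illustration only (not part of the original header), typical usage
+// inside a hypothetical class:
+//
+//   class Sweeper {
+//    public:
+//     Sweeper() {}
+//     void Sweep();
+//    private:
+//     CERES_DISALLOW_COPY_AND_ASSIGN(Sweeper);
+//   };
+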
+// The arraysize(arr) macro returns the # of elements in an array arr.
+// The expression is a compile-time constant, and therefore can be
+// used in defining new arrays, for example. If you use arraysize on
+// a pointer by mistake, you will get a compile-time error.
+//
+// One caveat is that arraysize() doesn't accept any array of an
+// anonymous type or a type defined inside a function. In these rare
+// cases, you have to use the unsafe ARRAYSIZE() macro below. This is
+// due to a limitation in C++'s template system. The limitation might
+// eventually be removed, but it hasn't happened yet.
+
+// This template function declaration is used in defining arraysize.
+// Note that the function doesn't need an implementation, as we only
+// use its type.
+template <typename T, size_t N>
+char (&ArraySizeHelper(T (&array)[N]))[N];
+
+// That gcc wants both of these prototypes seems mysterious. VC, for
+// its part, can't decide which to use (another mystery). Matching of
+// template overloads: the final frontier.
+#ifndef _WIN32
+template <typename T, size_t N>
+char (&ArraySizeHelper(const T (&array)[N]))[N];
+#endif
+
+#define arraysize(array) (sizeof(ArraySizeHelper(array)))
+
+// ARRAYSIZE performs essentially the same calculation as arraysize,
+// but can be used on anonymous types or types defined inside
+// functions. It's less safe than arraysize as it accepts some
+// (although not all) pointers. Therefore, you should use arraysize
+// whenever possible.
+//
+// The expression ARRAYSIZE(a) is a compile-time constant of type
+// size_t.
+//
+// ARRAYSIZE catches a few type errors. If you see a compiler error
+//
+// "warning: division by zero in ..."
+//
+// when using ARRAYSIZE, you are (wrongfully) giving it a pointer.
+// You should only use ARRAYSIZE on statically allocated arrays.
+//
+// The following comments are on the implementation details, and can
+// be ignored by the users.
+//
+// ARRAYSIZE(arr) works by inspecting sizeof(arr) (the # of bytes in
+// the array) and sizeof(*(arr)) (the # of bytes in one array
+// element). If the former is divisible by the latter, perhaps arr is
+// indeed an array, in which case the division result is the # of
+// elements in the array. Otherwise, arr cannot possibly be an array,
+// and we generate a compiler error to prevent the code from
+// compiling.
+//
+// Since the size of bool is implementation-defined, we need to cast
+// !(sizeof(a) % sizeof(*(a))) to size_t in order to ensure the final
+// result has type size_t.
+//
+// This macro is not perfect as it wrongfully accepts certain
+// pointers, namely where the pointer size is divisible by the pointee
+// size. Since all our code has to go through a 32-bit compiler,
+// where a pointer is 4 bytes, this means all pointers to a type whose
+// size is 3 or greater than 4 will be (righteously) rejected.
+//
+// Kudos to Jorg Brown for this simple and elegant implementation.
+//
+// - wan 2005-11-16
+//
+// Starting with Visual C++ 2005, WinNT.h includes ARRAYSIZE. However,
+// the definition comes from the over-broad windows.h header that
+// introduces a macro, ERROR, that conflicts with the logging framework
+// that Ceres uses. Instead, rename ARRAYSIZE to CERES_ARRAYSIZE.
+#define CERES_ARRAYSIZE(a) \
+ ((sizeof(a) / sizeof(*(a))) / \
+ static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
+
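+// For illustration only (not part of the original header): given a
+// hypothetical statically allocated array
+//
+//   double knots[] = {1.0, 2.0, 3.0};
+//
+// both arraysize(knots) and CERES_ARRAYSIZE(knots) evaluate to 3 at compile
+// time, while arraysize(p) fails to compile when p is a double* pointer.
+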
+// Tell the compiler to warn about unused return values for functions declared
+// with this macro. The macro should be used on function declarations
+// following the argument list:
+//
+// Sprocket* AllocateSprocket() MUST_USE_RESULT;
+//
+#undef MUST_USE_RESULT
+#if (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) \
+ && !defined(COMPILER_ICC)
+#define MUST_USE_RESULT __attribute__ ((warn_unused_result))
+#else
+#define MUST_USE_RESULT
+#endif
+
+// Platform independent macros to get aligned memory allocations.
+// For example
+//
+// MyFoo my_foo CERES_ALIGN_ATTRIBUTE(16);
+//
+// Gives us an instance of MyFoo which is aligned at a 16 byte
+// boundary.
+#if defined(_MSC_VER)
+#define CERES_ALIGN_ATTRIBUTE(n) __declspec(align(n))
+#define CERES_ALIGN_OF(T) __alignof(T)
+#elif defined(__GNUC__)
+#define CERES_ALIGN_ATTRIBUTE(n) __attribute__((aligned(n)))
+#define CERES_ALIGN_OF(T) __alignof(T)
+#endif
+
+#endif // CERES_PUBLIC_INTERNAL_MACROS_H_
diff --git a/include/ceres/internal/manual_constructor.h b/include/ceres/internal/manual_constructor.h
new file mode 100644
index 0000000..7ea723d
--- /dev/null
+++ b/include/ceres/internal/manual_constructor.h
@@ -0,0 +1,208 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: kenton@google.com (Kenton Varda)
+//
+// ManualConstructor statically-allocates space in which to store some
+// object, but does not initialize it. You can then call the constructor
+// and destructor for the object yourself as you see fit. This is useful
+// for memory management optimizations, where you want to initialize and
+// destroy an object multiple times but only allocate it once.
+//
+// (When I say ManualConstructor statically allocates space, I mean that
+// the ManualConstructor object itself is forced to be the right size.)
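+//
+// For illustration only (not part of the original header), a minimal sketch;
+// "Widget" is a hypothetical default-constructible type:
+//
+//   ManualConstructor<Widget> slot;
+//   slot.Init();        // placement-new a Widget into the reserved storage
+//   slot->DoWork();     // use it like a pointer
+//   slot.Destroy();     // run ~Widget(); the storage itself is untouched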
+
+#ifndef CERES_PUBLIC_INTERNAL_MANUAL_CONSTRUCTOR_H_
+#define CERES_PUBLIC_INTERNAL_MANUAL_CONSTRUCTOR_H_
+
+#include <new>
+
+namespace ceres {
+namespace internal {
+
+// ------- Define CERES_ALIGNED_CHAR_ARRAY --------------------------------
+
+#ifndef CERES_ALIGNED_CHAR_ARRAY
+
+// Because MSVC and older GCCs require that the argument to their alignment
+// construct to be a literal constant integer, we use a template instantiated
+// at all the possible powers of two.
+template<int alignment, int size> struct AlignType { };
+template<int size> struct AlignType<0, size> { typedef char result[size]; };
+
+#if !defined(CERES_ALIGN_ATTRIBUTE)
+#define CERES_ALIGNED_CHAR_ARRAY you_must_define_CERES_ALIGNED_CHAR_ARRAY_for_your_compiler
+#else // !defined(CERES_ALIGN_ATTRIBUTE)
+
+#define CERES_ALIGN_TYPE_TEMPLATE(X) \
+ template<int size> struct AlignType<X, size> { \
+ typedef CERES_ALIGN_ATTRIBUTE(X) char result[size]; \
+ }
+
+CERES_ALIGN_TYPE_TEMPLATE(1);
+CERES_ALIGN_TYPE_TEMPLATE(2);
+CERES_ALIGN_TYPE_TEMPLATE(4);
+CERES_ALIGN_TYPE_TEMPLATE(8);
+CERES_ALIGN_TYPE_TEMPLATE(16);
+CERES_ALIGN_TYPE_TEMPLATE(32);
+CERES_ALIGN_TYPE_TEMPLATE(64);
+CERES_ALIGN_TYPE_TEMPLATE(128);
+CERES_ALIGN_TYPE_TEMPLATE(256);
+CERES_ALIGN_TYPE_TEMPLATE(512);
+CERES_ALIGN_TYPE_TEMPLATE(1024);
+CERES_ALIGN_TYPE_TEMPLATE(2048);
+CERES_ALIGN_TYPE_TEMPLATE(4096);
+CERES_ALIGN_TYPE_TEMPLATE(8192);
+// Any larger and MSVC++ will complain.
+
+#undef CERES_ALIGN_TYPE_TEMPLATE
+
+#define CERES_ALIGNED_CHAR_ARRAY(T, Size) \
+ typename AlignType<CERES_ALIGN_OF(T), sizeof(T) * Size>::result
+
+#endif // !defined(CERES_ALIGN_ATTRIBUTE)
+
+#endif // CERES_ALIGNED_CHAR_ARRAY
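+
+// For illustration only (not part of the original header):
+// CERES_ALIGNED_CHAR_ARRAY(double, 4) expands to a char array of
+// sizeof(double) * 4 bytes with the alignment of double, suitable as raw
+// storage for four doubles created via placement new.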
+
+template <typename Type>
+class ManualConstructor {
+ public:
+ // No constructor or destructor because one of the most useful uses of
+ // this class is as part of a union, and members of a union cannot have
+ // constructors or destructors. And, anyway, the whole point of this
+ // class is to bypass these.
+
+ inline Type* get() {
+ return reinterpret_cast<Type*>(space_);
+ }
+ inline const Type* get() const {
+ return reinterpret_cast<const Type*>(space_);
+ }
+
+ inline Type* operator->() { return get(); }
+ inline const Type* operator->() const { return get(); }
+
+ inline Type& operator*() { return *get(); }
+ inline const Type& operator*() const { return *get(); }
+
+ // This is needed to get around the strict aliasing warning GCC generates.
+ inline void* space() {
+ return reinterpret_cast<void*>(space_);
+ }
+
+ // You can pass up to four constructor arguments as arguments of Init().
+ inline void Init() {
+ new(space()) Type;
+ }
+
+ template <typename T1>
+ inline void Init(const T1& p1) {
+ new(space()) Type(p1);
+ }
+
+ template <typename T1, typename T2>
+ inline void Init(const T1& p1, const T2& p2) {
+ new(space()) Type(p1, p2);
+ }
+
+ template <typename T1, typename T2, typename T3>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3) {
+ new(space()) Type(p1, p2, p3);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4) {
+ new(space()) Type(p1, p2, p3, p4);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4, typename T5>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
+ const T5& p5) {
+ new(space()) Type(p1, p2, p3, p4, p5);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
+ const T5& p5, const T6& p6) {
+ new(space()) Type(p1, p2, p3, p4, p5, p6);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
+ const T5& p5, const T6& p6, const T7& p7) {
+ new(space()) Type(p1, p2, p3, p4, p5, p6, p7);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
+ const T5& p5, const T6& p6, const T7& p7, const T8& p8) {
+ new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
+ const T5& p5, const T6& p6, const T7& p7, const T8& p8,
+ const T9& p9) {
+ new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
+ const T5& p5, const T6& p6, const T7& p7, const T8& p8,
+ const T9& p9, const T10& p10) {
+ new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10);
+ }
+
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+ inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
+ const T5& p5, const T6& p6, const T7& p7, const T8& p8,
+ const T9& p9, const T10& p10, const T11& p11) {
+ new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11);
+ }
+
+ inline void Destroy() {
+ get()->~Type();
+ }
+
+ private:
+ CERES_ALIGNED_CHAR_ARRAY(Type, 1) space_;
+};
+
+#undef CERES_ALIGNED_CHAR_ARRAY
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_PUBLIC_INTERNAL_MANUAL_CONSTRUCTOR_H_
diff --git a/include/ceres/internal/port.h b/include/ceres/internal/port.h
new file mode 100644
index 0000000..a9fe247
--- /dev/null
+++ b/include/ceres/internal/port.h
@@ -0,0 +1,50 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_PUBLIC_INTERNAL_PORT_H_
+#define CERES_PUBLIC_INTERNAL_PORT_H_
+
+#include <string>
+
+namespace ceres {
+
+// It is unfortunate that this import of the entire standard namespace is
+// necessary. The reasons are historical and won't be explained here, but
+// suffice to say it is not a mistake and can't be removed without breaking
+// things outside of the Ceres optimization package.
+using namespace std;
+
+// This is necessary to properly handle the case that there is a different
+// "string" implementation in the global namespace.
+using std::string;
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_INTERNAL_PORT_H_
diff --git a/include/ceres/internal/scoped_ptr.h b/include/ceres/internal/scoped_ptr.h
new file mode 100644
index 0000000..44f198b
--- /dev/null
+++ b/include/ceres/internal/scoped_ptr.h
@@ -0,0 +1,311 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: jorg@google.com (Jorg Brown)
+//
+// This is an implementation designed to match the anticipated future TR2
+// implementation of the scoped_ptr class, and its closely-related brethren,
+// scoped_array, scoped_ptr_malloc, and make_scoped_ptr.
+
+#ifndef CERES_PUBLIC_INTERNAL_SCOPED_PTR_H_
+#define CERES_PUBLIC_INTERNAL_SCOPED_PTR_H_
+
+#include <assert.h>
+#include <stdlib.h>
+#include <cstddef>
+
+namespace ceres {
+namespace internal {
+
+template <class C> class scoped_ptr;
+template <class C, class Free> class scoped_ptr_malloc;
+template <class C> class scoped_array;
+
+template <class C>
+scoped_ptr<C> make_scoped_ptr(C *);
+
+// A scoped_ptr<T> is like a T*, except that the destructor of scoped_ptr<T>
+// automatically deletes the pointer it holds (if any). That is, scoped_ptr<T>
+// owns the T object that it points to. Like a T*, a scoped_ptr<T> may hold
+// either NULL or a pointer to a T object. Also like T*, scoped_ptr<T> is
+// thread-compatible, and once you dereference it, you get the threadsafety
+// guarantees of T.
+//
+// The size of a scoped_ptr is small: sizeof(scoped_ptr<C>) == sizeof(C*)
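+//
+// For illustration only (not part of the original header); "Widget" is a
+// hypothetical type:
+//
+//   scoped_ptr<Widget> w(new Widget);
+//   w->DoWork();          // use like a raw pointer
+//   w.reset(new Widget);  // deletes the old Widget, takes ownership of the new one
+//   // the remaining Widget is deleted when w goes out of scope.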
+template <class C>
+class scoped_ptr {
+ public:
+
+ // The element type
+ typedef C element_type;
+
+  // Constructor. Defaults to initializing with NULL.
+ // There is no way to create an uninitialized scoped_ptr.
+ // The input parameter must be allocated with new.
+ explicit scoped_ptr(C* p = NULL) : ptr_(p) { }
+
+ // Destructor. If there is a C object, delete it.
+ // We don't need to test ptr_ == NULL because C++ does that for us.
+ ~scoped_ptr() {
+ enum { type_must_be_complete = sizeof(C) };
+ delete ptr_;
+ }
+
+ // Reset. Deletes the current owned object, if any.
+ // Then takes ownership of a new object, if given.
+ // this->reset(this->get()) works.
+ void reset(C* p = NULL) {
+ if (p != ptr_) {
+ enum { type_must_be_complete = sizeof(C) };
+ delete ptr_;
+ ptr_ = p;
+ }
+ }
+
+ // Accessors to get the owned object.
+ // operator* and operator-> will assert() if there is no current object.
+ C& operator*() const {
+ assert(ptr_ != NULL);
+ return *ptr_;
+ }
+ C* operator->() const {
+ assert(ptr_ != NULL);
+ return ptr_;
+ }
+ C* get() const { return ptr_; }
+
+ // Comparison operators.
+ // These return whether a scoped_ptr and a raw pointer refer to
+ // the same object, not just to two different but equal objects.
+ bool operator==(const C* p) const { return ptr_ == p; }
+ bool operator!=(const C* p) const { return ptr_ != p; }
+
+ // Swap two scoped pointers.
+ void swap(scoped_ptr& p2) {
+ C* tmp = ptr_;
+ ptr_ = p2.ptr_;
+ p2.ptr_ = tmp;
+ }
+
+ // Release a pointer.
+ // The return value is the current pointer held by this object.
+ // If this object holds a NULL pointer, the return value is NULL.
+ // After this operation, this object will hold a NULL pointer,
+ // and will not own the object any more.
+ C* release() {
+ C* retVal = ptr_;
+ ptr_ = NULL;
+ return retVal;
+ }
+
+ private:
+ C* ptr_;
+
+ // google3 friend class that can access copy ctor (although if it actually
+ // calls a copy ctor, there will be a problem) see below
+ friend scoped_ptr<C> make_scoped_ptr<C>(C *p);
+
+ // Forbid comparison of scoped_ptr types. If C2 != C, it totally doesn't
+ // make sense, and if C2 == C, it still doesn't make sense because you should
+ // never have the same object owned by two different scoped_ptrs.
+ template <class C2> bool operator==(scoped_ptr<C2> const& p2) const;
+ template <class C2> bool operator!=(scoped_ptr<C2> const& p2) const;
+
+ // Disallow evil constructors
+ scoped_ptr(const scoped_ptr&);
+ void operator=(const scoped_ptr&);
+};
+
+// Free functions
+template <class C>
+inline void swap(scoped_ptr<C>& p1, scoped_ptr<C>& p2) {
+ p1.swap(p2);
+}
+
+template <class C>
+inline bool operator==(const C* p1, const scoped_ptr<C>& p2) {
+ return p1 == p2.get();
+}
+
+template <class C>
+inline bool operator==(const C* p1, const scoped_ptr<const C>& p2) {
+ return p1 == p2.get();
+}
+
+template <class C>
+inline bool operator!=(const C* p1, const scoped_ptr<C>& p2) {
+ return p1 != p2.get();
+}
+
+template <class C>
+inline bool operator!=(const C* p1, const scoped_ptr<const C>& p2) {
+ return p1 != p2.get();
+}
+
+template <class C>
+scoped_ptr<C> make_scoped_ptr(C *p) {
+  // This does nothing but return a scoped_ptr of the type of the passed
+  // pointer. (This eliminates the need to specify the name of T when
+ // making a scoped_ptr that is used anonymously/temporarily.) From an
+ // access control point of view, we construct an unnamed scoped_ptr here
+ // which we return and thus copy-construct. Hence, we need to have access
+ // to scoped_ptr::scoped_ptr(scoped_ptr const &). However, it is guaranteed
+ // that we never actually call the copy constructor, which is a good thing
+ // as we would call the temporary's object destructor (and thus delete p)
+ // if we actually did copy some object, here.
+ return scoped_ptr<C>(p);
+}
+
+// scoped_array<C> is like scoped_ptr<C>, except that the caller must allocate
+// with new [] and the destructor deletes objects with delete [].
+//
+// As with scoped_ptr<C>, a scoped_array<C> either points to an object
+// or is NULL. A scoped_array<C> owns the object that it points to.
+// scoped_array<T> is thread-compatible, and once you index into it,
+// the returned objects have only the threadsafety guarantees of T.
+//
+// Size: sizeof(scoped_array<C>) == sizeof(C*)
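+//
+// For illustration only (not part of the original header); "n" is a
+// hypothetical element count:
+//
+//   scoped_array<double> residuals(new double[n]);
+//   residuals[0] = 0.0;   // operator[] indexes into the owned array
+//   // delete[] runs automatically when residuals goes out of scope.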
+template <class C>
+class scoped_array {
+ public:
+
+ // The element type
+ typedef C element_type;
+
+  // Constructor. Defaults to initializing with NULL.
+ // There is no way to create an uninitialized scoped_array.
+ // The input parameter must be allocated with new [].
+ explicit scoped_array(C* p = NULL) : array_(p) { }
+
+ // Destructor. If there is a C object, delete it.
+  // We don't need to test array_ == NULL because C++ does that for us.
+ ~scoped_array() {
+ enum { type_must_be_complete = sizeof(C) };
+ delete[] array_;
+ }
+
+ // Reset. Deletes the current owned object, if any.
+ // Then takes ownership of a new object, if given.
+ // this->reset(this->get()) works.
+ void reset(C* p = NULL) {
+ if (p != array_) {
+ enum { type_must_be_complete = sizeof(C) };
+ delete[] array_;
+ array_ = p;
+ }
+ }
+
+ // Get one element of the current object.
+ // Will assert() if there is no current object, or index i is negative.
+ C& operator[](std::ptrdiff_t i) const {
+ assert(i >= 0);
+ assert(array_ != NULL);
+ return array_[i];
+ }
+
+ // Get a pointer to the zeroth element of the current object.
+ // If there is no current object, return NULL.
+ C* get() const {
+ return array_;
+ }
+
+ // Comparison operators.
+ // These return whether a scoped_array and a raw pointer refer to
+ // the same array, not just to two different but equal arrays.
+ bool operator==(const C* p) const { return array_ == p; }
+ bool operator!=(const C* p) const { return array_ != p; }
+
+ // Swap two scoped arrays.
+ void swap(scoped_array& p2) {
+ C* tmp = array_;
+ array_ = p2.array_;
+ p2.array_ = tmp;
+ }
+
+ // Release an array.
+ // The return value is the current pointer held by this object.
+ // If this object holds a NULL pointer, the return value is NULL.
+ // After this operation, this object will hold a NULL pointer,
+ // and will not own the object any more.
+ C* release() {
+ C* retVal = array_;
+ array_ = NULL;
+ return retVal;
+ }
+
+ private:
+ C* array_;
+
+ // Forbid comparison of different scoped_array types.
+ template <class C2> bool operator==(scoped_array<C2> const& p2) const;
+ template <class C2> bool operator!=(scoped_array<C2> const& p2) const;
+
+ // Disallow evil constructors
+ scoped_array(const scoped_array&);
+ void operator=(const scoped_array&);
+};
+
+// Free functions
+template <class C>
+inline void swap(scoped_array<C>& p1, scoped_array<C>& p2) {
+ p1.swap(p2);
+}
+
+template <class C>
+inline bool operator==(const C* p1, const scoped_array<C>& p2) {
+ return p1 == p2.get();
+}
+
+template <class C>
+inline bool operator==(const C* p1, const scoped_array<const C>& p2) {
+ return p1 == p2.get();
+}
+
+template <class C>
+inline bool operator!=(const C* p1, const scoped_array<C>& p2) {
+ return p1 != p2.get();
+}
+
+template <class C>
+inline bool operator!=(const C* p1, const scoped_array<const C>& p2) {
+ return p1 != p2.get();
+}
+
+// This class wraps the c library function free() in a class that can be
+// passed as a template argument to scoped_ptr_malloc below.
+class ScopedPtrMallocFree {
+ public:
+ inline void operator()(void* x) const {
+ free(x);
+ }
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_PUBLIC_INTERNAL_SCOPED_PTR_H_
diff --git a/include/ceres/iteration_callback.h b/include/ceres/iteration_callback.h
new file mode 100644
index 0000000..57cf0a6
--- /dev/null
+++ b/include/ceres/iteration_callback.h
@@ -0,0 +1,200 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// When an iteration callback is specified, Ceres calls the callback
+// after each minimizer step (if the minimizer has not converged) and
+// passes it an IterationSummary object, defined below.
+
+#ifndef CERES_PUBLIC_ITERATION_CALLBACK_H_
+#define CERES_PUBLIC_ITERATION_CALLBACK_H_
+
+#include "ceres/types.h"
+
+namespace ceres {
+
+// This struct describes the state of the optimizer after each
+// iteration of the minimization.
+struct IterationSummary {
+ IterationSummary()
+ : iteration(0),
+ step_is_valid(false),
+ step_is_nonmonotonic(false),
+ step_is_successful(false),
+ cost(0.0),
+ cost_change(0.0),
+ gradient_max_norm(0.0),
+ step_norm(0.0),
+ eta(0.0),
+ linear_solver_iterations(0),
+ iteration_time_in_seconds(0.0),
+ step_solver_time_in_seconds(0.0),
+ cumulative_time_in_seconds(0.0) {}
+
+ // Current iteration number.
+ int32 iteration;
+
+ // Step was numerically valid, i.e., all values are finite and the
+ // step reduces the value of the linearized model.
+ //
+ // Note: step_is_valid is false when iteration = 0.
+ bool step_is_valid;
+
+ // Step did not reduce the value of the objective function
+ // sufficiently, but it was accepted because of the relaxed
+ // acceptance criterion used by the non-monotonic trust region
+ // algorithm.
+ //
+  // Note: step_is_nonmonotonic is false when iteration = 0.
+ bool step_is_nonmonotonic;
+
+  // Whether or not the minimizer accepted this step. If the
+ // ordinary trust region algorithm is used, this means that the
+ // relative reduction in the objective function value was greater
+ // than Solver::Options::min_relative_decrease. However, if the
+ // non-monotonic trust region algorithm is used
+  // (Solver::Options::use_nonmonotonic_steps = true), then even if the
+ // relative decrease is not sufficient, the algorithm may accept the
+ // step and the step is declared successful.
+ //
+ // Note: step_is_successful is false when iteration = 0.
+ bool step_is_successful;
+
+ // Value of the objective function.
+ double cost;
+
+ // Change in the value of the objective function in this
+ // iteration. This can be positive or negative.
+ double cost_change;
+
+ // Infinity norm of the gradient vector.
+ double gradient_max_norm;
+
+ // 2-norm of the size of the step computed by the optimization
+ // algorithm.
+ double step_norm;
+
+ // For trust region algorithms, the ratio of the actual change in
+ // cost and the change in the cost of the linearized approximation.
+ double relative_decrease;
+
+ // Size of the trust region at the end of the current iteration. For
+ // the Levenberg-Marquardt algorithm, the regularization parameter
+ // mu = 1.0 / trust_region_radius.
+ double trust_region_radius;
+
+ // For the inexact step Levenberg-Marquardt algorithm, this is the
+ // relative accuracy with which the Newton(LM) step is solved. This
+ // number affects only the iterative solvers capable of solving
+ // linear systems inexactly. Factorization-based exact solvers
+ // ignore it.
+ double eta;
+
+ // Number of iterations taken by the linear solver to solve for the
+ // Newton step.
+ int linear_solver_iterations;
+
+ // Time (in seconds) spent inside the minimizer loop in the current
+ // iteration.
+ double iteration_time_in_seconds;
+
+ // Time (in seconds) spent inside the trust region step solver.
+ double step_solver_time_in_seconds;
+
+ // Time (in seconds) since the user called Solve().
+ double cumulative_time_in_seconds;
+};
+
+// Interface for specifying callbacks that are executed at the end of
+// each iteration of the Minimizer. The solver uses the return value
+// of operator() to decide whether to continue solving or to
+// terminate. The user can return one of three values.
+//
+// SOLVER_ABORT indicates that the callback detected an abnormal
+// situation. The solver returns without updating the parameter blocks
+// (unless Solver::Options::update_state_every_iteration is set
+// true). Solver returns with Solver::Summary::termination_type set to
+// USER_ABORT.
+//
+// SOLVER_TERMINATE_SUCCESSFULLY indicates that there is no need to
+// optimize anymore (some user specified termination criterion has
+// been met). Solver returns with Solver::Summary::termination_type
+// set to USER_SUCCESS.
+//
+// SOLVER_CONTINUE indicates that the solver should continue
+// optimizing.
+//
+// For example, the following Callback is used internally by Ceres to
+// log the progress of the optimization.
+//
+// Callback for logging the state of the minimizer to STDERR or STDOUT
+// depending on the user's preferences and logging level.
+//
+// class LoggingCallback : public IterationCallback {
+// public:
+// explicit LoggingCallback(bool log_to_stdout)
+// : log_to_stdout_(log_to_stdout) {}
+//
+// ~LoggingCallback() {}
+//
+// CallbackReturnType operator()(const IterationSummary& summary) {
+// const char* kReportRowFormat =
+// "% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e "
+// "rho:% 3.2e mu:% 3.2e eta:% 3.2e li:% 3d";
+// string output = StringPrintf(kReportRowFormat,
+// summary.iteration,
+// summary.cost,
+// summary.cost_change,
+// summary.gradient_max_norm,
+// summary.step_norm,
+// summary.relative_decrease,
+// summary.trust_region_radius,
+// summary.eta,
+// summary.linear_solver_iterations);
+// if (log_to_stdout_) {
+// cout << output << endl;
+// } else {
+// VLOG(1) << output;
+// }
+// return SOLVER_CONTINUE;
+// }
+//
+// private:
+// const bool log_to_stdout_;
+// };
+//
+class IterationCallback {
+ public:
+ virtual ~IterationCallback() {}
+ virtual CallbackReturnType operator()(const IterationSummary& summary) = 0;
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_ITERATION_CALLBACK_H_
diff --git a/include/ceres/jet.h b/include/ceres/jet.h
new file mode 100644
index 0000000..96e2256
--- /dev/null
+++ b/include/ceres/jet.h
@@ -0,0 +1,678 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// A simple implementation of N-dimensional dual numbers, for automatically
+// computing exact derivatives of functions.
+//
+// While a complete treatment of the mechanics of automatic differentiation is
+// beyond the scope of this header (see
+// http://en.wikipedia.org/wiki/Automatic_differentiation for details), the
+// basic idea is to extend normal arithmetic with an extra element, "e," often
+// denoted with the greek symbol epsilon, such that e != 0 but e^2 = 0. Dual
+// numbers are extensions of the real numbers analogous to complex numbers:
+// whereas complex numbers augment the reals by introducing an imaginary unit i
+// such that i^2 = -1, dual numbers introduce an "infinitesimal" unit e such
+// that e^2 = 0. Dual numbers have two components: the "real" component and the
+// "infinitesimal" component, generally written as x + y*e. Surprisingly, this
+// leads to a convenient method for computing exact derivatives without needing
+// to manipulate complicated symbolic expressions.
+//
+// For example, consider the function
+//
+// f(x) = x^2 ,
+//
+// evaluated at 10. Using normal arithmetic, f(10) = 100, and df/dx(10) = 20.
+// Next, augment 10 with an infinitesimal to get:
+//
+//   f(10 + e) = (10 + e)^2
+//             = 100 + 2 * 10 * e + e^2
+//             = 100 + 20 * e       -+-
+//                     --            |
+//                      |            +--- This is zero, since e^2 = 0
+//                      |
+//                      +----------------- This is df/dx!
+//
+// Note that the derivative of f with respect to x is simply the infinitesimal
+// component of the value of f(x + e). So, in order to take the derivative of
+// any function, it is only necessary to replace the numeric "object" used in
+// the function with one extended with infinitesimals. The class Jet, defined in
+// this header, is one such example of this, where substitution is done with
+// templates.
+//
+// To handle derivatives of functions taking multiple arguments, different
+// infinitesimals are used, one for each variable to take the derivative of. For
+// example, consider a scalar function of two scalar parameters x and y:
+//
+// f(x, y) = x^2 + x * y
+//
+// Following the technique above, to compute the derivatives df/dx and df/dy for
+// f(1, 3) involves doing two evaluations of f, the first time replacing x with
+// x + e, the second time replacing y with y + e.
+//
+// For df/dx:
+//
+//   f(1 + e, 3) = (1 + e)^2 + (1 + e) * 3
+//               = 1 + 2 * e + 3 + 3 * e
+//               = 4 + 5 * e
+//
+//           --> df/dx = 5
+//
+// For df/dy:
+//
+//   f(1, 3 + e) = 1^2 + 1 * (3 + e)
+//               = 1 + 3 + e
+//               = 4 + e
+//
+//           --> df/dy = 1
+//
+// To take the gradient of f with the implementation of dual numbers ("jets") in
+// this file, it is necessary to create a single jet type which has components
+// for the derivative in x and y, and passing them to a templated version of f:
+//
+// template<typename T>
+// T f(const T &x, const T &y) {
+// return x * x + x * y;
+// }
+//
+// // The "2" means there should be 2 dual number components.
+// Jet<double, 2> x(1.0, 0);  // Pick the 0th dual number for x; value 1.
+// Jet<double, 2> y(3.0, 1);  // Pick the 1st dual number for y; value 3.
+// Jet<double, 2> z = f(x, y);
+//
+// LG << "df/dx = " << z.v[0]
+//    << " df/dy = " << z.v[1];
+//
+// Most users should not use Jet objects directly; a wrapper around Jet objects,
+// which makes computing the derivative, gradient, or jacobian of templated
+// functors simple, is in autodiff.h. Even autodiff.h should not be used
+// directly; instead autodiff_cost_function.h is typically the file of interest.
+//
+// For the more mathematically inclined, this file implements first-order
+// "jets". A 1st order jet is an element of the ring
+//
+// T[N] = T[t_1, ..., t_N] / (t_1, ..., t_N)^2
+//
+// which essentially means that each jet consists of a "scalar" value 'a' from T
+// and a 1st order perturbation vector 'v' of length N:
+//
+// x = a + \sum_i v[i] t_i
+//
+// A shorthand is to write an element as x = a + u, where u is the pertubation.
+// Then, the main point about the arithmetic of jets is that the product of
+// perturbations is zero:
+//
+// (a + u) * (b + v) = ab + av + bu + uv
+// = ab + (av + bu) + 0
+//
+// which is what operator* implements below. Addition is simpler:
+//
+// (a + u) + (b + v) = (a + b) + (u + v).
+//
+// The only remaining question is how to evaluate the function of a jet, for
+// which we use the chain rule:
+//
+// f(a + u) = f(a) + f'(a) u
+//
+// where f'(a) is the (scalar) derivative of f at a.
+//
+// By pushing these things through sufficiently and suitably templated
+// functions, we can do automatic differentiation. Just be sure to turn on
+// function inlining and common-subexpression elimination, or it will be very
+// slow!
+//
+// WARNING: Most Ceres users should not directly include this file or know the
+// details of how jets work. Instead the suggested method for automatic
+// derivatives is to use autodiff_cost_function.h, which is a wrapper around
+// both jets.h and autodiff.h to make taking derivatives of cost functions for
+// use in Ceres easier.
+
+#ifndef CERES_PUBLIC_JET_H_
+#define CERES_PUBLIC_JET_H_
+
+#include <cmath>
+#include <iosfwd>
+#include <iostream> // NOLINT
+#include <string>
+
+#include "Eigen/Core"
+#include "ceres/fpclassify.h"
+
+namespace ceres {
+
+template <typename T, int N>
+struct Jet {
+ enum { DIMENSION = N };
+
+  // Default-construct "a"; otherwise this can lead to false errors about
+  // uninitialized uses when other classes rely on a default constructed T
+  // (where T is a Jet<T, N>). This usually only happens in opt mode. Note
+  // that the "a()" initializer value-initializes the scalar, which for
+  // builtin types like double means initialization to 0.0; see section 8.5
+  // of the C++03 standard.
+ Jet() : a() {
+ v.setZero();
+ }
+
+ // Constructor from scalar: a + 0.
+ explicit Jet(const T& value) {
+ a = value;
+ v.setZero();
+ }
+
+ // Constructor from scalar plus variable: a + t_i.
+ Jet(const T& value, int k) {
+ a = value;
+ v.setZero();
+ v[k] = T(1.0);
+ }
+
+ // Compound operators
+ Jet<T, N>& operator+=(const Jet<T, N> &y) {
+ *this = *this + y;
+ return *this;
+ }
+
+ Jet<T, N>& operator-=(const Jet<T, N> &y) {
+ *this = *this - y;
+ return *this;
+ }
+
+ Jet<T, N>& operator*=(const Jet<T, N> &y) {
+ *this = *this * y;
+ return *this;
+ }
+
+ Jet<T, N>& operator/=(const Jet<T, N> &y) {
+ *this = *this / y;
+ return *this;
+ }
+
+ // The scalar part.
+ T a;
+
+ // The infinitesimal part.
+ //
+ // Note the Eigen::DontAlign bit is needed here because this object
+ // gets allocated on the stack and as part of other arrays and
+ // structs. Forcing the right alignment there is the source of much
+ // pain and suffering. Even if that works, passing Jets around to
+ // functions by value has problems because the C++ ABI does not
+ // guarantee alignment for function arguments.
+ //
+ // Setting the DontAlign bit prevents Eigen from using SSE for the
+ // various operations on Jets. This is a small performance penalty
+ // since the AutoDiff code will still expose much of the code as
+ // statically sized loops to the compiler. But given the subtle
+ // issues that arise due to alignment, especially when dealing with
+ // multiple platforms, it seems to be a trade off worth making.
+ Eigen::Matrix<T, N, 1, Eigen::DontAlign> v;
+};
+
+// Unary +
+template<typename T, int N> inline
+Jet<T, N> const& operator+(const Jet<T, N>& f) {
+ return f;
+}
+
+// TODO(keir): Try adding __attribute__((always_inline)) to these functions to
+// see if it causes a performance increase.
+
+// Unary -
+template<typename T, int N> inline
+Jet<T, N> operator-(const Jet<T, N>&f) {
+ Jet<T, N> g;
+ g.a = -f.a;
+ g.v = -f.v;
+ return g;
+}
+
+// Binary +
+template<typename T, int N> inline
+Jet<T, N> operator+(const Jet<T, N>& f,
+ const Jet<T, N>& g) {
+ Jet<T, N> h;
+ h.a = f.a + g.a;
+ h.v = f.v + g.v;
+ return h;
+}
+
+// Binary + with a scalar: x + s
+template<typename T, int N> inline
+Jet<T, N> operator+(const Jet<T, N>& f, T s) {
+ Jet<T, N> h;
+ h.a = f.a + s;
+ h.v = f.v;
+ return h;
+}
+
+// Binary + with a scalar: s + x
+template<typename T, int N> inline
+Jet<T, N> operator+(T s, const Jet<T, N>& f) {
+ Jet<T, N> h;
+ h.a = f.a + s;
+ h.v = f.v;
+ return h;
+}
+
+// Binary -
+template<typename T, int N> inline
+Jet<T, N> operator-(const Jet<T, N>& f,
+ const Jet<T, N>& g) {
+ Jet<T, N> h;
+ h.a = f.a - g.a;
+ h.v = f.v - g.v;
+ return h;
+}
+
+// Binary - with a scalar: x - s
+template<typename T, int N> inline
+Jet<T, N> operator-(const Jet<T, N>& f, T s) {
+ Jet<T, N> h;
+ h.a = f.a - s;
+ h.v = f.v;
+ return h;
+}
+
+// Binary - with a scalar: s - x
+template<typename T, int N> inline
+Jet<T, N> operator-(T s, const Jet<T, N>& f) {
+ Jet<T, N> h;
+ h.a = s - f.a;
+ h.v = -f.v;
+ return h;
+}
+
+// Binary *
+template<typename T, int N> inline
+Jet<T, N> operator*(const Jet<T, N>& f,
+ const Jet<T, N>& g) {
+ Jet<T, N> h;
+ h.a = f.a * g.a;
+ h.v = f.a * g.v + f.v * g.a;
+ return h;
+}
+
+// Binary * with a scalar: x * s
+template<typename T, int N> inline
+Jet<T, N> operator*(const Jet<T, N>& f, T s) {
+ Jet<T, N> h;
+ h.a = f.a * s;
+ h.v = f.v * s;
+ return h;
+}
+
+// Binary * with a scalar: s * x
+template<typename T, int N> inline
+Jet<T, N> operator*(T s, const Jet<T, N>& f) {
+ Jet<T, N> h;
+ h.a = f.a * s;
+ h.v = f.v * s;
+ return h;
+}
+
+// Binary /
+template<typename T, int N> inline
+Jet<T, N> operator/(const Jet<T, N>& f,
+ const Jet<T, N>& g) {
+ Jet<T, N> h;
+ // This uses:
+ //
+  //   a + u   (a + u)(b - v)   (a + u)(b - v)
+  //   ----- = -------------- = --------------
+  //   b + v   (b + v)(b - v)        b^2
+ //
+ // which holds because v*v = 0.
+ h.a = f.a / g.a;
+ h.v = (f.v - f.a / g.a * g.v) / g.a;
+ return h;
+}
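+
+// As an illustrative check of the rule above: with f = 6 + [1, 0] and
+// g = 2 + [0, 1] (unit derivatives in two independent variables),
+// f / g = 3 + [1/2, -3/2], i.e. 1/g.a in the first slot and
+// -f.a/(g.a * g.a) in the second, as the quotient rule demands.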
+
+// Binary / with a scalar: s / x
+template<typename T, int N> inline
+Jet<T, N> operator/(T s, const Jet<T, N>& g) {
+ Jet<T, N> h;
+ h.a = s / g.a;
+ h.v = - s * g.v / (g.a * g.a);
+ return h;
+}
+
+// Binary / with a scalar: x / s
+template<typename T, int N> inline
+Jet<T, N> operator/(const Jet<T, N>& f, T s) {
+ Jet<T, N> h;
+ h.a = f.a / s;
+ h.v = f.v / s;
+ return h;
+}
+
+// Binary comparison operators for both scalars and jets.
+#define CERES_DEFINE_JET_COMPARISON_OPERATOR(op) \
+template<typename T, int N> inline \
+bool operator op(const Jet<T, N>& f, const Jet<T, N>& g) { \
+ return f.a op g.a; \
+} \
+template<typename T, int N> inline \
+bool operator op(const T& s, const Jet<T, N>& g) { \
+ return s op g.a; \
+} \
+template<typename T, int N> inline \
+bool operator op(const Jet<T, N>& f, const T& s) { \
+ return f.a op s; \
+}
+CERES_DEFINE_JET_COMPARISON_OPERATOR( < ) // NOLINT
+CERES_DEFINE_JET_COMPARISON_OPERATOR( <= ) // NOLINT
+CERES_DEFINE_JET_COMPARISON_OPERATOR( > ) // NOLINT
+CERES_DEFINE_JET_COMPARISON_OPERATOR( >= ) // NOLINT
+CERES_DEFINE_JET_COMPARISON_OPERATOR( == ) // NOLINT
+CERES_DEFINE_JET_COMPARISON_OPERATOR( != ) // NOLINT
+#undef CERES_DEFINE_JET_COMPARISON_OPERATOR
+
+// Pull some functions from namespace std.
+//
+// This is necessary because we want to use the same name (e.g. 'sqrt') for
+// double-valued and Jet-valued functions, but we are not allowed to put
+// Jet-valued functions inside namespace std.
+//
+// Missing: cosh, sinh, tanh, tan
+// TODO(keir): Switch to "using".
+inline double abs (double x) { return std::abs(x); }
+inline double log (double x) { return std::log(x); }
+inline double exp (double x) { return std::exp(x); }
+inline double sqrt (double x) { return std::sqrt(x); }
+inline double cos (double x) { return std::cos(x); }
+inline double acos (double x) { return std::acos(x); }
+inline double sin (double x) { return std::sin(x); }
+inline double asin (double x) { return std::asin(x); }
+inline double pow (double x, double y) { return std::pow(x, y); }
+inline double atan2(double y, double x) { return std::atan2(y, x); }
+
+// In general, f(a + h) ~= f(a) + f'(a) h, via the chain rule.
+
+// abs(x + h) ~= x + h or -(x + h)
+template <typename T, int N> inline
+Jet<T, N> abs(const Jet<T, N>& f) {
+ return f.a < T(0.0) ? -f : f;
+}
+
+// log(a + h) ~= log(a) + h / a
+template <typename T, int N> inline
+Jet<T, N> log(const Jet<T, N>& f) {
+ Jet<T, N> g;
+ g.a = log(f.a);
+ g.v = f.v / f.a;
+ return g;
+}
+
+// exp(a + h) ~= exp(a) + exp(a) h
+template <typename T, int N> inline
+Jet<T, N> exp(const Jet<T, N>& f) {
+ Jet<T, N> g;
+ g.a = exp(f.a);
+ g.v = g.a * f.v;
+ return g;
+}
+
+// sqrt(a + h) ~= sqrt(a) + h / (2 sqrt(a))
+template <typename T, int N> inline
+Jet<T, N> sqrt(const Jet<T, N>& f) {
+ Jet<T, N> g;
+ g.a = sqrt(f.a);
+ g.v = f.v / (T(2.0) * g.a);
+ return g;
+}
+
+// cos(a + h) ~= cos(a) - sin(a) h
+template <typename T, int N> inline
+Jet<T, N> cos(const Jet<T, N>& f) {
+ Jet<T, N> g;
+ g.a = cos(f.a);
+ T sin_a = sin(f.a);
+ g.v = - sin_a * f.v;
+ return g;
+}
+
+// acos(a + h) ~= acos(a) - 1 / sqrt(1 - a^2) h
+template <typename T, int N> inline
+Jet<T, N> acos(const Jet<T, N>& f) {
+ Jet<T, N> g;
+ g.a = acos(f.a);
+ g.v = - T(1.0) / sqrt(T(1.0) - f.a * f.a) * f.v;
+ return g;
+}
+
+// sin(a + h) ~= sin(a) + cos(a) h
+template <typename T, int N> inline
+Jet<T, N> sin(const Jet<T, N>& f) {
+ Jet<T, N> g;
+ g.a = sin(f.a);
+ T cos_a = cos(f.a);
+ g.v = cos_a * f.v;
+ return g;
+}
+
+// asin(a + h) ~= asin(a) + 1 / sqrt(1 - a^2) h
+template <typename T, int N> inline
+Jet<T, N> asin(const Jet<T, N>& f) {
+ Jet<T, N> g;
+ g.a = asin(f.a);
+ g.v = T(1.0) / sqrt(T(1.0) - f.a * f.a) * f.v;
+ return g;
+}
+
+// Jet Classification. It is not clear what the appropriate semantics are for
+// these classifications. Here IsFinite and IsNormal are treated as "all"
+// operations, i.e. all elements of the jet must be finite (or normal) for
+// the jet itself to be finite (or normal). For IsNaN and IsInfinite, the
+// answer is less clear. This takes an "any" approach, so that if any part
+// of a jet is NaN or infinite, then the entire jet is NaN or infinite. This
+// leads to strange situations, e.g. a jet can be both IsInfinite and IsNaN,
+// but in practice the "any" semantics are the most useful, e.g. for
+// checking that derivatives are sane.
+
+// The jet is finite if all parts of the jet are finite.
+template <typename T, int N> inline
+bool IsFinite(const Jet<T, N>& f) {
+ if (!IsFinite(f.a)) {
+ return false;
+ }
+ for (int i = 0; i < N; ++i) {
+ if (!IsFinite(f.v[i])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// The jet is infinite if any part of the jet is infinite.
+template <typename T, int N> inline
+bool IsInfinite(const Jet<T, N>& f) {
+ if (IsInfinite(f.a)) {
+ return true;
+ }
+ for (int i = 0; i < N; i++) {
+ if (IsInfinite(f.v[i])) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// The jet is NaN if any part of the jet is NaN.
+template <typename T, int N> inline
+bool IsNaN(const Jet<T, N>& f) {
+ if (IsNaN(f.a)) {
+ return true;
+ }
+ for (int i = 0; i < N; ++i) {
+ if (IsNaN(f.v[i])) {
+ return true;
+ }
+ }
+ return false;
+}
+
+// The jet is normal if all parts of the jet are normal.
+template <typename T, int N> inline
+bool IsNormal(const Jet<T, N>& f) {
+ if (!IsNormal(f.a)) {
+ return false;
+ }
+ for (int i = 0; i < N; ++i) {
+ if (!IsNormal(f.v[i])) {
+ return false;
+ }
+ }
+ return true;
+}
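+
+// For example (a sketch, not part of this header), these predicates can be
+// used to guard against bad derivatives after evaluating a residual with
+// Jets; EvaluateResidual is a hypothetical user function:
+//
+//   Jet<double, 2> r = EvaluateResidual(x);
+//   if (!IsFinite(r)) {
+//     LOG(WARNING) << "Residual or its derivatives are not finite: " << r;
+//   }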
+
+// atan2(b + db, a + da) ~= atan2(b, a) + (- b da + a db) / (a^2 + b^2)
+//
+// In words: the rate of change of theta is 1/r times the rate of
+// change of (x, y) in the positive angular direction.
+template <typename T, int N> inline
+Jet<T, N> atan2(const Jet<T, N>& g, const Jet<T, N>& f) {
+ // Note order of arguments:
+ //
+ // f = a + da
+ // g = b + db
+
+ Jet<T, N> out;
+
+ out.a = atan2(g.a, f.a);
+
+ T const temp = T(1.0) / (f.a * f.a + g.a * g.a);
+ out.v = temp * (- g.a * f.v + f.a * g.v);
+ return out;
+}
+
+
+// pow -- base is a differentiable function, exponent is a constant.
+// (a+da)^p ~= a^p + p*a^(p-1) da
+template <typename T, int N> inline
+Jet<T, N> pow(const Jet<T, N>& f, double g) {
+ Jet<T, N> out;
+ out.a = pow(f.a, g);
+ T const temp = g * pow(f.a, g - T(1.0));
+ out.v = temp * f.v;
+ return out;
+}
+
+// pow -- base is a constant, exponent is a differentiable function.
+// (a)^(p+dp) ~= a^p + a^p log(a) dp
+template <typename T, int N> inline
+Jet<T, N> pow(double f, const Jet<T, N>& g) {
+ Jet<T, N> out;
+ out.a = pow(f, g.a);
+ T const temp = log(f) * out.a;
+ out.v = temp * g.v;
+ return out;
+}
+
+
+// pow -- both base and exponent are differentiable functions.
+// (a+da)^(b+db) ~= a^b + b * a^(b-1) da + a^b log(a) * db
+template <typename T, int N> inline
+Jet<T, N> pow(const Jet<T, N>& f, const Jet<T, N>& g) {
+ Jet<T, N> out;
+
+ T const temp1 = pow(f.a, g.a);
+ T const temp2 = g.a * pow(f.a, g.a - T(1.0));
+ T const temp3 = temp1 * log(f.a);
+
+ out.a = temp1;
+ out.v = temp2 * f.v + temp3 * g.v;
+ return out;
+}
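+
+// As an illustrative check of the constant-exponent rule above: for
+// f = 2 + [1] (a one dimensional jet), pow(f, 3.0) = 8 + [12], matching
+// d(x^3)/dx = 3 x^2 = 12 at x = 2.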
+
+// Define the helper functions Eigen needs to embed Jet types.
+//
+// NOTE(keir): machine_epsilon() and precision() are missing, because they don't
+// work with nested template types (e.g. where the scalar is itself templated).
+// Among other things, this means that decompositions of Jets do not work,
+// for example
+//
+// Matrix<Jet<T, N> ... > A, x, b;
+// ...
+// A.solve(b, &x)
+//
+// does not work and will fail with a strange compiler error.
+//
+// TODO(keir): This is an Eigen 2.0 limitation that is lifted in 3.0. When we
+// switch to 3.0, also add the rest of the specialization functionality.
+template<typename T, int N> inline const Jet<T, N>& ei_conj(const Jet<T, N>& x) { return x; } // NOLINT
+template<typename T, int N> inline const Jet<T, N>& ei_real(const Jet<T, N>& x) { return x; } // NOLINT
+template<typename T, int N> inline Jet<T, N> ei_imag(const Jet<T, N>& ) { return Jet<T, N>(0.0); } // NOLINT
+template<typename T, int N> inline Jet<T, N> ei_abs (const Jet<T, N>& x) { return fabs(x); } // NOLINT
+template<typename T, int N> inline Jet<T, N> ei_abs2(const Jet<T, N>& x) { return x * x; } // NOLINT
+template<typename T, int N> inline Jet<T, N> ei_sqrt(const Jet<T, N>& x) { return sqrt(x); } // NOLINT
+template<typename T, int N> inline Jet<T, N> ei_exp (const Jet<T, N>& x) { return exp(x); } // NOLINT
+template<typename T, int N> inline Jet<T, N> ei_log (const Jet<T, N>& x) { return log(x); } // NOLINT
+template<typename T, int N> inline Jet<T, N> ei_sin (const Jet<T, N>& x) { return sin(x); } // NOLINT
+template<typename T, int N> inline Jet<T, N> ei_cos (const Jet<T, N>& x) { return cos(x); } // NOLINT
+template<typename T, int N> inline Jet<T, N> ei_pow (const Jet<T, N>& x, Jet<T, N> y) { return pow(x, y); } // NOLINT
+
+// Note: This has to be in the ceres namespace for argument dependent lookup to
+// function correctly. Otherwise statements like CHECK_LE(x, 2.0) fail with
+// strange compile errors.
+template <typename T, int N>
+inline std::ostream &operator<<(std::ostream &s, const Jet<T, N>& z) {
+ return s << "[" << z.a << " ; " << z.v.transpose() << "]";
+}
+
+} // namespace ceres
+
+namespace Eigen {
+
+// Creating a specialization of NumTraits enables placing Jet objects inside
+// Eigen arrays, getting all the goodness of Eigen combined with autodiff.
+template<typename T, int N>
+struct NumTraits<ceres::Jet<T, N> > {
+ typedef ceres::Jet<T, N> Real;
+ typedef ceres::Jet<T, N> NonInteger;
+ typedef ceres::Jet<T, N> Nested;
+
+ static typename ceres::Jet<T, N> dummy_precision() {
+ return ceres::Jet<T, N>(1e-12);
+ }
+
+ enum {
+ IsComplex = 0,
+ IsInteger = 0,
+ IsSigned,
+ ReadCost = 1,
+ AddCost = 1,
+ // For Jet types, multiplication is more expensive than addition.
+ MulCost = 3,
+ HasFloatingPoint = 1
+ };
+};
+
+} // namespace Eigen
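+
+// For example (a sketch), the specialization above lets Jets act as the
+// scalar type of Eigen expressions, so derivatives propagate through
+// matrix arithmetic:
+//
+//   Eigen::Matrix<ceres::Jet<double, 3>, 2, 2> M;
+//   Eigen::Matrix<ceres::Jet<double, 3>, 2, 1> v, w;
+//   // ... fill M and v with seeded Jets ...
+//   w = M * v;  // w carries both values and derivatives.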
+
+#endif // CERES_PUBLIC_JET_H_
diff --git a/include/ceres/local_parameterization.h b/include/ceres/local_parameterization.h
new file mode 100644
index 0000000..c0f7dc7
--- /dev/null
+++ b/include/ceres/local_parameterization.h
@@ -0,0 +1,189 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+// sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_
+#define CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_
+
+#include <vector>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+
+// Purpose: Sometimes parameter blocks x can overparameterize a problem
+//
+// min f(x)
+// x
+//
+// In that case it is desirable to choose a parameterization for the
+// block itself to remove the null directions of the cost. More
+// generally, if x lies on a manifold of a smaller dimension than the
+// ambient space that it is embedded in, then it is numerically and
+// computationally more effective to optimize it using a
+// parameterization that lives in the tangent space of that manifold
+// at each point.
+//
+// For example, a sphere in three dimensions is a 2 dimensional
+// manifold, embedded in a three dimensional space. At each point on
+// the sphere, the plane tangent to it defines a two dimensional
+// tangent space. For a cost function defined on this sphere, given a
+// point x, moving in the direction normal to the sphere at that point
+// is not useful. Thus a better way to do a local optimization is to
+// optimize over two dimensional vector delta in the tangent space at
+// that point and then "move" to the point x + delta, where the move
+// operation involves projecting back onto the sphere. Doing so
+// removes a redundant dimension from the optimization, making it
+// numerically more robust and efficient.
+//
+// More generally we can define a function
+//
+// x_plus_delta = Plus(x, delta),
+//
+// where x_plus_delta has the same size as x, and delta is of size
+// less than or equal to x. The function Plus generalizes the
+// definition of vector addition. Thus it satisfies the identity
+//
+// Plus(x, 0) = x, for all x.
+//
+// A trivial version of Plus is when delta is of the same size as x
+// and
+//
+// Plus(x, delta) = x + delta
+//
+// A more interesting case is when x is a two dimensional vector, and the
+// user wishes to hold the first coordinate constant. Then, delta is a
+// scalar and Plus is defined as
+//
+// Plus(x, delta) = x + [0] * delta
+// [1]
+//
+// An example that occurs commonly in Structure from Motion problems
+// is when camera rotations are parameterized using a quaternion. There,
+// it is useful to only make updates orthogonal to the 4-vector defining
+// the quaternion. One way to do this is to let delta be a 3
+// dimensional vector and define Plus to be
+//
+// Plus(x, delta) = [cos(|delta|), sin(|delta|) delta / |delta|] * x
+//
+// The multiplication between the two 4-vectors on the RHS is the
+// standard quaternion product.
+//
+// Given Plus and a point x, optimizing f can now be restated as
+//
+// min f(Plus(x, delta))
+// delta
+//
+// Given a solution delta to this problem, the optimal value is then
+// given by
+//
+// x* = Plus(x, delta)
+//
+// The class LocalParameterization defines the function Plus and its
+// Jacobian which is needed to compute the Jacobian of f w.r.t delta.
+class LocalParameterization {
+ public:
+ virtual ~LocalParameterization() {}
+
+ // Generalization of the addition operation,
+ //
+ // x_plus_delta = Plus(x, delta)
+ //
+ // with the condition that Plus(x, 0) = x.
+ virtual bool Plus(const double* x,
+ const double* delta,
+ double* x_plus_delta) const = 0;
+
+ // The jacobian of Plus(x, delta) w.r.t delta at delta = 0.
+ virtual bool ComputeJacobian(const double* x, double* jacobian) const = 0;
+
+ // Size of x.
+ virtual int GlobalSize() const = 0;
+
+ // Size of delta.
+ virtual int LocalSize() const = 0;
+};
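+
+// As a sketch (for illustration only; the class name is hypothetical and
+// SubsetParameterization below already covers this case), the "hold the
+// first coordinate of a two dimensional x constant" example from the
+// comment above could be implemented as:
+//
+//   class ConstFirstCoordinateParameterization : public LocalParameterization {
+//    public:
+//     virtual bool Plus(const double* x, const double* delta,
+//                       double* x_plus_delta) const {
+//       x_plus_delta[0] = x[0];
+//       x_plus_delta[1] = x[1] + delta[0];
+//       return true;
+//     }
+//     virtual bool ComputeJacobian(const double* x, double* jacobian) const {
+//       jacobian[0] = 0.0;  // d(x_plus_delta[0]) / d(delta)
+//       jacobian[1] = 1.0;  // d(x_plus_delta[1]) / d(delta)
+//       return true;
+//     }
+//     virtual int GlobalSize() const { return 2; }
+//     virtual int LocalSize() const { return 1; }
+//   };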
+
+// Some basic parameterizations
+
+// Identity Parameterization: Plus(x, delta) = x + delta
+class IdentityParameterization : public LocalParameterization {
+ public:
+ explicit IdentityParameterization(int size);
+ virtual ~IdentityParameterization() {}
+ virtual bool Plus(const double* x,
+ const double* delta,
+ double* x_plus_delta) const;
+ virtual bool ComputeJacobian(const double* x,
+ double* jacobian) const;
+ virtual int GlobalSize() const { return size_; }
+ virtual int LocalSize() const { return size_; }
+
+ private:
+ const int size_;
+};
+
+// Hold a subset of the parameters inside a parameter block constant.
+class SubsetParameterization : public LocalParameterization {
+ public:
+ explicit SubsetParameterization(int size,
+ const vector<int>& constant_parameters);
+ virtual ~SubsetParameterization() {}
+ virtual bool Plus(const double* x,
+ const double* delta,
+ double* x_plus_delta) const;
+ virtual bool ComputeJacobian(const double* x,
+ double* jacobian) const;
+ virtual int GlobalSize() const { return constancy_mask_.size(); }
+ virtual int LocalSize() const { return local_size_; }
+
+ private:
+ const int local_size_;
+ vector<int> constancy_mask_;
+};
+
+// Plus(x, delta) = [cos(|delta|), sin(|delta|) delta / |delta|] * x
+// with * being the quaternion multiplication operator. Here we assume
+// that the first element of the quaternion vector is the real (cos
+// theta) part.
+class QuaternionParameterization : public LocalParameterization {
+ public:
+ virtual ~QuaternionParameterization() {}
+ virtual bool Plus(const double* x,
+ const double* delta,
+ double* x_plus_delta) const;
+ virtual bool ComputeJacobian(const double* x,
+ double* jacobian) const;
+ virtual int GlobalSize() const { return 4; }
+ virtual int LocalSize() const { return 3; }
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_
diff --git a/include/ceres/loss_function.h b/include/ceres/loss_function.h
new file mode 100644
index 0000000..0c0ceaa
--- /dev/null
+++ b/include/ceres/loss_function.h
@@ -0,0 +1,397 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// The LossFunction interface is the way users describe how residuals
+// are converted to cost terms for the overall problem cost function.
+// For the exact manner in which loss functions are converted to the
+// overall cost for a problem, see problem.h.
+//
+// For least squares problems where there are no outliers and standard
+// squared loss is expected, it is not necessary to create a loss
+// function; instead passing a NULL to the problem when adding
+// residuals implies a standard squared loss.
+//
+// For least squares problems where the minimization may encounter
+// input terms that contain outliers, that is, completely bogus
+// measurements, it is important to use a loss function that reduces
+// their associated penalty.
+//
+// Consider a structure from motion problem. The unknowns are 3D
+// points and camera parameters, and the measurements are image
+// coordinates describing the expected reprojected position for a
+// point in a camera. For example, we want to model the geometry of a
+// street scene with fire hydrants and cars, observed by a moving
+// camera with unknown parameters, and the only 3D points we care
+// about are the pointy tippy-tops of the fire hydrants. Our magic
+// image processing algorithm, which is responsible for producing the
+// measurements that are input to Ceres, has found and matched all
+// such tippy-tops in all image frames, except that in one of the
+// frames it mistook a car's headlight for a hydrant. If we didn't do
+// anything special (i.e. if we used a basic quadratic loss), the
+// residual for the erroneous measurement will result in extreme error
+// due to the quadratic nature of squared loss. This results in the
+// entire solution getting pulled away from the optimum to reduce
+// the large error that would otherwise be attributed to the wrong
+// measurement.
+//
+// Using a robust loss function, the cost for large residuals is
+// reduced. In the example above, this leads to outlier terms getting
+// downweighted so they do not overly influence the final solution.
+//
+// What loss function is best?
+//
+// In general, there isn't a principled way to select a robust loss
+// function. The authors suggest starting with a non-robust cost, then
+// only experimenting with robust loss functions if standard squared
+// loss doesn't work.
+
+#ifndef CERES_PUBLIC_LOSS_FUNCTION_H_
+#define CERES_PUBLIC_LOSS_FUNCTION_H_
+
+#include <glog/logging.h>
+#include "ceres/internal/macros.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+
+namespace ceres {
+
+class LossFunction {
+ public:
+ virtual ~LossFunction() {}
+
+ // For a residual vector with squared 2-norm 'sq_norm', this method
+ // is required to fill in the value and derivatives of the loss
+ // function (rho in this example):
+ //
+ // out[0] = rho(sq_norm),
+ // out[1] = rho'(sq_norm),
+ // out[2] = rho''(sq_norm),
+ //
+ // Here the convention is that the contribution of a term to the
+ // cost function is given by 1/2 rho(s), where
+ //
+ // s = ||residuals||^2.
+ //
+ // Calling the method with a negative value of 's' is an error and
+ // the implementations are not required to handle that case.
+ //
+ // Most sane choices of rho() satisfy:
+ //
+ // rho(0) = 0,
+ // rho'(0) = 1,
+ // rho'(s) < 1 in outlier region,
+ // rho''(s) < 0 in outlier region,
+ //
+ // so that they mimic the least squares cost for small residuals.
+ virtual void Evaluate(double sq_norm, double out[3]) const = 0;
+};
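+
+// For example, a user defined loss implementing this contract for the Huber
+// function described below with a = 1 might look like the following sketch
+// (MyHuberLoss is a hypothetical name; HuberLoss further below is the real,
+// scale-aware implementation):
+//
+//   class MyHuberLoss : public LossFunction {
+//    public:
+//     virtual void Evaluate(double s, double out[3]) const {
+//       if (s <= 1.0) {
+//         out[0] = s;    // rho(s)
+//         out[1] = 1.0;  // rho'(s)
+//         out[2] = 0.0;  // rho''(s)
+//       } else {
+//         const double r = sqrt(s);
+//         out[0] = 2.0 * r - 1.0;
+//         out[1] = 1.0 / r;
+//         out[2] = -0.5 / (s * r);
+//       }
+//     }
+//   };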
+
+// Some common implementations follow below.
+//
+// Note: in the region of interest (i.e. s < 3) we have:
+// TrivialLoss >= HuberLoss >= SoftLOneLoss >= CauchyLoss
+
+
+// This corresponds to no robustification.
+//
+// rho(s) = s
+//
+// At s = 0: rho = [0, 1, 0].
+//
+// It is not normally necessary to use this, as passing NULL for the
+// loss function when building the problem accomplishes the same
+// thing.
+class TrivialLoss : public LossFunction {
+ public:
+ virtual void Evaluate(double, double*) const;
+};
+
+// Scaling
+// -------
+// Given one robustifier
+// s -> rho(s)
+// one can change the length scale at which robustification takes
+// place, by adding a scale factor 'a' as follows:
+//
+// s -> a^2 rho(s / a^2).
+//
+// The first and second derivatives are:
+//
+// s -> rho'(s / a^2),
+// s -> (1 / a^2) rho''(s / a^2),
+//
+// but the behaviour near s = 0 is the same as the original function,
+// i.e.
+//
+// rho(s) = s + higher order terms,
+// a^2 rho(s / a^2) = s + higher order terms.
+//
+// The scalar 'a' should be positive.
+//
+// The reason for the appearance of squaring is that 'a' is in the
+// units of the residual vector norm whereas 's' is a squared
+// norm. For applications it is more convenient to specify 'a' than
+// its square. The commonly used robustifiers below are described in
+// un-scaled format (a = 1) but their implementations work for any
+// non-zero value of 'a'.
+
+// Huber.
+//
+// rho(s) = s for s <= 1,
+// rho(s) = 2 sqrt(s) - 1 for s >= 1.
+//
+// At s = 0: rho = [0, 1, 0].
+//
+// The scaling parameter 'a' corresponds to 'delta' on this page:
+// http://en.wikipedia.org/wiki/Huber_Loss_Function
+class HuberLoss : public LossFunction {
+ public:
+ explicit HuberLoss(double a) : a_(a), b_(a * a) { }
+ virtual void Evaluate(double, double*) const;
+
+ private:
+ const double a_;
+ // b = a^2.
+ const double b_;
+};
+
+// Soft L1, similar to Huber but smooth.
+//
+// rho(s) = 2 (sqrt(1 + s) - 1).
+//
+// At s = 0: rho = [0, 1, -1/2].
+class SoftLOneLoss : public LossFunction {
+ public:
+ explicit SoftLOneLoss(double a) : b_(a * a), c_(1 / b_) { }
+ virtual void Evaluate(double, double*) const;
+
+ private:
+ // b = a^2.
+ const double b_;
+ // c = 1 / a^2.
+ const double c_;
+};
+
+// Inspired by the Cauchy distribution
+//
+// rho(s) = log(1 + s).
+//
+// At s = 0: rho = [0, 1, -1].
+class CauchyLoss : public LossFunction {
+ public:
+ explicit CauchyLoss(double a) : b_(a * a), c_(1 / b_) { }
+ virtual void Evaluate(double, double*) const;
+
+ private:
+ // b = a^2.
+ const double b_;
+ // c = 1 / a^2.
+ const double c_;
+};
+
+// Loss that is capped beyond a certain level using the arc-tangent function.
+// The scaling parameter 'a' determines the level where falloff occurs.
+// For costs much smaller than 'a', the loss function is linear and behaves like
+// TrivialLoss, and for values much larger than 'a' the value asymptotically
+// approaches the constant value of a * PI / 2.
+//
+// rho(s) = a atan(s / a).
+//
+// At s = 0: rho = [0, 1, 0].
+class ArctanLoss : public LossFunction {
+ public:
+ explicit ArctanLoss(double a) : a_(a), b_(1 / (a * a)) { }
+ virtual void Evaluate(double, double*) const;
+
+ private:
+ const double a_;
+ // b = 1 / a^2.
+ const double b_;
+};
+
+// Loss function that maps to approximately zero cost in a range around the
+// origin, and reverts to linear in error (quadratic in cost) beyond this range.
+// The tolerance parameter 'a' sets the nominal point at which the
+// transition occurs, and the transition size parameter 'b' sets the nominal
+// distance over which most of the transition occurs. Both a and b must be
+// greater than zero, and typically b will be set to a fraction of a.
+// The slope rho'[s] varies smoothly from about 0 at s <= a - b to
+// about 1 at s >= a + b.
+//
+// The term is computed as:
+//
+// rho(s) = b log(1 + exp((s - a) / b)) - c0.
+//
+// where c0 is chosen so that rho(0) == 0
+//
+//   c0 = b log(1 + exp(-a / b)).
+//
+// This has the following useful properties:
+//
+// rho(s) == 0 for s = 0
+// rho'(s) ~= 0 for s << a - b
+// rho'(s) ~= 1 for s >> a + b
+// rho''(s) > 0 for all s
+//
+// In addition, all derivatives are continuous, and the curvature is
+// concentrated in the range a - b to a + b.
+//
+// At s = 0: rho = [0, ~0, ~0].
+class TolerantLoss : public LossFunction {
+ public:
+ explicit TolerantLoss(double a, double b);
+ virtual void Evaluate(double, double*) const;
+
+ private:
+ const double a_, b_, c_;
+};
+
+// Composition of two loss functions. The error is the result of first
+// evaluating g followed by f to yield the composition f(g(s)).
+// The loss functions must not be NULL.
+class ComposedLoss : public LossFunction {
+ public:
+ explicit ComposedLoss(const LossFunction* f, Ownership ownership_f,
+ const LossFunction* g, Ownership ownership_g);
+ virtual ~ComposedLoss();
+ virtual void Evaluate(double, double*) const;
+
+ private:
+ internal::scoped_ptr<const LossFunction> f_, g_;
+ const Ownership ownership_f_, ownership_g_;
+};
+
+// The discussion above has to do with length scaling: it affects the space
+// in which s is measured. Sometimes you want to simply scale the output
+// value of the robustifier. For example, you might want to weight
+// different error terms differently (e.g., weight pixel reprojection
+// errors differently from terrain errors).
+//
+// If rho is the wrapped robustifier, then this simply outputs
+// s -> a * rho(s)
+//
+// The first and second derivatives are, not surprisingly
+// s -> a * rho'(s)
+// s -> a * rho''(s)
+//
+// Since we treat a NULL loss function as the Identity loss
+// function, rho = NULL is a valid input and will result in the input
+// being scaled by a. This provides a simple way of implementing a
+// scaled ResidualBlock.
+class ScaledLoss : public LossFunction {
+ public:
+ // Constructs a ScaledLoss wrapping another loss function. Takes
+ // ownership of the wrapped loss function or not depending on the
+ // ownership parameter.
+ ScaledLoss(const LossFunction* rho, double a, Ownership ownership) :
+ rho_(rho), a_(a), ownership_(ownership) { }
+
+ virtual ~ScaledLoss() {
+ if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
+ rho_.release();
+ }
+ }
+ virtual void Evaluate(double, double*) const;
+
+ private:
+ internal::scoped_ptr<const LossFunction> rho_;
+ const double a_;
+ const Ownership ownership_;
+ CERES_DISALLOW_COPY_AND_ASSIGN(ScaledLoss);
+};
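+
+// Example usage (a sketch; problem, cost_function and x are assumed to
+// exist elsewhere): weight a robustified term twice as heavily as its
+// neighbours:
+//
+//   LossFunction* loss =
+//       new ScaledLoss(new CauchyLoss(1.0), 2.0, TAKE_OWNERSHIP);
+//   problem.AddResidualBlock(cost_function, loss, x);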
+
+// Sometimes after the optimization problem has been constructed, we
+// wish to mutate the scale of the loss function. For example, when
+// performing estimation from data which has substantial outliers,
+// convergence can be improved by starting out with a large scale,
+// optimizing the problem and then reducing the scale. This can have
+// better convergence behaviour than just using a loss function with a
+// small scale.
+//
+// This templated class allows the user to implement a loss function
+// whose scale can be mutated after an optimization problem has been
+// constructed.
+//
+// Example usage
+//
+// Problem problem;
+//
+// // Add parameter blocks
+//
+// CostFunction* cost_function =
+//      new AutoDiffCostFunction<UW_Camera_Mapper, 2, 9, 3>(
+// new UW_Camera_Mapper(data->observations[2*i + 0],
+// data->observations[2*i + 1]));
+//
+//  LossFunctionWrapper* loss_function =
+//      new LossFunctionWrapper(new HuberLoss(1.0), TAKE_OWNERSHIP);
+//
+//  problem.AddResidualBlock(cost_function, loss_function, parameters);
+//
+//  Solver::Options options;
+//  Solver::Summary summary;
+//
+//  Solve(options, &problem, &summary);
+//
+//  loss_function->Reset(new HuberLoss(1.0), TAKE_OWNERSHIP);
+//
+//  Solve(options, &problem, &summary);
+//
+class LossFunctionWrapper : public LossFunction {
+ public:
+ LossFunctionWrapper(LossFunction* rho, Ownership ownership)
+ : rho_(rho), ownership_(ownership) {
+ }
+
+ virtual ~LossFunctionWrapper() {
+ if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
+ rho_.release();
+ }
+ }
+
+ virtual void Evaluate(double sq_norm, double out[3]) const {
+ CHECK_NOTNULL(rho_.get());
+ rho_->Evaluate(sq_norm, out);
+ }
+
+ void Reset(LossFunction* rho, Ownership ownership) {
+ if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
+ rho_.release();
+ }
+ rho_.reset(rho);
+ ownership_ = ownership;
+ }
+
+ private:
+ internal::scoped_ptr<const LossFunction> rho_;
+ Ownership ownership_;
+ CERES_DISALLOW_COPY_AND_ASSIGN(LossFunctionWrapper);
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_LOSS_FUNCTION_H_
diff --git a/include/ceres/normal_prior.h b/include/ceres/normal_prior.h
new file mode 100644
index 0000000..480a074
--- /dev/null
+++ b/include/ceres/normal_prior.h
@@ -0,0 +1,75 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Cost term that implements a prior on a parameter block using a
+// normal distribution.
+
+#ifndef CERES_PUBLIC_NORMAL_PRIOR_H_
+#define CERES_PUBLIC_NORMAL_PRIOR_H_
+
+#include "ceres/cost_function.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+
+// Implements a cost function of the form
+//
+// cost(x) = ||A(x - b)||^2
+//
+// where, the matrix A and the vector b are fixed and x is the
+// variable. In case the user is interested in implementing a cost
+// function of the form
+//
+// cost(x) = (x - mu)^T S^{-1} (x - mu)
+//
+// where, mu is a vector and S is a covariance matrix, then, A =
+// S^{-1/2}, i.e the matrix A is the square root of the inverse of the
+// covariance, also known as the stiffness matrix. There are however
+// no restrictions on the shape of A. It is free to be rectangular,
+// which would be the case if the covariance matrix S is rank
+// deficient.
+
+class NormalPrior: public CostFunction {
+ public:
+  // Check that the number of rows in the vector b is the same as the
+  // number of columns in the matrix A; crash otherwise.
+ NormalPrior(const Matrix& A, const Vector& b);
+
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const;
+ private:
+ Matrix A_;
+ Vector b_;
+};
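+
+// Example usage (a sketch; stiffness, mu, problem and x are assumed to
+// exist elsewhere): to express a Gaussian prior x ~ N(mu, S), pass
+// A = S^{-1/2} and b = mu:
+//
+//   Matrix A = stiffness;  // the square root of the inverse covariance
+//   Vector b = mu;
+//   problem.AddResidualBlock(new NormalPrior(A, b), NULL, x);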
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_NORMAL_PRIOR_H_
diff --git a/include/ceres/numeric_diff_cost_function.h b/include/ceres/numeric_diff_cost_function.h
new file mode 100644
index 0000000..8544e44
--- /dev/null
+++ b/include/ceres/numeric_diff_cost_function.h
@@ -0,0 +1,285 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Create CostFunctions as needed by the least squares framework with jacobians
+// computed via numeric (a.k.a. finite) differentiation. For more details see
+// http://en.wikipedia.org/wiki/Numerical_differentiation.
+//
+// To get a numerically differentiated cost function, define a subclass of
+// CostFunction such that the Evaluate() function ignores the jacobian
+// parameter. The numeric differentiation wrapper will fill in the jacobian
+// parameter if necessary by repeatedly calling the Evaluate() function with
+// small changes to the appropriate parameters, and computing the slope. For
+// performance, the numeric differentiation wrapper class is templated on the
+// concrete cost function, even though it could be implemented only in terms of
+// the virtual CostFunction interface.
+//
+// The numerically differentiated version of a cost function
+// can be constructed as follows:
+//
+// CostFunction* cost_function
+// = new NumericDiffCostFunction<MyCostFunction, CENTRAL, 1, 4, 8>(
+// new MyCostFunction(...), TAKE_OWNERSHIP);
+//
+// where MyCostFunction has 1 residual and 2 parameter blocks with sizes 4 and 8
+// respectively. Look at the tests for a more detailed example.
+//
+// The central difference method is considerably more accurate, at the cost of
+// twice as many function evaluations as forward differences. Consider using
+// central differences to begin with, and only after that works, try forward
+// differences to improve performance.
+//
+// TODO(keir): Characterize accuracy; mention pitfalls; provide alternatives.
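+//
+// A minimal sketch of such a cost function (the names are illustrative and
+// match the construction example above):
+//
+//   class MyCostFunction : public SizedCostFunction<1, 4, 8> {
+//    public:
+//     virtual bool Evaluate(double const* const* parameters,
+//                           double* residuals,
+//                           double** jacobians) const {
+//       // Fill residuals[0] from parameters[0] (size 4) and parameters[1]
+//       // (size 8). The jacobians argument is ignored; it is filled in by
+//       // the numeric differentiation wrapper.
+//       residuals[0] = ...;
+//       return true;
+//     }
+//   };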
+
+#ifndef CERES_PUBLIC_NUMERIC_DIFF_COST_FUNCTION_H_
+#define CERES_PUBLIC_NUMERIC_DIFF_COST_FUNCTION_H_
+
+#include <cstring>
+#include <glog/logging.h>
+#include "Eigen/Dense"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/types.h"
+
+namespace ceres {
+
+enum NumericDiffMethod {
+ CENTRAL,
+ FORWARD
+};
+
+// This is split from the main class because C++ doesn't allow partial template
+// specializations for member functions. The alternative is to repeat the main
+// class for differing numbers of parameters, which is also unfortunate.
+template <typename CostFunctionNoJacobian,
+ int num_residuals,
+ int parameter_block_size,
+ int parameter_block,
+ NumericDiffMethod method>
+struct Differencer {
+ // Mutates parameters but must restore them before return.
+ static bool EvaluateJacobianForParameterBlock(
+ const CostFunctionNoJacobian *function,
+ double const* residuals_at_eval_point,
+ double **parameters,
+ double **jacobians) {
+ using Eigen::Map;
+ using Eigen::Matrix;
+ using Eigen::RowMajor;
+ using Eigen::ColMajor;
+
+ typedef Matrix<double, num_residuals, 1> ResidualVector;
+ typedef Matrix<double, parameter_block_size, 1> ParameterVector;
+ typedef Matrix<double, num_residuals, parameter_block_size,
+ (parameter_block_size == 1 &&
+ num_residuals > 1) ? ColMajor : RowMajor> JacobianMatrix;
+
+ Map<JacobianMatrix> parameter_jacobian(jacobians[parameter_block],
+ num_residuals,
+ parameter_block_size);
+
+ // Mutate 1 element at a time and then restore.
+ Map<ParameterVector> x_plus_delta(parameters[parameter_block],
+ parameter_block_size);
+ ParameterVector x(x_plus_delta);
+
+ // TODO(keir): Pick a smarter number! In theory a good choice is sqrt(eps) *
+ // x, which for doubles means about 1e-8 * x. However, I have found this
+ // number too optimistic. This number should be exposed for users to change.
+ const double kRelativeStepSize = 1e-6;
+
+ ParameterVector step_size = x.array().abs() * kRelativeStepSize;
+
+ // To handle cases where a parameter is exactly zero, instead use the mean
+ // step_size for the other dimensions.
+ double fallback_step_size = step_size.sum() / step_size.rows();
+ if (fallback_step_size == 0.0) {
+ // If all the parameters are zero, there's no good answer. Take
+ // kRelativeStepSize as a guess and hope for the best.
+ fallback_step_size = kRelativeStepSize;
+ }
+
+ // For each parameter in the parameter block, use finite differences to
+ // compute the derivative for that parameter.
+ for (int j = 0; j < parameter_block_size; ++j) {
+ if (step_size(j) == 0.0) {
+ // The parameter is exactly zero, so compromise and use the mean
+ // step_size from the other parameters. This can break in many cases,
+ // but it's hard to pick a good number without problem specific
+ // knowledge.
+ step_size(j) = fallback_step_size;
+ }
+ x_plus_delta(j) = x(j) + step_size(j);
+
+ double residuals[num_residuals]; // NOLINT
+ if (!function->Evaluate(parameters, residuals, NULL)) {
+ // Something went wrong; bail.
+ return false;
+ }
+
+ // Compute this column of the jacobian in 3 steps:
+ // 1. Store residuals for the forward part.
+ // 2. Subtract residuals for the backward (or 0) part.
+ // 3. Divide out the run.
+ parameter_jacobian.col(j) =
+ Map<const ResidualVector>(residuals, num_residuals);
+
+ double one_over_h = 1 / step_size(j);
+ if (method == CENTRAL) {
+ // Compute the function on the other side of x(j).
+ x_plus_delta(j) = x(j) - step_size(j);
+
+ if (!function->Evaluate(parameters, residuals, NULL)) {
+ // Something went wrong; bail.
+ return false;
+ }
+ parameter_jacobian.col(j) -=
+ Map<ResidualVector>(residuals, num_residuals, 1);
+ one_over_h /= 2;
+ } else {
+ // Forward difference only; reuse existing residuals evaluation.
+ parameter_jacobian.col(j) -=
+ Map<const ResidualVector>(residuals_at_eval_point, num_residuals);
+ }
+ x_plus_delta(j) = x(j); // Restore x_plus_delta.
+
+ // Divide out the run to get slope.
+ parameter_jacobian.col(j) *= one_over_h;
+ }
+ return true;
+ }
+};
+
+// Prevent invalid instantiations.
+template <typename CostFunctionNoJacobian,
+ int num_residuals,
+ int parameter_block,
+ NumericDiffMethod method>
+struct Differencer<CostFunctionNoJacobian,
+ num_residuals,
+ 0 /* parameter_block_size */,
+ parameter_block,
+ method> {
+ static bool EvaluateJacobianForParameterBlock(
+ const CostFunctionNoJacobian *function,
+ double const* residuals_at_eval_point,
+ double **parameters,
+ double **jacobians) {
+ LOG(FATAL) << "Shouldn't get here.";
+ return true;
+ }
+};
+
+template <typename CostFunctionNoJacobian,
+ NumericDiffMethod method = CENTRAL, int M = 0,
+ int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0, int N5 = 0>
+class NumericDiffCostFunction
+ : public SizedCostFunction<M, N0, N1, N2, N3, N4, N5> {
+ public:
+ NumericDiffCostFunction(CostFunctionNoJacobian* function,
+ Ownership ownership)
+ : function_(function), ownership_(ownership) {}
+
+ virtual ~NumericDiffCostFunction() {
+ if (ownership_ != TAKE_OWNERSHIP) {
+ function_.release();
+ }
+ }
+
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+    // Get the function value (residuals) at the point to evaluate.
+ bool success = function_->Evaluate(parameters, residuals, NULL);
+ if (!success) {
+ // Something went wrong; ignore the jacobian.
+ return false;
+ }
+ if (!jacobians) {
+ // Nothing to do; just forward.
+ return true;
+ }
+
+ // Create a copy of the parameters which will get mutated.
+ const int kParametersSize = N0 + N1 + N2 + N3 + N4 + N5;
+ double parameters_copy[kParametersSize];
+ double *parameters_references_copy[6];
+ parameters_references_copy[0] = &parameters_copy[0];
+ parameters_references_copy[1] = &parameters_copy[0] + N0;
+ parameters_references_copy[2] = &parameters_copy[0] + N0 + N1;
+ parameters_references_copy[3] = &parameters_copy[0] + N0 + N1 + N2;
+ parameters_references_copy[4] = &parameters_copy[0] + N0 + N1 + N2 + N3;
+ parameters_references_copy[5] =
+ &parameters_copy[0] + N0 + N1 + N2 + N3 + N4;
+
+#define COPY_PARAMETER_BLOCK(block) \
+ if (N ## block) memcpy(parameters_references_copy[block], \
+ parameters[block], \
+ sizeof(double) * N ## block); // NOLINT
+ COPY_PARAMETER_BLOCK(0);
+ COPY_PARAMETER_BLOCK(1);
+ COPY_PARAMETER_BLOCK(2);
+ COPY_PARAMETER_BLOCK(3);
+ COPY_PARAMETER_BLOCK(4);
+ COPY_PARAMETER_BLOCK(5);
+#undef COPY_PARAMETER_BLOCK
+
+#define EVALUATE_JACOBIAN_FOR_BLOCK(block) \
+ if (N ## block && jacobians[block]) { \
+ if (!Differencer<CostFunctionNoJacobian, /* NOLINT */ \
+ M, \
+ N ## block, \
+ block, \
+ method>::EvaluateJacobianForParameterBlock( \
+ function_.get(), \
+ residuals, \
+ parameters_references_copy, \
+ jacobians)) { \
+ return false; \
+ } \
+ }
+ EVALUATE_JACOBIAN_FOR_BLOCK(0);
+ EVALUATE_JACOBIAN_FOR_BLOCK(1);
+ EVALUATE_JACOBIAN_FOR_BLOCK(2);
+ EVALUATE_JACOBIAN_FOR_BLOCK(3);
+ EVALUATE_JACOBIAN_FOR_BLOCK(4);
+ EVALUATE_JACOBIAN_FOR_BLOCK(5);
+#undef EVALUATE_JACOBIAN_FOR_BLOCK
+ return true;
+ }
+
+ private:
+ internal::scoped_ptr<CostFunctionNoJacobian> function_;
+ Ownership ownership_;
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_NUMERIC_DIFF_COST_FUNCTION_H_
diff --git a/include/ceres/ordered_groups.h b/include/ceres/ordered_groups.h
new file mode 100644
index 0000000..e373d35
--- /dev/null
+++ b/include/ceres/ordered_groups.h
@@ -0,0 +1,176 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_PUBLIC_ORDERED_GROUPS_H_
+#define CERES_PUBLIC_ORDERED_GROUPS_H_
+
+#include <map>
+#include <set>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+
+// A class for storing and manipulating an ordered collection of
+// groups/sets with the following semantics:
+//
+// Group ids are non-negative integer values. Elements are any type
+// that can serve as a key in a map or an element of a set.
+//
+// An element can only belong to one group at a time. A group may
+// contain an arbitrary number of elements.
+//
+// Groups are ordered by their group id.
+template <typename T>
+class OrderedGroups {
+ public:
+ // Add an element to a group. If a group with this id does not
+ // exist, one is created. This method can be called any number of
+ // times for the same element. Group ids should be non-negative
+ // numbers.
+ //
+ // Return value indicates if adding the element was a success.
+ bool AddElementToGroup(const T element, const int group) {
+ if (group < 0) {
+ return false;
+ }
+
+ typename map<T, int>::const_iterator it = element_to_group_.find(element);
+ if (it != element_to_group_.end()) {
+ if (it->second == group) {
+ // Element is already in the right group, nothing to do.
+ return true;
+ }
+
+ group_to_elements_[it->second].erase(element);
+ if (group_to_elements_[it->second].size() == 0) {
+ group_to_elements_.erase(it->second);
+ }
+ }
+
+ element_to_group_[element] = group;
+ group_to_elements_[group].insert(element);
+ return true;
+ }
+
+ void Clear() {
+ group_to_elements_.clear();
+ element_to_group_.clear();
+ }
+
+  // Remove the element, no matter what group it is in. If the element
+  // is not a member of any group, this method is a no-op and returns
+  // false.
+ //
+ // Return value indicates if the element was actually removed.
+ bool Remove(const T element) {
+ const int current_group = GroupId(element);
+ if (current_group < 0) {
+ return false;
+ }
+
+ group_to_elements_[current_group].erase(element);
+
+ if (group_to_elements_[current_group].size() == 0) {
+ // If the group is empty, then get rid of it.
+ group_to_elements_.erase(current_group);
+ }
+
+ element_to_group_.erase(element);
+ return true;
+ }
+
+ // Reverse the order of the groups in place.
+ void Reverse() {
+ typename map<int, set<T> >::reverse_iterator it =
+ group_to_elements_.rbegin();
+ map<int, set<T> > new_group_to_elements;
+ new_group_to_elements[it->first] = it->second;
+
+ int new_group_id = it->first + 1;
+ for (++it; it != group_to_elements_.rend(); ++it) {
+ for (typename set<T>::const_iterator element_it = it->second.begin();
+ element_it != it->second.end();
+ ++element_it) {
+ element_to_group_[*element_it] = new_group_id;
+ }
+ new_group_to_elements[new_group_id] = it->second;
+ new_group_id++;
+ }
+
+ group_to_elements_.swap(new_group_to_elements);
+ }
+
+ // Return the group id for the element. If the element is not a
+ // member of any group, return -1.
+ int GroupId(const T element) const {
+ typename map<T, int>::const_iterator it = element_to_group_.find(element);
+ if (it == element_to_group_.end()) {
+ return -1;
+ }
+ return it->second;
+ }
+
+ bool IsMember(const T element) const {
+ typename map<T, int>::const_iterator it = element_to_group_.find(element);
+ return (it != element_to_group_.end());
+ }
+
+ // This function always succeeds, i.e., implicitly there exists a
+ // group for every integer.
+ int GroupSize(const int group) const {
+ typename map<int, set<T> >::const_iterator it =
+ group_to_elements_.find(group);
+ return (it == group_to_elements_.end()) ? 0 : it->second.size();
+ }
+
+ int NumElements() const {
+ return element_to_group_.size();
+ }
+
+ // Number of groups with one or more elements.
+ int NumGroups() const {
+ return group_to_elements_.size();
+ }
+
+ const map<int, set<T> >& group_to_elements() const {
+ return group_to_elements_;
+ }
+
+ private:
+ map<int, set<T> > group_to_elements_;
+ map<T, int> element_to_group_;
+};
+
+// Typedef for the most commonly used version of OrderedGroups.
+typedef OrderedGroups<double*> ParameterBlockOrdering;
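+
+// Example usage (a sketch; point and camera are hypothetical double*
+// parameter blocks): suggest eliminating points before cameras, as is
+// typical for the Schur based solvers:
+//
+//   ParameterBlockOrdering ordering;
+//   ordering.AddElementToGroup(point, 0);
+//   ordering.AddElementToGroup(camera, 1);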
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_ORDERED_GROUPS_H_
diff --git a/include/ceres/problem.h b/include/ceres/problem.h
new file mode 100644
index 0000000..201cc7f
--- /dev/null
+++ b/include/ceres/problem.h
@@ -0,0 +1,286 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+// keir@google.com (Keir Mierle)
+//
+// The Problem object is used to build and hold least squares problems.
+
+#ifndef CERES_PUBLIC_PROBLEM_H_
+#define CERES_PUBLIC_PROBLEM_H_
+
+#include <cstddef>
+#include <map>
+#include <set>
+#include <vector>
+
+#include <glog/logging.h>
+#include "ceres/internal/macros.h"
+#include "ceres/internal/port.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+
+namespace ceres {
+
+class CostFunction;
+class LossFunction;
+class LocalParameterization;
+class Solver;
+
+namespace internal {
+class Preprocessor;
+class ProblemImpl;
+class ParameterBlock;
+class ResidualBlock;
+} // namespace internal
+
+// A ResidualBlockId is a handle clients can use to delete residual
+// blocks after creating them. They are opaque for any purposes other
+// than that.
+typedef const internal::ResidualBlock* ResidualBlockId;
+
+// A class to represent non-linear least squares problems. Such
+// problems have a cost function that is a sum of error terms (known
+// as "residuals"), where each residual is a function of some subset
+// of the parameters. The cost function takes the form
+//
+//    N    1
+//   SUM  --- loss( || r_i1, r_i2,..., r_ik ||^2 ),
+//   i=1   2
+//
+// where
+//
+// r_ij is residual number i, component j; the residual is a
+// function of some subset of the parameters x1...xk. For
+// example, in a structure from motion problem a residual
+// might be the difference between a measured point in an
+// image and the reprojected position for the matching
+// camera, point pair. The residual would have two
+// components, error in x and error in y.
+//
+// loss(y) is the loss function; for example, squared error or
+// Huber L1 loss. If loss(y) = y, then the cost function is
+// non-robustified least squares.
+//
+// This class is specifically designed to address the important subset
+// of "sparse" least squares problems, where each component of the
+// residual depends only on a small number of parameters, even
+// though the total number of residuals and parameters may be very
+// large. This property affords tremendous gains in scale, allowing
+// efficient solving of large problems that are otherwise
+// inaccessible.
+//
+// The canonical example of a sparse least squares problem is
+// "structure-from-motion" (SFM), where the parameters are points and
+// cameras, and residuals are reprojection errors. Typically a single
+// residual will depend only on 9 parameters (3 for the point, 6 for
+// the camera).
+//
+// To create a least squares problem, use the AddResidualBlock() and
+// AddParameterBlock() methods, documented below. Here is an example least
+// squares problem containing 3 parameter blocks of sizes 3, 4 and 5
+// respectively and two residual terms of size 2 and 6:
+//
+// double x1[] = { 1.0, 2.0, 3.0 };
+// double x2[] = { 1.0, 2.0, 3.0, 5.0 };
+// double x3[] = { 1.0, 2.0, 3.0, 6.0, 7.0 };
+//
+// Problem problem;
+//
+//   problem.AddResidualBlock(new MyUnaryCostFunction(...), NULL, x1);
+//   problem.AddResidualBlock(new MyBinaryCostFunction(...), NULL, x2, x3);
+//
+// Please see cost_function.h for details of the CostFunction object.
+class Problem {
+ public:
+ struct Options {
+ Options()
+ : cost_function_ownership(TAKE_OWNERSHIP),
+ loss_function_ownership(TAKE_OWNERSHIP),
+ local_parameterization_ownership(TAKE_OWNERSHIP) {}
+
+ // These flags control whether the Problem object owns the cost
+ // functions, loss functions, and parameterizations passed into
+ // the Problem. If set to TAKE_OWNERSHIP, then the problem object
+ // will delete the corresponding cost or loss functions on
+ // destruction. The destructor is careful to delete the pointers
+ // only once, since sharing cost/loss/parameterizations is
+ // allowed.
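+    //
+    // For example, a minimal sketch of keeping ownership with the caller
+    // (MyCostFunction is a hypothetical CostFunction subclass):
+    //
+    //   Problem::Options options;
+    //   options.cost_function_ownership = DO_NOT_TAKE_OWNERSHIP;
+    //   Problem problem(options);
+    //
+    //   double x[3] = {1.0, 2.0, 3.0};
+    //   MyCostFunction cost_function;  // Not heap allocated.
+    //   problem.AddResidualBlock(&cost_function, NULL, x);
+    //   // The Problem will not delete cost_function on destruction.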
+ Ownership cost_function_ownership;
+ Ownership loss_function_ownership;
+ Ownership local_parameterization_ownership;
+ };
+
+ // The default constructor is equivalent to the
+ // invocation Problem(Problem::Options()).
+ Problem();
+ explicit Problem(const Options& options);
+
+ ~Problem();
+
+ // Add a residual block to the overall cost function. The cost
+ // function carries with it information about the sizes of the
+ // parameter blocks it expects. The function checks that these match
+ // the sizes of the parameter blocks listed in parameter_blocks. The
+ // program aborts if a mismatch is detected. loss_function can be
+ // NULL, in which case the cost of the term is just the squared norm
+ // of the residuals.
+ //
+ // The user has the option of explicitly adding the parameter blocks
+ // using AddParameterBlock. This causes additional correctness
+ // checking; however, AddResidualBlock implicitly adds the parameter
+ // blocks if they are not present, so calling AddParameterBlock
+ // explicitly is not required.
+ //
+ // The Problem object by default takes ownership of the
+ // cost_function and loss_function pointers. These objects remain
+ // live for the life of the Problem object. If the user wishes to
+ // keep control over the destruction of these objects, then they can
+ // do this by setting the corresponding enums in the Options struct.
+ //
+ // Note: Even though the Problem takes ownership of cost_function
+ // and loss_function, it does not preclude the user from re-using
+ // them in another residual block. The destructor takes care to call
+ // delete on each cost_function or loss_function pointer only once,
+ // regardless of how many residual blocks refer to them.
+ //
+ // Example usage:
+ //
+ // double x1[] = {1.0, 2.0, 3.0};
+ // double x2[] = {1.0, 2.0, 5.0, 6.0};
+ // double x3[] = {3.0, 6.0, 2.0, 5.0, 1.0};
+ //
+ // Problem problem;
+ //
+ // problem.AddResidualBlock(new MyUnaryCostFunction(...), NULL, x1);
+ // problem.AddResidualBlock(new MyBinaryCostFunction(...), NULL, x2, x1);
+ //
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ const vector<double*>& parameter_blocks);
+
+ // Convenience methods for adding residuals with a small number of
+ // parameters. This is the common case. Instead of specifying the
+ // parameter block arguments as a vector, list them as pointers.
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2,
+ double* x3);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2,
+ double* x3, double* x4);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2,
+ double* x3, double* x4, double* x5);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2,
+ double* x3, double* x4, double* x5,
+ double* x6);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2,
+ double* x3, double* x4, double* x5,
+ double* x6, double* x7);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2,
+ double* x3, double* x4, double* x5,
+ double* x6, double* x7, double* x8);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2,
+ double* x3, double* x4, double* x5,
+ double* x6, double* x7, double* x8,
+ double* x9);
+
+ // Add a parameter block with appropriate size to the problem.
+ // Repeated calls with the same arguments are ignored. Repeated
+  // calls with the same double pointer but a different size result
+ // in undefined behaviour.
+ void AddParameterBlock(double* values, int size);
+
+ // Add a parameter block with appropriate size and parameterization
+ // to the problem. Repeated calls with the same arguments are
+ // ignored. Repeated calls with the same double pointer but a
+  // different size result in undefined behaviour.
+ void AddParameterBlock(double* values,
+ int size,
+ LocalParameterization* local_parameterization);
+
+ // Hold the indicated parameter block constant during optimization.
+ void SetParameterBlockConstant(double* values);
+
+ // Allow the indicated parameter to vary during optimization.
+ void SetParameterBlockVariable(double* values);
+
+ // Set the local parameterization for one of the parameter blocks.
+ // The local_parameterization is owned by the Problem by default. It
+ // is acceptable to set the same parameterization for multiple
+ // parameters; the destructor is careful to delete local
+ // parameterizations only once. The local parameterization can only
+ // be set once per parameter, and cannot be changed once set.
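+  //
+  // A minimal sketch, assuming the QuaternionParameterization declared in
+  // local_parameterization.h and a parameter block holding a unit
+  // quaternion:
+  //
+  //   double q[4] = {1.0, 0.0, 0.0, 0.0};
+  //   problem.AddParameterBlock(q, 4);
+  //   problem.SetParameterization(q, new QuaternionParameterization);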
+ void SetParameterization(double* values,
+ LocalParameterization* local_parameterization);
+
+ // Number of parameter blocks in the problem. Always equals
+ // parameter_blocks().size() and parameter_block_sizes().size().
+ int NumParameterBlocks() const;
+
+ // The size of the parameter vector obtained by summing over the
+ // sizes of all the parameter blocks.
+ int NumParameters() const;
+
+ // Number of residual blocks in the problem. Always equals
+ // residual_blocks().size().
+ int NumResidualBlocks() const;
+
+ // The size of the residual vector obtained by summing over the
+ // sizes of all of the residual blocks.
+ int NumResiduals() const;
+
+ private:
+ friend class Solver;
+ internal::scoped_ptr<internal::ProblemImpl> problem_impl_;
+ CERES_DISALLOW_COPY_AND_ASSIGN(Problem);
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_PROBLEM_H_
diff --git a/include/ceres/rotation.h b/include/ceres/rotation.h
new file mode 100644
index 0000000..0d8a390
--- /dev/null
+++ b/include/ceres/rotation.h
@@ -0,0 +1,534 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+// sameeragarwal@google.com (Sameer Agarwal)
+//
+// Templated functions for manipulating rotations. The templated
+// functions are useful when implementing functors for automatic
+// differentiation.
+//
+// In the following, the Quaternions are laid out as 4-vectors, thus:
+//
+// q[0] scalar part.
+// q[1] coefficient of i.
+// q[2] coefficient of j.
+// q[3] coefficient of k.
+//
+// where: i*i = j*j = k*k = -1 and i*j = k, j*k = i, k*i = j.
+
+#ifndef CERES_PUBLIC_ROTATION_H_
+#define CERES_PUBLIC_ROTATION_H_
+
+#include <algorithm>
+#include <cmath>
+#include "glog/logging.h"
+
+namespace ceres {
+
+// Convert a value in combined axis-angle representation to a quaternion.
+// The value angle_axis is a triple whose norm is an angle in radians,
+// and whose direction is aligned with the axis of rotation,
+// and quaternion is a 4-tuple that will contain the resulting quaternion.
+// The implementation may be used with auto-differentiation up to the first
+// derivative; higher derivatives may have unexpected results near the origin.
+template<typename T>
+void AngleAxisToQuaternion(T const* angle_axis, T* quaternion);
+
+// Convert a quaternion to the equivalent combined axis-angle representation.
+// The value quaternion must be a unit quaternion - it is not normalized first,
+// and angle_axis will be filled with a value whose norm is the angle of
+// rotation in radians, and whose direction is the axis of rotation.
+// The implementation may be used with auto-differentiation up to the first
+// derivative; higher derivatives may have unexpected results near the origin.
+template<typename T>
+void QuaternionToAngleAxis(T const* quaternion, T* angle_axis);
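+//
+// For example (a minimal sketch; M_PI from <cmath> is assumed to be
+// available), a rotation of pi/2 radians about the z-axis:
+//
+//   double angle_axis[3] = {0.0, 0.0, M_PI / 2.0};
+//   double quaternion[4];
+//   AngleAxisToQuaternion(angle_axis, quaternion);
+//   // quaternion is approximately {cos(pi/4), 0, 0, sin(pi/4)}.
+//   QuaternionToAngleAxis(quaternion, angle_axis);
+//   // angle_axis is recovered as approximately {0, 0, pi/2}.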
+
+// Conversions between 3x3 rotation matrix (in column major order) and
+// axis-angle rotation representations. Templated for use with
+// autodifferentiation.
+template <typename T>
+void RotationMatrixToAngleAxis(T const * R, T * angle_axis);
+template <typename T>
+void AngleAxisToRotationMatrix(T const * angle_axis, T * R);
+
+// Conversions between 3x3 rotation matrix (in row major order) and
+// Euler angle (in degrees) rotation representations.
+//
+// The {pitch,roll,yaw} Euler angles are rotations around the {x,y,z}
+// axes, respectively. They are applied in that same order, so the
+// total rotation R is Rz * Ry * Rx.
+template <typename T>
+void EulerAnglesToRotationMatrix(const T* euler, int row_stride, T* R);
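+//
+// A minimal usage sketch: for a tightly packed row-major 3x3 matrix, the
+// row_stride is 3.
+//
+//   double euler[3] = {10.0, 20.0, 30.0};  // pitch, roll, yaw in degrees.
+//   double R[9];
+//   EulerAnglesToRotationMatrix(euler, 3, R);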
+
+// Convert a 4-vector to a 3x3 scaled rotation matrix.
+//
+// The choice of rotation is such that the quaternion [1 0 0 0] goes to an
+// identity matrix and for small a, b, c the quaternion [1 a b c] goes to
+// the matrix
+//
+// [ 0 -c b ]
+// I + 2 [ c 0 -a ] + higher order terms
+// [ -b a 0 ]
+//
+// which corresponds to a Rodrigues approximation, the last matrix being
+// the cross-product matrix of [a b c]. Together with the property that
+// R(q1 * q2) = R(q1) * R(q2) this uniquely defines the mapping from q to R.
+//
+// The rotation matrix is row-major.
+//
+// No normalization of the quaternion is performed, i.e.
+// R = ||q||^2 * Q, where Q is an orthonormal matrix
+// such that det(Q) = 1 and Q*Q' = I
+template <typename T> inline
+void QuaternionToScaledRotation(const T q[4], T R[3 * 3]);
+
+// Same as above except that the rotation matrix is normalized by the
+// Frobenius norm, so that R * R' = I (and det(R) = 1).
+template <typename T> inline
+void QuaternionToRotation(const T q[4], T R[3 * 3]);
+
+// Rotates a point pt by a quaternion q:
+//
+// result = R(q) * pt
+//
+// Assumes the quaternion is unit norm. This assumption allows us to
+// write the transform as (something)*pt + pt, as is clear from the
+// formula below. If you pass in a quaternion with |q|^2 = 2 then you
+// WILL NOT get back 2 times the result you get for a unit quaternion.
+template <typename T> inline
+void UnitQuaternionRotatePoint(const T q[4], const T pt[3], T result[3]);
+
+// With this function you do not need to assume that q has unit norm.
+// It does assume that the norm is non-zero.
+template <typename T> inline
+void QuaternionRotatePoint(const T q[4], const T pt[3], T result[3]);
+
+// zw = z * w, where * is the Quaternion product between 4 vectors.
+template<typename T> inline
+void QuaternionProduct(const T z[4], const T w[4], T zw[4]);
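+//
+// For example (a minimal sketch), since R(z * w) = R(z) * R(w), rotating a
+// point by zw is the same as rotating it first by w and then by z. Composing
+// two 90 degree rotations about the z-axis gives a 180 degree rotation:
+//
+//   double z[4] = {sqrt(0.5), 0.0, 0.0, sqrt(0.5)};
+//   double w[4] = {sqrt(0.5), 0.0, 0.0, sqrt(0.5)};
+//   double zw[4];
+//   QuaternionProduct(z, w, zw);
+//   // zw is approximately {0, 0, 0, 1}.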
+
+// xy = x cross y;
+template<typename T> inline
+void CrossProduct(const T x[3], const T y[3], T x_cross_y[3]);
+
+template<typename T> inline
+T DotProduct(const T x[3], const T y[3]);
+
+// y = R(angle_axis) * x;
+template<typename T> inline
+void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]);
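+//
+// For example (a minimal sketch; M_PI assumed available), rotating the point
+// (1, 0, 0) by pi/2 radians about the z-axis yields approximately (0, 1, 0):
+//
+//   double angle_axis[3] = {0.0, 0.0, M_PI / 2.0};
+//   double pt[3] = {1.0, 0.0, 0.0};
+//   double result[3];
+//   AngleAxisRotatePoint(angle_axis, pt, result);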
+
+// --- IMPLEMENTATION
+
+template<typename T>
+inline void AngleAxisToQuaternion(const T* angle_axis, T* quaternion) {
+ const T& a0 = angle_axis[0];
+ const T& a1 = angle_axis[1];
+ const T& a2 = angle_axis[2];
+ const T theta_squared = a0 * a0 + a1 * a1 + a2 * a2;
+
+ // For points not at the origin, the full conversion is numerically stable.
+ if (theta_squared > T(0.0)) {
+ const T theta = sqrt(theta_squared);
+ const T half_theta = theta * T(0.5);
+ const T k = sin(half_theta) / theta;
+ quaternion[0] = cos(half_theta);
+ quaternion[1] = a0 * k;
+ quaternion[2] = a1 * k;
+ quaternion[3] = a2 * k;
+ } else {
+ // At the origin, sqrt() will produce NaN in the derivative since
+ // the argument is zero. By approximating with a Taylor series,
+ // and truncating at one term, the value and first derivatives will be
+ // computed correctly when Jets are used.
+ const T k(0.5);
+ quaternion[0] = T(1.0);
+ quaternion[1] = a0 * k;
+ quaternion[2] = a1 * k;
+ quaternion[3] = a2 * k;
+ }
+}
+
+template<typename T>
+inline void QuaternionToAngleAxis(const T* quaternion, T* angle_axis) {
+ const T& q1 = quaternion[1];
+ const T& q2 = quaternion[2];
+ const T& q3 = quaternion[3];
+ const T sin_squared_theta = q1 * q1 + q2 * q2 + q3 * q3;
+
+ // For quaternions representing non-zero rotation, the conversion
+ // is numerically stable.
+ if (sin_squared_theta > T(0.0)) {
+ const T sin_theta = sqrt(sin_squared_theta);
+ const T& cos_theta = quaternion[0];
+
+    // If cos_theta is negative, theta is greater than pi/2, which
+    // means that the angle for the angle_axis vector, which is 2 * theta,
+    // would be greater than pi.
+ //
+ // While this will result in the correct rotation, it does not
+ // result in a normalized angle-axis vector.
+ //
+ // In that case we observe that 2 * theta ~ 2 * theta - 2 * pi,
+    // which is equivalent to saying
+ //
+ // theta - pi = atan(sin(theta - pi), cos(theta - pi))
+ // = atan(-sin(theta), -cos(theta))
+ //
+ const T two_theta =
+ T(2.0) * ((cos_theta < 0.0)
+ ? atan2(-sin_theta, -cos_theta)
+ : atan2(sin_theta, cos_theta));
+ const T k = two_theta / sin_theta;
+ angle_axis[0] = q1 * k;
+ angle_axis[1] = q2 * k;
+ angle_axis[2] = q3 * k;
+ } else {
+ // For zero rotation, sqrt() will produce NaN in the derivative since
+ // the argument is zero. By approximating with a Taylor series,
+ // and truncating at one term, the value and first derivatives will be
+ // computed correctly when Jets are used.
+ const T k(2.0);
+ angle_axis[0] = q1 * k;
+ angle_axis[1] = q2 * k;
+ angle_axis[2] = q3 * k;
+ }
+}
+
+// The conversion of a rotation matrix to the angle-axis form is
+// numerically problematic when the rotation angle is close to zero
+// or to pi. The following implementation detects when these two cases
+// occur and deals with them by taking code paths that are guaranteed
+// to not perform division by a small number.
+template <typename T>
+inline void RotationMatrixToAngleAxis(const T * R, T * angle_axis) {
+ // x = k * 2 * sin(theta), where k is the axis of rotation.
+ angle_axis[0] = R[5] - R[7];
+ angle_axis[1] = R[6] - R[2];
+ angle_axis[2] = R[1] - R[3];
+
+ static const T kOne = T(1.0);
+ static const T kTwo = T(2.0);
+
+  // Since the right hand side may give numbers just above 1.0 or
+  // below -1.0, leading to atan2 misbehaving, we threshold.
+ T costheta = std::min(std::max((R[0] + R[4] + R[8] - kOne) / kTwo,
+ T(-1.0)),
+ kOne);
+
+ // sqrt is guaranteed to give non-negative results, so we only
+ // threshold above.
+ T sintheta = std::min(sqrt(angle_axis[0] * angle_axis[0] +
+ angle_axis[1] * angle_axis[1] +
+ angle_axis[2] * angle_axis[2]) / kTwo,
+ kOne);
+
+  // Use atan2 to get the right sign on theta.
+ const T theta = atan2(sintheta, costheta);
+
+ // Case 1: sin(theta) is large enough, so dividing by it is not a
+ // problem. We do not use abs here, because while jets.h imports
+ // std::abs into the namespace, here in this file, abs resolves to
+ // the int version of the function, which returns zero always.
+ //
+  // We use a threshold much larger than the machine epsilon, because
+ // if sin(theta) is small, not only do we risk overflow but even if
+ // that does not occur, just dividing by a small number will result
+ // in numerical garbage. So we play it safe.
+ static const double kThreshold = 1e-12;
+ if ((sintheta > kThreshold) || (sintheta < -kThreshold)) {
+ const T r = theta / (kTwo * sintheta);
+ for (int i = 0; i < 3; ++i) {
+ angle_axis[i] *= r;
+ }
+ return;
+ }
+
+  // Case 2: theta ~ 0, which means sin(theta) ~ theta to a good
+ // approximation.
+ if (costheta > 0.0) {
+ const T kHalf = T(0.5);
+ for (int i = 0; i < 3; ++i) {
+ angle_axis[i] *= kHalf;
+ }
+ return;
+ }
+
+  // Case 3: theta ~ pi. This is the hard case: theta is large and
+  // sin(theta) is small. Dividing theta by sin(theta) will either
+  // overflow or, worse still, give numerically meaningless results.
+  // Thus we use an alternate, more complicated formula here.
+
+ // Since cos(theta) is negative, division by (1-cos(theta)) cannot
+ // overflow.
+ const T inv_one_minus_costheta = kOne / (kOne - costheta);
+
+ // We now compute the absolute value of coordinates of the axis
+ // vector using the diagonal entries of R. To resolve the sign of
+ // these entries, we compare the sign of angle_axis[i]*sin(theta)
+ // with the sign of sin(theta). If they are the same, then
+ // angle_axis[i] should be positive, otherwise negative.
+ for (int i = 0; i < 3; ++i) {
+ angle_axis[i] = theta * sqrt((R[i*4] - costheta) * inv_one_minus_costheta);
+ if (((sintheta < 0.0) && (angle_axis[i] > 0.0)) ||
+ ((sintheta > 0.0) && (angle_axis[i] < 0.0))) {
+ angle_axis[i] = -angle_axis[i];
+ }
+ }
+}
+
+template <typename T>
+inline void AngleAxisToRotationMatrix(const T * angle_axis, T * R) {
+ static const T kOne = T(1.0);
+ const T theta2 = DotProduct(angle_axis, angle_axis);
+ if (theta2 > 0.0) {
+ // We want to be careful to only evaluate the square root if the
+ // norm of the angle_axis vector is greater than zero. Otherwise
+ // we get a division by zero.
+ const T theta = sqrt(theta2);
+ const T wx = angle_axis[0] / theta;
+ const T wy = angle_axis[1] / theta;
+ const T wz = angle_axis[2] / theta;
+
+ const T costheta = cos(theta);
+ const T sintheta = sin(theta);
+
+ R[0] = costheta + wx*wx*(kOne - costheta);
+ R[1] = wz*sintheta + wx*wy*(kOne - costheta);
+ R[2] = -wy*sintheta + wx*wz*(kOne - costheta);
+ R[3] = wx*wy*(kOne - costheta) - wz*sintheta;
+ R[4] = costheta + wy*wy*(kOne - costheta);
+ R[5] = wx*sintheta + wy*wz*(kOne - costheta);
+ R[6] = wy*sintheta + wx*wz*(kOne - costheta);
+ R[7] = -wx*sintheta + wy*wz*(kOne - costheta);
+ R[8] = costheta + wz*wz*(kOne - costheta);
+ } else {
+ // At zero, we switch to using the first order Taylor expansion.
+ R[0] = kOne;
+ R[1] = -angle_axis[2];
+ R[2] = angle_axis[1];
+ R[3] = angle_axis[2];
+ R[4] = kOne;
+ R[5] = -angle_axis[0];
+ R[6] = -angle_axis[1];
+ R[7] = angle_axis[0];
+ R[8] = kOne;
+ }
+}
+
+template <typename T>
+inline void EulerAnglesToRotationMatrix(const T* euler,
+ const int row_stride,
+ T* R) {
+ const double kPi = 3.14159265358979323846;
+ const T degrees_to_radians(kPi / 180.0);
+
+ const T pitch(euler[0] * degrees_to_radians);
+ const T roll(euler[1] * degrees_to_radians);
+ const T yaw(euler[2] * degrees_to_radians);
+
+ const T c1 = cos(yaw);
+ const T s1 = sin(yaw);
+ const T c2 = cos(roll);
+ const T s2 = sin(roll);
+ const T c3 = cos(pitch);
+ const T s3 = sin(pitch);
+
+ // Rows of the rotation matrix.
+ T* R1 = R;
+ T* R2 = R1 + row_stride;
+ T* R3 = R2 + row_stride;
+
+ R1[0] = c1*c2;
+ R1[1] = -s1*c3 + c1*s2*s3;
+ R1[2] = s1*s3 + c1*s2*c3;
+
+ R2[0] = s1*c2;
+ R2[1] = c1*c3 + s1*s2*s3;
+ R2[2] = -c1*s3 + s1*s2*c3;
+
+ R3[0] = -s2;
+ R3[1] = c2*s3;
+ R3[2] = c2*c3;
+}
+
+template <typename T> inline
+void QuaternionToScaledRotation(const T q[4], T R[3 * 3]) {
+ // Make convenient names for elements of q.
+ T a = q[0];
+ T b = q[1];
+ T c = q[2];
+ T d = q[3];
+ // This is not to eliminate common sub-expression, but to
+ // make the lines shorter so that they fit in 80 columns!
+ T aa = a * a;
+ T ab = a * b;
+ T ac = a * c;
+ T ad = a * d;
+ T bb = b * b;
+ T bc = b * c;
+ T bd = b * d;
+ T cc = c * c;
+ T cd = c * d;
+ T dd = d * d;
+
+ R[0] = aa + bb - cc - dd; R[1] = T(2) * (bc - ad); R[2] = T(2) * (ac + bd); // NOLINT
+ R[3] = T(2) * (ad + bc); R[4] = aa - bb + cc - dd; R[5] = T(2) * (cd - ab); // NOLINT
+ R[6] = T(2) * (bd - ac); R[7] = T(2) * (ab + cd); R[8] = aa - bb - cc + dd; // NOLINT
+}
+
+template <typename T> inline
+void QuaternionToRotation(const T q[4], T R[3 * 3]) {
+ QuaternionToScaledRotation(q, R);
+
+ T normalizer = q[0]*q[0] + q[1]*q[1] + q[2]*q[2] + q[3]*q[3];
+ CHECK_NE(normalizer, T(0));
+ normalizer = T(1) / normalizer;
+
+ for (int i = 0; i < 9; ++i) {
+ R[i] *= normalizer;
+ }
+}
+
+template <typename T> inline
+void UnitQuaternionRotatePoint(const T q[4], const T pt[3], T result[3]) {
+ const T t2 = q[0] * q[1];
+ const T t3 = q[0] * q[2];
+ const T t4 = q[0] * q[3];
+ const T t5 = -q[1] * q[1];
+ const T t6 = q[1] * q[2];
+ const T t7 = q[1] * q[3];
+ const T t8 = -q[2] * q[2];
+ const T t9 = q[2] * q[3];
+ const T t1 = -q[3] * q[3];
+ result[0] = T(2) * ((t8 + t1) * pt[0] + (t6 - t4) * pt[1] + (t3 + t7) * pt[2]) + pt[0]; // NOLINT
+ result[1] = T(2) * ((t4 + t6) * pt[0] + (t5 + t1) * pt[1] + (t9 - t2) * pt[2]) + pt[1]; // NOLINT
+ result[2] = T(2) * ((t7 - t3) * pt[0] + (t2 + t9) * pt[1] + (t5 + t8) * pt[2]) + pt[2]; // NOLINT
+}
+
+
+template <typename T> inline
+void QuaternionRotatePoint(const T q[4], const T pt[3], T result[3]) {
+ // 'scale' is 1 / norm(q).
+ const T scale = T(1) / sqrt(q[0] * q[0] +
+ q[1] * q[1] +
+ q[2] * q[2] +
+ q[3] * q[3]);
+
+ // Make unit-norm version of q.
+ const T unit[4] = {
+ scale * q[0],
+ scale * q[1],
+ scale * q[2],
+ scale * q[3],
+ };
+
+ UnitQuaternionRotatePoint(unit, pt, result);
+}
+
+template<typename T> inline
+void QuaternionProduct(const T z[4], const T w[4], T zw[4]) {
+ zw[0] = z[0] * w[0] - z[1] * w[1] - z[2] * w[2] - z[3] * w[3];
+ zw[1] = z[0] * w[1] + z[1] * w[0] + z[2] * w[3] - z[3] * w[2];
+ zw[2] = z[0] * w[2] - z[1] * w[3] + z[2] * w[0] + z[3] * w[1];
+ zw[3] = z[0] * w[3] + z[1] * w[2] - z[2] * w[1] + z[3] * w[0];
+}
+
+// xy = x cross y;
+template<typename T> inline
+void CrossProduct(const T x[3], const T y[3], T x_cross_y[3]) {
+ x_cross_y[0] = x[1] * y[2] - x[2] * y[1];
+ x_cross_y[1] = x[2] * y[0] - x[0] * y[2];
+ x_cross_y[2] = x[0] * y[1] - x[1] * y[0];
+}
+
+template<typename T> inline
+T DotProduct(const T x[3], const T y[3]) {
+ return (x[0] * y[0] + x[1] * y[1] + x[2] * y[2]);
+}
+
+template<typename T> inline
+void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]) {
+ T w[3];
+ T sintheta;
+ T costheta;
+
+ const T theta2 = DotProduct(angle_axis, angle_axis);
+ if (theta2 > 0.0) {
+    // Away from zero, use the Rodrigues formula
+ //
+ // result = pt costheta +
+ // (w x pt) * sintheta +
+ // w (w . pt) (1 - costheta)
+ //
+ // We want to be careful to only evaluate the square root if the
+ // norm of the angle_axis vector is greater than zero. Otherwise
+ // we get a division by zero.
+ //
+ const T theta = sqrt(theta2);
+ w[0] = angle_axis[0] / theta;
+ w[1] = angle_axis[1] / theta;
+ w[2] = angle_axis[2] / theta;
+ costheta = cos(theta);
+ sintheta = sin(theta);
+ T w_cross_pt[3];
+ CrossProduct(w, pt, w_cross_pt);
+ T w_dot_pt = DotProduct(w, pt);
+ for (int i = 0; i < 3; ++i) {
+ result[i] = pt[i] * costheta +
+ w_cross_pt[i] * sintheta +
+ w[i] * (T(1.0) - costheta) * w_dot_pt;
+ }
+ } else {
+ // Near zero, the first order Taylor approximation of the rotation
+    // matrix R corresponding to a unit vector w and angle theta is
+ //
+ // R = I + hat(w) * sin(theta)
+ //
+ // But sintheta ~ theta and theta * w = angle_axis, which gives us
+ //
+ // R = I + hat(w)
+ //
+ // and actually performing multiplication with the point pt, gives us
+ // R * pt = pt + w x pt.
+ //
+ // Switching to the Taylor expansion at zero helps avoid all sorts
+ // of numerical nastiness.
+ T w_cross_pt[3];
+ CrossProduct(angle_axis, pt, w_cross_pt);
+ for (int i = 0; i < 3; ++i) {
+ result[i] = pt[i] + w_cross_pt[i];
+ }
+ }
+}
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_ROTATION_H_
diff --git a/include/ceres/sized_cost_function.h b/include/ceres/sized_cost_function.h
new file mode 100644
index 0000000..6bfc1af
--- /dev/null
+++ b/include/ceres/sized_cost_function.h
@@ -0,0 +1,96 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// A convenience class for cost functions which are statically sized.
+// Compared to the dynamically-sized base class, this reduces boilerplate.
+//
+// The kNumResiduals template parameter can be a constant such as 2 or 5, or it
+// can be ceres::DYNAMIC. If kNumResiduals is ceres::DYNAMIC, then subclasses
+// are responsible for calling set_num_residuals() at runtime.
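+//
+// A minimal sketch (MyCostFunction is a hypothetical example): a cost
+// function with 2 residuals and a single 3-dimensional parameter block
+// would be declared as
+//
+//   class MyCostFunction : public SizedCostFunction<2, 3> {
+//    public:
+//     virtual bool Evaluate(double const* const* parameters,
+//                           double* residuals,
+//                           double** jacobians) const {
+//       // Fill in residuals and, if jacobians != NULL, the jacobians.
+//       ...
+//       return true;
+//     }
+//   };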
+
+#ifndef CERES_PUBLIC_SIZED_COST_FUNCTION_H_
+#define CERES_PUBLIC_SIZED_COST_FUNCTION_H_
+
+#include <glog/logging.h>
+#include "ceres/types.h"
+#include "ceres/cost_function.h"
+
+namespace ceres {
+
+template<int kNumResiduals,
+ int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0,
+ int N5 = 0, int N6 = 0, int N7 = 0, int N8 = 0, int N9 = 0>
+class SizedCostFunction : public CostFunction {
+ public:
+ SizedCostFunction() {
+ CHECK(kNumResiduals > 0 || kNumResiduals == DYNAMIC)
+ << "Cost functions must have at least one residual block.";
+
+ // This block breaks the 80 column rule to keep it somewhat readable.
+ CHECK((!N1 && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
+ ((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
+ ((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
+ ((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
+ ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) ||
+ ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) ||
+ ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) ||
+ ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) ||
+ ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) ||
+ ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0)))
+ << "Zero block cannot precede a non-zero block. Block sizes are "
+ << "(ignore trailing 0s): " << N0 << ", " << N1 << ", " << N2 << ", "
+ << N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", "
+ << N8 << ", " << N9;
+
+ set_num_residuals(kNumResiduals);
+
+#define ADD_PARAMETER_BLOCK(N) \
+ if (N) mutable_parameter_block_sizes()->push_back(N);
+ ADD_PARAMETER_BLOCK(N0);
+ ADD_PARAMETER_BLOCK(N1);
+ ADD_PARAMETER_BLOCK(N2);
+ ADD_PARAMETER_BLOCK(N3);
+ ADD_PARAMETER_BLOCK(N4);
+ ADD_PARAMETER_BLOCK(N5);
+ ADD_PARAMETER_BLOCK(N6);
+ ADD_PARAMETER_BLOCK(N7);
+ ADD_PARAMETER_BLOCK(N8);
+ ADD_PARAMETER_BLOCK(N9);
+#undef ADD_PARAMETER_BLOCK
+ }
+
+ virtual ~SizedCostFunction() { }
+
+ // Subclasses must implement Evaluate().
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_SIZED_COST_FUNCTION_H_
diff --git a/include/ceres/solver.h b/include/ceres/solver.h
new file mode 100644
index 0000000..62fb087
--- /dev/null
+++ b/include/ceres/solver.h
@@ -0,0 +1,654 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_PUBLIC_SOLVER_H_
+#define CERES_PUBLIC_SOLVER_H_
+
+#include <cmath>
+#include <string>
+#include <vector>
+#include "ceres/crs_matrix.h"
+#include "ceres/internal/macros.h"
+#include "ceres/internal/port.h"
+#include "ceres/iteration_callback.h"
+#include "ceres/ordered_groups.h"
+#include "ceres/types.h"
+
+namespace ceres {
+
+class Problem;
+
+// Interface for non-linear least squares solvers.
+class Solver {
+ public:
+ virtual ~Solver();
+
+ // The options structure contains, not surprisingly, options that control how
+ // the solver operates. The defaults should be suitable for a wide range of
+ // problems; however, better performance is often obtainable with tweaking.
+ //
+ // The constants are defined inside types.h
+ struct Options {
+ // Default constructor that sets up a generic sparse problem.
+ Options() {
+ trust_region_strategy_type = LEVENBERG_MARQUARDT;
+ dogleg_type = TRADITIONAL_DOGLEG;
+ use_nonmonotonic_steps = false;
+ max_consecutive_nonmonotonic_steps = 5;
+ max_num_iterations = 50;
+ max_solver_time_in_seconds = 1e9;
+ num_threads = 1;
+ initial_trust_region_radius = 1e4;
+ max_trust_region_radius = 1e16;
+ min_trust_region_radius = 1e-32;
+ min_relative_decrease = 1e-3;
+ lm_min_diagonal = 1e-6;
+ lm_max_diagonal = 1e32;
+ max_num_consecutive_invalid_steps = 5;
+ function_tolerance = 1e-6;
+ gradient_tolerance = 1e-10;
+ parameter_tolerance = 1e-8;
+
+#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
+ linear_solver_type = DENSE_QR;
+#else
+ linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+#endif
+
+ preconditioner_type = JACOBI;
+
+ sparse_linear_algebra_library = SUITE_SPARSE;
+#if defined(CERES_NO_SUITESPARSE) && !defined(CERES_NO_CXSPARSE)
+ sparse_linear_algebra_library = CX_SPARSE;
+#endif
+
+ num_linear_solver_threads = 1;
+
+#if defined(CERES_NO_SUITESPARSE)
+ use_block_amd = false;
+#else
+ use_block_amd = true;
+#endif
+ linear_solver_ordering = NULL;
+ use_inner_iterations = false;
+ inner_iteration_ordering = NULL;
+ linear_solver_min_num_iterations = 1;
+ linear_solver_max_num_iterations = 500;
+ eta = 1e-1;
+ jacobi_scaling = true;
+ logging_type = PER_MINIMIZER_ITERATION;
+ minimizer_progress_to_stdout = false;
+ return_initial_residuals = false;
+ return_initial_gradient = false;
+ return_initial_jacobian = false;
+ return_final_residuals = false;
+ return_final_gradient = false;
+ return_final_jacobian = false;
+ lsqp_dump_directory = "/tmp";
+ lsqp_dump_format_type = TEXTFILE;
+ check_gradients = false;
+ gradient_check_relative_precision = 1e-8;
+ numeric_derivative_relative_step_size = 1e-6;
+ update_state_every_iteration = false;
+ }
+
+ ~Options();
+ // Minimizer options ----------------------------------------
+
+ TrustRegionStrategyType trust_region_strategy_type;
+
+ // Type of dogleg strategy to use.
+ DoglegType dogleg_type;
+
+ // The classical trust region methods are descent methods, in that
+ // they only accept a point if it strictly reduces the value of
+ // the objective function.
+ //
+ // Relaxing this requirement allows the algorithm to be more
+ // efficient in the long term at the cost of some local increase
+ // in the value of the objective function.
+ //
+ // This is because allowing for non-decreasing objective function
+    // values in a principled manner allows the algorithm to "jump over
+ // boulders" as the method is not restricted to move into narrow
+ // valleys while preserving its convergence properties.
+ //
+ // Setting use_nonmonotonic_steps to true enables the
+ // non-monotonic trust region algorithm as described by Conn,
+ // Gould & Toint in "Trust Region Methods", Section 10.1.
+ //
+ // The parameter max_consecutive_nonmonotonic_steps controls the
+ // window size used by the step selection algorithm to accept
+ // non-monotonic steps.
+ //
+ // Even though the value of the objective function may be larger
+ // than the minimum value encountered over the course of the
+ // optimization, the final parameters returned to the user are the
+ // ones corresponding to the minimum cost over all iterations.
+ bool use_nonmonotonic_steps;
+ int max_consecutive_nonmonotonic_steps;
+
+ // Maximum number of iterations for the minimizer to run for.
+ int max_num_iterations;
+
+    // Maximum amount of time for which the minimizer should run.
+ double max_solver_time_in_seconds;
+
+ // Number of threads used by Ceres for evaluating the cost and
+ // jacobians.
+ int num_threads;
+
+ // Trust region minimizer settings.
+ double initial_trust_region_radius;
+ double max_trust_region_radius;
+
+ // Minimizer terminates when the trust region radius becomes
+ // smaller than this value.
+ double min_trust_region_radius;
+
+ // Lower bound for the relative decrease before a step is
+ // accepted.
+ double min_relative_decrease;
+
+    // For the Levenberg-Marquardt algorithm, the scaled diagonal of
+ // the normal equations J'J is used to control the size of the
+ // trust region. Extremely small and large values along the
+ // diagonal can make this regularization scheme
+    // fail. lm_max_diagonal and lm_min_diagonal clamp the values of
+ // diag(J'J) from above and below. In the normal course of
+ // operation, the user should not have to modify these parameters.
+ double lm_min_diagonal;
+ double lm_max_diagonal;
+
+ // Sometimes due to numerical conditioning problems or linear
+ // solver flakiness, the trust region strategy may return a
+ // numerically invalid step that can be fixed by reducing the
+ // trust region size. So the TrustRegionMinimizer allows for a few
+ // successive invalid steps before it declares NUMERICAL_FAILURE.
+ int max_num_consecutive_invalid_steps;
+
+ // Minimizer terminates when
+ //
+ // (new_cost - old_cost) < function_tolerance * old_cost;
+ //
+ double function_tolerance;
+
+ // Minimizer terminates when
+ //
+ // max_i |gradient_i| < gradient_tolerance * max_i|initial_gradient_i|
+ //
+ // This value should typically be 1e-4 * function_tolerance.
+ double gradient_tolerance;
+
+ // Minimizer terminates when
+ //
+ // |step|_2 <= parameter_tolerance * ( |x|_2 + parameter_tolerance)
+ //
+ double parameter_tolerance;
+
+ // Linear least squares solver options -------------------------------------
+
+ LinearSolverType linear_solver_type;
+
+ // Type of preconditioner to use with the iterative linear solvers.
+ PreconditionerType preconditioner_type;
+
+ // Ceres supports using multiple sparse linear algebra libraries
+ // for sparse matrix ordering and factorizations. Currently,
+ // SUITE_SPARSE and CX_SPARSE are the valid choices, depending on
+ // whether they are linked into Ceres at build time.
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library;
+
+ // Number of threads used by Ceres to solve the Newton
+ // step. Currently only the SPARSE_SCHUR solver is capable of
+ // using this setting.
+ int num_linear_solver_threads;
+
+ // The order in which variables are eliminated in a linear solver
+    // can have a significant impact on the efficiency and accuracy
+    // of the method. For example, when doing sparse Cholesky factorization,
+    // there are matrices for which a good ordering will give a
+    // Cholesky factor with O(n) storage, whereas a bad ordering will
+    // result in a completely dense factor.
+ //
+ // Ceres allows the user to provide varying amounts of hints to
+ // the solver about the variable elimination ordering to use. This
+ // can range from no hints, where the solver is free to decide the
+ // best possible ordering based on the user's choices like the
+ // linear solver being used, to an exact order in which the
+ // variables should be eliminated, and a variety of possibilities
+ // in between.
+ //
+ // Instances of the ParameterBlockOrdering class are used to
+ // communicate this information to Ceres.
+ //
+    // Formally, an ordering is an ordered partitioning of the
+    // parameter blocks, i.e., each parameter block belongs to exactly
+    // one group, and each group has a unique non-negative integer
+    // associated with it that determines its order in the set of
+ // groups.
+ //
+ // Given such an ordering, Ceres ensures that the parameter blocks in
+ // the lowest numbered group are eliminated first, and then the
+    // parameter blocks in the next lowest numbered group and so on. Within
+ // each group, Ceres is free to order the parameter blocks as it
+ // chooses.
+ //
+ // If NULL, then all parameter blocks are assumed to be in the
+ // same group and the solver is free to decide the best
+ // ordering.
+ //
+ // e.g. Consider the linear system
+ //
+ // x + y = 3
+ // 2x + 3y = 7
+ //
+ // There are two ways in which it can be solved. First eliminating x
+ // from the two equations, solving for y and then back substituting
+ // for x, or first eliminating y, solving for x and back substituting
+ // for y. The user can construct three orderings here.
+ //
+ // {0: x}, {1: y} - eliminate x first.
+ // {0: y}, {1: x} - eliminate y first.
+ // {0: x, y} - Solver gets to decide the elimination order.
+ //
+ // Thus, to have Ceres determine the ordering automatically using
+    // heuristics, put all the variables in group 0; to control the
+ // ordering for every variable, create groups 0..N-1, one per
+ // variable, in the desired order.
+ //
+ // Bundle Adjustment
+ // -----------------
+ //
+ // A particular case of interest is bundle adjustment, where the user
+    // has two options. The default is to not specify an ordering at all;
+ // the solver will see that the user wants to use a Schur type solver
+ // and figure out the right elimination ordering.
+ //
+ // But if the user already knows what parameter blocks are points and
+ // what are cameras, they can save preprocessing time by partitioning
+ // the parameter blocks into two groups, one for the points and one
+ // for the cameras, where the group containing the points has an id
+ // smaller than the group containing cameras.
+ //
+ // Once assigned, Solver::Options owns this pointer and will
+ // deallocate the memory when destroyed.
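+    //
+    // A minimal sketch for the bundle adjustment case, assuming the
+    // AddElementToGroup method from ordered_groups.h and hypothetical
+    // arrays of point and camera parameter block pointers:
+    //
+    //   ParameterBlockOrdering* ordering = new ParameterBlockOrdering;
+    //   for (int i = 0; i < num_points; ++i) {
+    //     ordering->AddElementToGroup(points[i], 0);   // Eliminated first.
+    //   }
+    //   for (int i = 0; i < num_cameras; ++i) {
+    //     ordering->AddElementToGroup(cameras[i], 1);
+    //   }
+    //   options.linear_solver_ordering = ordering;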
+ ParameterBlockOrdering* linear_solver_ordering;
+
+ // By virtue of the modeling layer in Ceres being block oriented,
+ // all the matrices used by Ceres are also block oriented. When
+ // doing sparse direct factorization of these matrices (for
+ // SPARSE_NORMAL_CHOLESKY, SPARSE_SCHUR and ITERATIVE in
+    // conjunction with CLUSTER_TRIDIAGONAL and CLUSTER_JACOBI
+ // preconditioners), the fill-reducing ordering algorithms can
+ // either be run on the block or the scalar form of these matrices.
+ // Running it on the block form exposes more of the super-nodal
+ // structure of the matrix to the factorization routines. Setting
+ // this parameter to true runs the ordering algorithms in block
+ // form. Currently this option only makes sense with
+ // sparse_linear_algebra_library = SUITE_SPARSE.
+ bool use_block_amd;
+
+ // Some non-linear least squares problems have additional
+    // structure in the way the parameter blocks interact, such that it is
+ // beneficial to modify the way the trust region step is computed.
+ //
+ // e.g., consider the following regression problem
+ //
+    //   y = a_1 exp(b_1 x) + a_2 exp(b_2 x^2 + c_1)
+ //
+    // Given a set of pairs {(x_i, y_i)}, the user wishes to estimate
+ // a_1, a_2, b_1, b_2, and c_1.
+ //
+    // Notice here that the right hand side is linear in a_1
+ // and a_2, and given any value for b_1, b_2 and c_1, it is
+ // possible to use linear regression to estimate the optimal
+    // values of a_1 and a_2. Indeed, it is possible to analytically
+    // eliminate the variables a_1 and a_2 from the problem
+    // altogether. Problems like these are known as separable least
+    // squares problems, and the most famous algorithm for solving them
+ // is the Variable Projection algorithm invented by Golub &
+ // Pereyra.
+ //
+ // Similar structure can be found in the matrix factorization with
+ // missing data problem. There the corresponding algorithm is
+ // known as Wiberg's algorithm.
+ //
+ // Ruhe & Wedin (Algorithms for Separable Nonlinear Least Squares
+    // Problems, SIAM Review, 22(3), 1980) present an analysis of
+ // various algorithms for solving separable non-linear least
+ // squares problems and refer to "Variable Projection" as
+ // Algorithm I in their paper.
+ //
+ // Implementing Variable Projection is tedious and expensive, and
+ // they present a simpler algorithm, which they refer to as
+ // Algorithm II, where once the Newton/Trust Region step has been
+ // computed for the whole problem (a_1, a_2, b_1, b_2, c_1) and
+ // additional optimization step is performed to estimate a_1 and
+ // a_2 exactly.
+ //
+ // This idea can be generalized to cases where the residual is not
+    // linear in a_1 and a_2, i.e., solve for the trust region step
+ // for the full problem, and then use it as the starting point to
+ // further optimize just a_1 and a_2. For the linear case, this
+ // amounts to doing a single linear least squares solve. For
+ // non-linear problems, any method for solving the a_1 and a_2
+ // optimization problems will do. The only constraint on a_1 and
+ // a_2 is that they do not co-occur in any residual block.
+ //
+ // This idea can be further generalized, by not just optimizing
+ // (a_1, a_2), but decomposing the graph corresponding to the
+    // Hessian matrix's sparsity structure into a collection of
+ // non-overlapping independent sets and optimizing each of them.
+ //
+ // Setting "use_inner_iterations" to true enables the use of this
+ // non-linear generalization of Ruhe & Wedin's Algorithm II. This
+ // version of Ceres has a higher iteration complexity, but also
+ // displays better convergence behaviour per iteration. Setting
+ // Solver::Options::num_threads to the maximum number possible is
+ // highly recommended.
+ bool use_inner_iterations;
+
+    // If use_inner_iterations is true, then the user has two choices.
+ //
+ // 1. Let the solver heuristically decide which parameter blocks
+ // to optimize in each inner iteration. To do this leave
+ // Solver::Options::inner_iteration_ordering untouched.
+ //
+    //   2. Specify a collection of ordered independent sets, where
+    //      the lower numbered groups are optimized before the higher
+    //      numbered groups. Each group must be an independent set.
+ ParameterBlockOrdering* inner_iteration_ordering;
+
+ // Minimum number of iterations for which the linear solver should
+ // run, even if the convergence criterion is satisfied.
+ int linear_solver_min_num_iterations;
+
+ // Maximum number of iterations for which the linear solver should
+ // run. If the solver does not converge in less than
+ // linear_solver_max_num_iterations, then it returns
+    // MAX_ITERATIONS as its termination type.
+ int linear_solver_max_num_iterations;
+
+ // Forcing sequence parameter. The truncated Newton solver uses
+ // this number to control the relative accuracy with which the
+ // Newton step is computed.
+ //
+ // This constant is passed to ConjugateGradientsSolver which uses
+ // it to terminate the iterations when
+ //
+ // (Q_i - Q_{i-1})/Q_i < eta/i
+ double eta;
+
+ // Normalize the jacobian using Jacobi scaling before calling
+ // the linear least squares solver.
+ bool jacobi_scaling;
+
+ // Logging options ---------------------------------------------------------
+
+ LoggingType logging_type;
+
+ // By default the Minimizer progress is logged to VLOG(1), which
+ // is sent to STDERR depending on the vlog level. If this flag is
+ // set to true, and logging_type is not SILENT, the logging output
+ // is sent to STDOUT.
+ bool minimizer_progress_to_stdout;
+
+ bool return_initial_residuals;
+ bool return_initial_gradient;
+ bool return_initial_jacobian;
+
+ bool return_final_residuals;
+ bool return_final_gradient;
+ bool return_final_jacobian;
+
+ // List of iterations at which the optimizer should dump the
+ // linear least squares problem to disk. Useful for testing and
+ // benchmarking. If empty (default), no problems are dumped.
+ //
+ // This is ignored if protocol buffers are disabled.
+ vector<int> lsqp_iterations_to_dump;
+ string lsqp_dump_directory;
+ DumpFormatType lsqp_dump_format_type;
+
+ // Finite differences options ----------------------------------------------
+
+ // Check all jacobians computed by each residual block with finite
+ // differences. This is expensive since it involves computing the
+ // derivative by normal means (e.g. user specified, autodiff,
+ // etc), then also computing it using finite differences. The
+ // results are compared, and if they differ substantially, details
+ // are printed to the log.
+ bool check_gradients;
+
+ // Relative precision to check for in the gradient checker. If the
+ // relative difference between an element in a jacobian exceeds
+ // this number, then the jacobian for that cost term is dumped.
+ double gradient_check_relative_precision;
+
+ // Relative shift used for taking numeric derivatives. For finite
+ // differencing, each dimension is evaluated at slightly shifted
+ // values; for the case of central difference, this is what gets
+ // evaluated:
+ //
+ // delta = numeric_derivative_relative_step_size;
+ // f_initial = f(x)
+ // f_forward = f((1 + delta) * x)
+ // f_backward = f((1 - delta) * x)
+ //
+ // The finite differencing is done along each dimension. The
+ // reason to use a relative (rather than absolute) step size is
+    // that this way, numeric differentiation works for functions where
+ // the arguments are typically large (e.g. 1e9) and when the
+ // values are small (e.g. 1e-5). It is possible to construct
+ // "torture cases" which break this finite difference heuristic,
+ // but they do not come up often in practice.
+ //
+ // TODO(keir): Pick a smarter number than the default above! In
+ // theory a good choice is sqrt(eps) * x, which for doubles means
+ // about 1e-8 * x. However, I have found this number too
+ // optimistic. This number should be exposed for users to change.
+ double numeric_derivative_relative_step_size;
+
+ // If true, the user's parameter blocks are updated at the end of
+ // every Minimizer iteration, otherwise they are updated when the
+ // Minimizer terminates. This is useful if, for example, the user
+ // wishes to visualize the state of the optimization every
+ // iteration.
+ bool update_state_every_iteration;
+
+ // Callbacks that are executed at the end of each iteration of the
+ // Minimizer. An iteration may terminate midway, either due to
+ // numerical failures or because one of the convergence tests has
+ // been satisfied. In this case none of the callbacks are
+ // executed.
+
+ // Callbacks are executed in the order that they are specified in
+ // this vector. By default, parameter blocks are updated only at
+ // the end of the optimization, i.e when the Minimizer
+ // terminates. This behaviour is controlled by
+ // update_state_every_variable. If the user wishes to have access
+ // to the update parameter blocks when his/her callbacks are
+ // executed, then set update_state_every_iteration to true.
+ //
+ // The solver does NOT take ownership of these pointers.
+ vector<IterationCallback*> callbacks;
+
+ // If non-empty, a summary of the execution of the solver is
+ // recorded to this file.
+ string solver_log;
+ };
+
+ struct Summary {
+ Summary();
+
+ // A brief one line description of the state of the solver after
+ // termination.
+ string BriefReport() const;
+
+ // A full multiline description of the state of the solver after
+ // termination.
+ string FullReport() const;
+
+ // Minimizer summary -------------------------------------------------
+ SolverTerminationType termination_type;
+
+ // If the solver did not run, or there was a failure, a
+ // description of the error.
+ string error;
+
+ // Cost of the problem before and after the optimization. See
+ // problem.h for definition of the cost of a problem.
+ double initial_cost;
+ double final_cost;
+
+ // The part of the total cost that comes from residual blocks that
+ // were held fixed by the preprocessor because all the parameter
+ // blocks that they depend on were fixed.
+ double fixed_cost;
+
+ // Vectors of residuals before and after the optimization. The
+ // entries of these vectors are in the order in which
+ // ResidualBlocks were added to the Problem object.
+ //
+ // Whether the residual vectors are populated with values is
+ // controlled by Solver::Options::return_initial_residuals and
+ // Solver::Options::return_final_residuals respectively.
+ vector<double> initial_residuals;
+ vector<double> final_residuals;
+
+ // Gradient vectors, before and after the optimization. The rows
+ // are in the same order in which the ParameterBlocks were added
+ // to the Problem object.
+ //
+ // NOTE: Since AddResidualBlock adds ParameterBlocks to the
+ // Problem automatically if they do not already exist, if you wish
+ // to have explicit control over the ordering of the vectors, then
+ // use Problem::AddParameterBlock to explicitly add the
+ // ParameterBlocks in the order desired.
+ //
+ // Whether the vectors are populated with values is controlled by
+ // Solver::Options::return_initial_gradient and
+ // Solver::Options::return_final_gradient respectively.
+ vector<double> initial_gradient;
+ vector<double> final_gradient;
+
+ // Jacobian matrices before and after the optimization. The rows
+ // of these matrices are in the same order in which the
+ // ResidualBlocks were added to the Problem object. The columns
+ // are in the same order in which the ParameterBlocks were added
+ // to the Problem object.
+ //
+ // NOTE: Since AddResidualBlock adds ParameterBlocks to the
+ // Problem automatically if they do not already exist, if you wish
+ // to have explicit control over the column ordering of the
+ // matrix, then use Problem::AddParameterBlock to explicitly add
+ // the ParameterBlocks in the order desired.
+ //
+ // The Jacobian matrices are stored as compressed row sparse
+ // matrices. Please see ceres/crs_matrix.h for more details of the
+ // format.
+ //
+    // Whether the Jacobian matrices are populated with values is
+ // controlled by Solver::Options::return_initial_jacobian and
+ // Solver::Options::return_final_jacobian respectively.
+ CRSMatrix initial_jacobian;
+ CRSMatrix final_jacobian;
+
+ vector<IterationSummary> iterations;
+
+ int num_successful_steps;
+ int num_unsuccessful_steps;
+
+ // When the user calls Solve, before the actual optimization
+ // occurs, Ceres performs a number of preprocessing steps. These
+ // include error checks, memory allocations, and reorderings. This
+ // time is accounted for as preprocessing time.
+ double preprocessor_time_in_seconds;
+
+ // Time spent in the TrustRegionMinimizer.
+ double minimizer_time_in_seconds;
+
+ // After the Minimizer is finished, some time is spent in
+ // re-evaluating residuals etc. This time is accounted for in the
+ // postprocessor time.
+ double postprocessor_time_in_seconds;
+
+    // Sum total of all time spent inside Ceres when Solve is called.
+ double total_time_in_seconds;
+
+ // Preprocessor summary.
+ int num_parameter_blocks;
+ int num_parameters;
+ int num_residual_blocks;
+ int num_residuals;
+
+ int num_parameter_blocks_reduced;
+ int num_parameters_reduced;
+ int num_residual_blocks_reduced;
+ int num_residuals_reduced;
+
+ int num_eliminate_blocks_given;
+ int num_eliminate_blocks_used;
+
+ int num_threads_given;
+ int num_threads_used;
+
+ int num_linear_solver_threads_given;
+ int num_linear_solver_threads_used;
+
+ LinearSolverType linear_solver_type_given;
+ LinearSolverType linear_solver_type_used;
+
+ PreconditionerType preconditioner_type;
+
+ TrustRegionStrategyType trust_region_strategy_type;
+ DoglegType dogleg_type;
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library;
+ };
+
+ // Once a least squares problem has been built, this function takes
+ // the problem and optimizes it based on the values of the options
+ // parameters. Upon return, a detailed summary of the work performed
+  // by the preprocessor, the non-linear minimizer and the linear
+  // solver is reported in the summary object.
+ virtual void Solve(const Options& options,
+ Problem* problem,
+ Solver::Summary* summary);
+};
+
+// Helper function which avoids going through the interface.
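+//
+// A minimal usage sketch, assuming a Problem named problem has already been
+// built as described in problem.h:
+//
+//   Solver::Options options;
+//   options.max_num_iterations = 100;
+//   Solver::Summary summary;
+//   Solve(options, &problem, &summary);
+//   std::cout << summary.FullReport() << "\n";  // <iostream> assumed.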
+void Solve(const Solver::Options& options,
+ Problem* problem,
+ Solver::Summary* summary);
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_SOLVER_H_
diff --git a/include/ceres/types.h b/include/ceres/types.h
new file mode 100644
index 0000000..888e9b0
--- /dev/null
+++ b/include/ceres/types.h
@@ -0,0 +1,320 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Enums and other top level class definitions.
+//
+// Note: internal/types.cc defines stringification routines for some
+// of these enums. Please update those routines if you extend or
+// remove enums from here.
+
+#ifndef CERES_PUBLIC_TYPES_H_
+#define CERES_PUBLIC_TYPES_H_
+
+#include "ceres/internal/port.h"
+
+namespace ceres {
+
+// Basic integer types. These typedefs are in the Ceres namespace to avoid
+// conflicts with other packages having similar typedefs.
+typedef short int16;
+typedef int int32;
+
+// Argument type used in interfaces that can optionally take ownership
+// of a passed in argument. If TAKE_OWNERSHIP is passed, the called
+// object takes ownership of the pointer argument, and will call
+// delete on it upon completion.
+enum Ownership {
+ DO_NOT_TAKE_OWNERSHIP,
+ TAKE_OWNERSHIP
+};
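+
+// One place these values appear is Problem::Options (see problem.h),
+// which controls whether the Problem deletes the cost functions
+// handed to it. A minimal sketch, assuming the cost functions are
+// owned elsewhere:
+//
+//   Problem::Options problem_options;
+//   problem_options.cost_function_ownership = DO_NOT_TAKE_OWNERSHIP;
+//   Problem problem(problem_options);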
+
+// TODO(keir): Considerably expand the explanations of each solver type.
+enum LinearSolverType {
+ // These solvers are for general rectangular systems formed from the
+ // normal equations A'A x = A'b. They are direct solvers and do not
+ // assume any special problem structure.
+
+ // Solve the normal equations using a dense Cholesky solver; based
+ // on Eigen.
+ DENSE_NORMAL_CHOLESKY,
+
+ // Solve the normal equations using a dense QR solver; based on
+ // Eigen.
+ DENSE_QR,
+
+ // Solve the normal equations using a sparse Cholesky solver; requires
+ // SuiteSparse or CXSparse.
+ SPARSE_NORMAL_CHOLESKY,
+
+ // Specialized solvers, specific to problems with a generalized
+ // bi-partite structure.
+
+ // Solves the reduced linear system using a dense Cholesky solver;
+ // based on Eigen.
+ DENSE_SCHUR,
+
+ // Solves the reduced linear system using a sparse Cholesky solver;
+ // based on CHOLMOD.
+ SPARSE_SCHUR,
+
+ // Solves the reduced linear system using Conjugate Gradients, based
+ // on a new Ceres implementation. Suitable for large scale
+ // problems.
+ ITERATIVE_SCHUR,
+
+ // Conjugate gradients on the normal equations.
+ CGNR
+};
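+
+// The linear solver is selected through Solver::Options. For example,
+// a bundle adjustment style problem might use the following sketch,
+// where "options" is a Solver::Options instance:
+//
+//   options.linear_solver_type = SPARSE_SCHUR;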
+
+enum PreconditionerType {
+ // Trivial preconditioner - the identity matrix.
+ IDENTITY,
+
+ // Block diagonal of the Gauss-Newton Hessian.
+ JACOBI,
+
+ // Block diagonal of the Schur complement. This preconditioner may
+ // only be used with the ITERATIVE_SCHUR solver. Requires
+ // SuiteSparse/CHOLMOD.
+ SCHUR_JACOBI,
+
+ // Visibility clustering based preconditioners.
+ //
+ // These preconditioners are well suited for Structure from Motion
+ // problems, particularly problems arising from community photo
+ // collections. These preconditioners use the visibility structure
+ // of the scene to determine the sparsity structure of the
+ // preconditioner. Requires SuiteSparse/CHOLMOD.
+ CLUSTER_JACOBI,
+ CLUSTER_TRIDIAGONAL
+};
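+
+// When an iterative linear solver is used, the preconditioner is also
+// chosen through Solver::Options. A sketch, with "options" again a
+// Solver::Options instance:
+//
+//   options.linear_solver_type = ITERATIVE_SCHUR;
+//   options.preconditioner_type = SCHUR_JACOBI;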
+
+enum SparseLinearAlgebraLibraryType {
+ // High performance sparse Cholesky factorization and approximate
+ // minimum degree ordering.
+ SUITE_SPARSE,
+
+ // A lightweight replacement for SuiteSparse.
+ CX_SPARSE
+};
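+
+// The sparse backend is also selected through Solver::Options; the
+// option below is assumed to share the name of the Summary field
+// above and to take effect only when Ceres was built with the
+// corresponding library:
+//
+//   options.sparse_linear_algebra_library = SUITE_SPARSE;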
+
+enum LinearSolverTerminationType {
+ // Termination criterion was met. For factorization based solvers
+ // the tolerance is assumed to be zero. Any user provided values are
+ // ignored.
+ TOLERANCE,
+
+ // Solver ran for max_num_iterations and terminated before the
+ // termination tolerance could be satisfied.
+ MAX_ITERATIONS,
+
+ // Solver is stuck and further iterations will not result in any
+ // measurable progress.
+ STAGNATION,
+
+ // Solver failed. Solver was terminated due to numerical errors. The
+ // exact cause of failure depends on the particular solver being
+ // used.
+ FAILURE
+};
+
+// Logging options
+// The options get progressively noisier.
+enum LoggingType {
+ SILENT,
+ PER_MINIMIZER_ITERATION
+};
+
+// Ceres supports different strategies for computing the trust region
+// step.
+enum TrustRegionStrategyType {
+ // The default trust region strategy is to use the step computation
+ // used in the Levenberg-Marquardt algorithm. For more details see
+ // levenberg_marquardt_strategy.h
+ LEVENBERG_MARQUARDT,
+
+ // Powell's dogleg algorithm interpolates between the Cauchy point
+ // and the Gauss-Newton step. It is particularly useful if the
+ // LEVENBERG_MARQUARDT algorithm is making a large number of
+ // unsuccessful steps. For more details see dogleg_strategy.h.
+ //
+ // NOTES:
+ //
+ // 1. This strategy has not been experimented with or tested as
+ // extensively as LEVENBERG_MARQUARDT, and therefore it should be
+ // considered EXPERIMENTAL for now.
+ //
+ // 2. For now this strategy should only be used with exact
+ // factorization based linear solvers, i.e., SPARSE_SCHUR,
+ // DENSE_SCHUR, DENSE_QR and SPARSE_NORMAL_CHOLESKY.
+ DOGLEG
+};
+
+// Ceres supports two different dogleg strategies.
+// The "traditional" dogleg method by Powell and the
+// "subspace" method described in
+// R. H. Byrd, R. B. Schnabel, and G. A. Shultz,
+// "Approximate solution of the trust region problem by minimization
+// over two-dimensional subspaces", Mathematical Programming,
+// 40 (1988), pp. 247--263
+enum DoglegType {
+ // The traditional approach constructs a dogleg path
+ // consisting of two line segments and finds the furthest
+ // point on that path that is still inside the trust region.
+ TRADITIONAL_DOGLEG,
+
+ // The subspace approach finds the exact minimum of the model
+ // constrained to the subspace spanned by the dogleg path.
+ SUBSPACE_DOGLEG
+};
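+
+// Both the trust region strategy and the dogleg variant are chosen
+// through Solver::Options. A sketch, with "options" a Solver::Options
+// instance:
+//
+//   options.trust_region_strategy_type = DOGLEG;
+//   options.dogleg_type = SUBSPACE_DOGLEG;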
+
+enum SolverTerminationType {
+ // The minimizer did not run at all; usually due to errors in the user's
+ // Problem or the solver options.
+ DID_NOT_RUN,
+
+ // The solver ran for the maximum number of iterations specified by
+ // the user, but none of the convergence criteria specified by the
+ // user were met.
+ NO_CONVERGENCE,
+
+ // Minimizer terminated because
+ // (new_cost - old_cost) < function_tolerance * old_cost;
+ FUNCTION_TOLERANCE,
+
+ // Minimizer terminated because
+ // max_i |gradient_i| < gradient_tolerance * max_i|initial_gradient_i|
+ GRADIENT_TOLERANCE,
+
+ // Minimizer terminated because
+ // |step|_2 <= parameter_tolerance * ( |x|_2 + parameter_tolerance)
+ PARAMETER_TOLERANCE,
+
+ // The minimizer terminated because it encountered a numerical error
+ // that it could not recover from.
+ NUMERICAL_FAILURE,
+
+ // Using an IterationCallback object, user code can control the
+ // minimizer. The following enums indicate that the user code was
+ // responsible for termination.
+
+ // User's IterationCallback returned SOLVER_ABORT.
+ USER_ABORT,
+
+ // User's IterationCallback returned SOLVER_TERMINATE_SUCCESSFULLY.
+ USER_SUCCESS
+};
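+
+// After Solve returns, the termination type can be inspected on the
+// summary. A short sketch:
+//
+//   if (summary.termination_type == NUMERICAL_FAILURE) {
+//     // The solve failed; the parameter values should not be trusted.
+//   }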
+
+// Enums used by the IterationCallback instances to indicate to the
+// solver whether it should continue solving, whether the user detected
+// an error, or whether the solution is good enough and the solver
+// should terminate.
+enum CallbackReturnType {
+ // Continue solving to next iteration.
+ SOLVER_CONTINUE,
+
+ // Terminate solver, and do not update the parameter blocks upon
+ // return, unless the user has set
+ // Solver::Options::update_state_every_iteration, in which case the
+ // state will already have been updated at every
+ // iteration. Solver::Summary::termination_type is set to USER_ABORT.
+ SOLVER_ABORT,
+
+ // Terminate solver, update state and
+ // return. Solver::Summary::termination_type is set to USER_SUCCESS.
+ SOLVER_TERMINATE_SUCCESSFULLY
+};
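+
+// A minimal IterationCallback (see iteration_callback.h) that stops
+// the solve once the cost falls below a threshold might look like the
+// following sketch; "kGoodEnough" and the "options" object are
+// illustrative:
+//
+//   class EarlyExitCallback : public IterationCallback {
+//    public:
+//     virtual CallbackReturnType operator()(const IterationSummary& s) {
+//       return s.cost < kGoodEnough ? SOLVER_TERMINATE_SUCCESSFULLY
+//                                   : SOLVER_CONTINUE;
+//     }
+//   };
+//
+//   EarlyExitCallback callback;
+//   options.callbacks.push_back(&callback);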
+
+// The format in which linear least squares problems should be logged
+// when Solver::Options::lsqp_iterations_to_dump is non-empty.
+enum DumpFormatType {
+ // Print the linear least squares problem in a human readable format
+ // to stderr. The Jacobian is printed as a dense matrix. The vectors
+ // D, x and f are printed as dense vectors. This should only be used
+ // for small problems.
+ CONSOLE,
+
+ // Write out the linear least squares problem to the directory
+ // pointed to by Solver::Options::lsqp_dump_directory as a protocol
+ // buffer. linear_least_squares_problems.h/cc contains routines for
+ // loading these problems. For details on the on disk format used,
+ // see matrix.proto. The files are named lm_iteration_???.lsqp.
+ PROTOBUF,
+
+ // Write out the linear least squares problem to the directory
+ // pointed to by Solver::Options::lsqp_dump_directory as text files
+ // which can be read into MATLAB/Octave. The Jacobian is dumped as a
+ // text file containing (i,j,s) triplets, the vectors D, x and f are
+ // dumped as text files containing a list of their values.
+ //
+ // A MATLAB/Octave script called lm_iteration_???.m is also output,
+ // which can be used to parse and load the problem into memory.
+ TEXTFILE
+};
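+
+// Dumping is configured through Solver::Options. A sketch; the exact
+// option names below are assumed from the references above:
+//
+//   options.lsqp_iterations_to_dump.push_back(1);
+//   options.lsqp_dump_directory = "/tmp";
+//   options.lsqp_dump_format_type = TEXTFILE;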
+
+// For SizedCostFunction and AutoDiffCostFunction, DYNAMIC can be specified for
+// the number of residuals. If specified, then the number of residuals for that
+// cost function can vary at runtime.
+enum DimensionType {
+ DYNAMIC = -1
+};
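+
+// For example, an AutoDiffCostFunction whose residual count is only
+// known at runtime can be constructed along the following lines,
+// assuming the AutoDiffCostFunction constructor that accepts a
+// runtime residual count; "MyFunctor", its parameter block size of 4
+// and "num_residuals" are illustrative:
+//
+//   CostFunction* cost_function =
+//       new AutoDiffCostFunction<MyFunctor, DYNAMIC, 4>(
+//           new MyFunctor(data), num_residuals);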
+
+const char* LinearSolverTypeToString(LinearSolverType type);
+bool StringToLinearSolverType(string value, LinearSolverType* type);
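+
+// These conversions are convenient for wiring enums to command line
+// flags. A sketch, where "FLAGS_linear_solver" is an illustrative
+// string flag and "options" is a Solver::Options instance:
+//
+//   LinearSolverType type;
+//   if (!StringToLinearSolverType(FLAGS_linear_solver, &type)) {
+//     type = SPARSE_NORMAL_CHOLESKY;
+//   }
+//   options.linear_solver_type = type;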
+
+const char* PreconditionerTypeToString(PreconditionerType type);
+bool StringToPreconditionerType(string value, PreconditionerType* type);
+
+const char* SparseLinearAlgebraLibraryTypeToString(
+ SparseLinearAlgebraLibraryType type);
+bool StringToSparseLinearAlgebraLibraryType(
+ string value,
+ SparseLinearAlgebraLibraryType* type);
+
+const char* TrustRegionStrategyTypeToString(TrustRegionStrategyType type);
+bool StringToTrustRegionStrategyType(string value,
+ TrustRegionStrategyType* type);
+
+const char* DoglegTypeToString(DoglegType type);
+bool StringToDoglegType(string value, DoglegType* type);
+
+const char* LinearSolverTerminationTypeToString(
+ LinearSolverTerminationType type);
+
+const char* SolverTerminationTypeToString(SolverTerminationType type);
+
+bool IsSchurType(LinearSolverType type);
+bool IsSparseLinearAlgebraLibraryTypeAvailable(
+ SparseLinearAlgebraLibraryType type);
+
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_TYPES_H_