author    Carlos Hernandez <chernand@google.com>  2014-08-07 17:51:38 -0700
committer Carlos Hernandez <chernand@google.com>  2014-08-11 20:38:34 +0000
commit    79397c21138f54fcff6ec067b44b847f1f7e0e98 (patch)
tree      b2c33877af479b37f082fcbcaf50a24a49b69415 /include
parent    ad78e589f893a53da23bbdf41274d89dae656f54 (diff)
Update ceres to the latest version in g3
Please pay special attention to the changes in Android.mk. They are the
only real changes I had to make.

Bug: 16953678
Change-Id: I44a644358e779aaff99a2ea822387fe49ac26888
Diffstat (limited to 'include')
-rw-r--r--  include/ceres/autodiff_cost_function.h | 46
-rw-r--r--  include/ceres/autodiff_local_parameterization.h | 14
-rw-r--r--  include/ceres/c_api.h | 33
-rw-r--r--  include/ceres/ceres.h | 8
-rw-r--r--  include/ceres/conditioned_cost_function.h | 4
-rw-r--r--  include/ceres/cost_function.h | 11
-rw-r--r--  include/ceres/cost_function_to_functor.h | 6
-rw-r--r--  include/ceres/covariance.h | 82
-rw-r--r--  include/ceres/crs_matrix.h | 5
-rw-r--r--  include/ceres/dynamic_autodiff_cost_function.h | 33
-rw-r--r--  include/ceres/dynamic_numeric_diff_cost_function.h | 265
-rw-r--r--  include/ceres/fpclassify.h | 27
-rw-r--r--  include/ceres/gradient_checker.h | 2
-rw-r--r--  include/ceres/internal/autodiff.h | 2
-rw-r--r--  include/ceres/internal/disable_warnings.h | 44
-rw-r--r--  include/ceres/internal/fixed_array.h | 4
-rw-r--r--  include/ceres/internal/macros.h | 5
-rw-r--r--  include/ceres/internal/numeric_diff.h | 30
-rw-r--r--  include/ceres/internal/port.h | 38
-rw-r--r--  include/ceres/internal/reenable_warnings.h | 38
-rw-r--r--  include/ceres/iteration_callback.h | 11
-rw-r--r--  include/ceres/jet.h | 196
-rw-r--r--  include/ceres/local_parameterization.h | 15
-rw-r--r--  include/ceres/loss_function.h | 23
-rw-r--r--  include/ceres/normal_prior.h | 5
-rw-r--r--  include/ceres/numeric_diff_cost_function.h | 42
-rw-r--r--  include/ceres/numeric_diff_functor.h | 7
-rw-r--r--  include/ceres/ordered_groups.h | 35
-rw-r--r--  include/ceres/problem.h | 71
-rw-r--r--  include/ceres/rotation.h | 73
-rw-r--r--  include/ceres/solver.h | 210
-rw-r--r--  include/ceres/types.h | 208
-rw-r--r--  include/ceres/version.h | 49
33 files changed, 1134 insertions, 508 deletions
diff --git a/include/ceres/autodiff_cost_function.h b/include/ceres/autodiff_cost_function.h
index 371a11f..7c0fa79 100644
--- a/include/ceres/autodiff_cost_function.h
+++ b/include/ceres/autodiff_cost_function.h
@@ -96,7 +96,7 @@
// "MyScalarCostFunctor", "1, 2, 2", describe the functor as computing a
// 1-dimensional output from two arguments, both 2-dimensional.
//
-// The autodiff cost function also supports cost functions with a
+// AutoDiffCostFunction also supports cost functions with a
// runtime-determined number of residuals. For example:
//
// CostFunction* cost_function
@@ -110,8 +110,9 @@
// Dimension of x ------------------------------------+ |
// Dimension of y ---------------------------------------+
//
-// The framework can currently accommodate cost functions of up to 6 independent
-// variables, and there is no limit on the dimensionality of each of them.
+// The framework can currently accommodate cost functions of up to 10
+// independent variables, and there is no limit on the dimensionality
+// of each of them.
//
// WARNING #1: Since the functor will get instantiated with different types for
// T, you must convert from other numeric types to T before mixing
@@ -145,13 +146,13 @@ namespace ceres {
//
// The constructors take ownership of the cost functor.
//
-// If the number of residuals (argument "M" below) is ceres::DYNAMIC, then the
-// two-argument constructor must be used. The second constructor takes a number
-// of residuals (in addition to the templated number of residuals). This allows
-// for varying the number of residuals for a single autodiff cost function at
-// runtime.
+// If the number of residuals (argument kNumResiduals below) is
+// ceres::DYNAMIC, then the two-argument constructor must be used. The
+// second constructor takes a number of residuals (in addition to the
+// templated number of residuals). This allows for varying the number
+// of residuals for a single autodiff cost function at runtime.
template <typename CostFunctor,
- int M, // Number of residuals, or ceres::DYNAMIC.
+ int kNumResiduals, // Number of residuals, or ceres::DYNAMIC.
int N0, // Number of parameters in block 0.
int N1 = 0, // Number of parameters in block 1.
int N2 = 0, // Number of parameters in block 2.
@@ -162,28 +163,32 @@ template <typename CostFunctor,
int N7 = 0, // Number of parameters in block 7.
int N8 = 0, // Number of parameters in block 8.
int N9 = 0> // Number of parameters in block 9.
-class AutoDiffCostFunction : public SizedCostFunction<M,
+class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals,
N0, N1, N2, N3, N4,
N5, N6, N7, N8, N9> {
public:
// Takes ownership of functor. Uses the template-provided value for the
- // number of residuals ("M").
+ // number of residuals ("kNumResiduals").
explicit AutoDiffCostFunction(CostFunctor* functor)
: functor_(functor) {
- CHECK_NE(M, DYNAMIC) << "Can't run the fixed-size constructor if the "
- << "number of residuals is set to ceres::DYNAMIC.";
+ CHECK_NE(kNumResiduals, DYNAMIC)
+ << "Can't run the fixed-size constructor if the "
+ << "number of residuals is set to ceres::DYNAMIC.";
}
- // Takes ownership of functor. Ignores the template-provided number of
- // residuals ("M") in favor of the "num_residuals" argument provided.
+ // Takes ownership of functor. Ignores the template-provided
+ // kNumResiduals in favor of the "num_residuals" argument provided.
//
// This allows for having autodiff cost functions which return varying
// numbers of residuals at runtime.
AutoDiffCostFunction(CostFunctor* functor, int num_residuals)
: functor_(functor) {
- CHECK_EQ(M, DYNAMIC) << "Can't run the dynamic-size constructor if the "
- << "number of residuals is not ceres::DYNAMIC.";
- SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>
+ CHECK_EQ(kNumResiduals, DYNAMIC)
+ << "Can't run the dynamic-size constructor if the "
+ << "number of residuals is not ceres::DYNAMIC.";
+ SizedCostFunction<kNumResiduals,
+ N0, N1, N2, N3, N4,
+ N5, N6, N7, N8, N9>
::set_num_residuals(num_residuals);
}
@@ -206,8 +211,9 @@ class AutoDiffCostFunction : public SizedCostFunction<M,
N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Differentiate(
*functor_,
parameters,
- SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>
- ::num_residuals(),
+ SizedCostFunction<kNumResiduals,
+ N0, N1, N2, N3, N4,
+ N5, N6, N7, N8, N9>::num_residuals(),
residuals,
jacobians);
}
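
For reference, a minimal sketch of the two constructor paths described in the comments above; the functor body and the residual count passed at runtime are hypothetical and not part of this diff:

    // Hypothetical functor computing k - dot(x, y) for 2-vectors x and y.
    struct MyScalarCostFunctor {
      explicit MyScalarCostFunctor(double k) : k_(k) {}
      template <typename T>
      bool operator()(const T* const x, const T* const y, T* residuals) const {
        residuals[0] = T(k_) - (x[0] * y[0] + x[1] * y[1]);
        return true;
      }
      double k_;
    };

    // Fixed size: uses the template-provided kNumResiduals (here 1).
    ceres::CostFunction* fixed_cost =
        new ceres::AutoDiffCostFunction<MyScalarCostFunctor, 1, 2, 2>(
            new MyScalarCostFunctor(1.0));

    // Runtime size: kNumResiduals == ceres::DYNAMIC, so the two-argument
    // constructor must supply the residual count (assumed to be 1 here).
    ceres::CostFunction* dynamic_cost =
        new ceres::AutoDiffCostFunction<MyScalarCostFunctor, ceres::DYNAMIC, 2, 2>(
            new MyScalarCostFunctor(1.0), /* num_residuals = */ 1);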
diff --git a/include/ceres/autodiff_local_parameterization.h b/include/ceres/autodiff_local_parameterization.h
index 0aae6c7..c100d48 100644
--- a/include/ceres/autodiff_local_parameterization.h
+++ b/include/ceres/autodiff_local_parameterization.h
@@ -107,11 +107,18 @@ namespace ceres {
template <typename Functor, int kGlobalSize, int kLocalSize>
class AutoDiffLocalParameterization : public LocalParameterization {
public:
+ AutoDiffLocalParameterization() :
+ functor_(new Functor()) {}
+
+ // Takes ownership of functor.
+ explicit AutoDiffLocalParameterization(Functor* functor) :
+ functor_(functor) {}
+
virtual ~AutoDiffLocalParameterization() {}
virtual bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const {
- return Functor()(x, delta, x_plus_delta);
+ return (*functor_)(x, delta, x_plus_delta);
}
virtual bool ComputeJacobian(const double* x, double* jacobian) const {
@@ -128,7 +135,7 @@ class AutoDiffLocalParameterization : public LocalParameterization {
const double* parameter_ptrs[2] = {x, zero_delta};
double* jacobian_ptrs[2] = { NULL, jacobian };
return internal::AutoDiff<Functor, double, kGlobalSize, kLocalSize>
- ::Differentiate(Functor(),
+ ::Differentiate(*functor_,
parameter_ptrs,
kGlobalSize,
x_plus_delta,
@@ -137,6 +144,9 @@ class AutoDiffLocalParameterization : public LocalParameterization {
virtual int GlobalSize() const { return kGlobalSize; }
virtual int LocalSize() const { return kLocalSize; }
+
+ private:
+ internal::scoped_ptr<Functor> functor_;
};
} // namespace ceres
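
A sketch of the constructors added above; PlusFunctor is a hypothetical additive parameterization, not part of this diff:

    // Hypothetical functor: plain vector addition in R^3.
    struct PlusFunctor {
      template <typename T>
      bool operator()(const T* x, const T* delta, T* x_plus_delta) const {
        for (int i = 0; i < 3; ++i) x_plus_delta[i] = x[i] + delta[i];
        return true;
      }
    };

    // Default constructor: the parameterization creates its own functor.
    ceres::LocalParameterization* p1 =
        new ceres::AutoDiffLocalParameterization<PlusFunctor, 3, 3>;

    // New constructor: pass a functor instance; ownership is taken.
    ceres::LocalParameterization* p2 =
        new ceres::AutoDiffLocalParameterization<PlusFunctor, 3, 3>(
            new PlusFunctor);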
diff --git a/include/ceres/c_api.h b/include/ceres/c_api.h
index add68de..71f41fd 100644
--- a/include/ceres/c_api.h
+++ b/include/ceres/c_api.h
@@ -38,12 +38,15 @@
#ifndef CERES_PUBLIC_C_API_H_
#define CERES_PUBLIC_C_API_H_
+#include "ceres/internal/port.h"
+#include "ceres/internal/disable_warnings.h"
+
#ifdef __cplusplus
extern "C" {
#endif
/* Init the Ceres private data. Must be called before anything else. */
-void ceres_init();
+CERES_EXPORT void ceres_init();
/* Equivalent to CostFunction::Evaluate() in the C++ API.
*
@@ -88,23 +91,23 @@ typedef void (*ceres_loss_function_t)(void* user_data,
*
* See loss_function.h for the details of each loss function.
*/
-void* ceres_create_huber_loss_function_data(double a);
-void* ceres_create_softl1_loss_function_data(double a);
-void* ceres_create_cauchy_loss_function_data(double a);
-void* ceres_create_arctan_loss_function_data(double a);
-void* ceres_create_tolerant_loss_function_data(double a, double b);
+CERES_EXPORT void* ceres_create_huber_loss_function_data(double a);
+CERES_EXPORT void* ceres_create_softl1_loss_function_data(double a);
+CERES_EXPORT void* ceres_create_cauchy_loss_function_data(double a);
+CERES_EXPORT void* ceres_create_arctan_loss_function_data(double a);
+CERES_EXPORT void* ceres_create_tolerant_loss_function_data(double a, double b);
/* Free the given stock loss function data. */
-void ceres_free_stock_loss_function_data(void* loss_function_data);
+CERES_EXPORT void ceres_free_stock_loss_function_data(void* loss_function_data);
/* This is an implementation of ceres_loss_function_t contained within Ceres
* itself, intended as a way to access the various stock Ceres loss functions
* from the C API. This should be passed to ceres_add_residual() below, in
* combination with a user_data pointer generated by
* ceres_create_stock_loss_function() above. */
-void ceres_stock_loss_function(void* user_data,
- double squared_norm,
- double out[3]);
+CERES_EXPORT void ceres_stock_loss_function(void* user_data,
+ double squared_norm,
+ double out[3]);
/* Equivalent to Problem from the C++ API. */
struct ceres_problem_s;
@@ -115,11 +118,11 @@ typedef struct ceres_residual_block_id_s ceres_residual_block_id_t;
/* Create and destroy a problem */
/* TODO(keir): Add options for the problem. */
-ceres_problem_t* ceres_create_problem();
-void ceres_free_problem(ceres_problem_t* problem);
+CERES_EXPORT ceres_problem_t* ceres_create_problem();
+CERES_EXPORT void ceres_free_problem(ceres_problem_t* problem);
/* Add a residual block. */
-ceres_residual_block_id_t* ceres_problem_add_residual_block(
+CERES_EXPORT ceres_residual_block_id_t* ceres_problem_add_residual_block(
ceres_problem_t* problem,
ceres_cost_function_t cost_function,
void* cost_function_data,
@@ -130,7 +133,7 @@ ceres_residual_block_id_t* ceres_problem_add_residual_block(
int* parameter_block_sizes,
double** parameters);
-void ceres_solve(ceres_problem_t* problem);
+CERES_EXPORT void ceres_solve(ceres_problem_t* problem);
/* TODO(keir): Figure out a way to pass a config in. */
@@ -138,4 +141,6 @@ void ceres_solve(ceres_problem_t* problem);
}
#endif
+#include "ceres/internal/reenable_warnings.h"
+
#endif /* CERES_PUBLIC_C_API_H_ */
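
A hedged sketch of the C API lifecycle declared above, using only the entry points visible in this diff (the residual blocks a real program would add via ceres_problem_add_residual_block are omitted):

    #include "ceres/c_api.h"

    int main(void) {
      ceres_init();  /* Must be called before anything else. */
      ceres_problem_t* problem = ceres_create_problem();
      /* ... ceres_problem_add_residual_block(...) calls go here ... */
      ceres_solve(problem);
      ceres_free_problem(problem);
      return 0;
    }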
diff --git a/include/ceres/ceres.h b/include/ceres/ceres.h
index 61b8b94..41bd649 100644
--- a/include/ceres/ceres.h
+++ b/include/ceres/ceres.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
@@ -34,15 +34,14 @@
#ifndef CERES_PUBLIC_CERES_H_
#define CERES_PUBLIC_CERES_H_
-#define CERES_VERSION 1.7.0
-#define CERES_ABI_VERSION 1.7.0
-
#include "ceres/autodiff_cost_function.h"
#include "ceres/autodiff_local_parameterization.h"
#include "ceres/cost_function.h"
#include "ceres/cost_function_to_functor.h"
#include "ceres/covariance.h"
#include "ceres/crs_matrix.h"
+#include "ceres/dynamic_autodiff_cost_function.h"
+#include "ceres/dynamic_numeric_diff_cost_function.h"
#include "ceres/iteration_callback.h"
#include "ceres/jet.h"
#include "ceres/local_parameterization.h"
@@ -54,5 +53,6 @@
#include "ceres/sized_cost_function.h"
#include "ceres/solver.h"
#include "ceres/types.h"
+#include "ceres/version.h"
#endif // CERES_PUBLIC_CERES_H_
diff --git a/include/ceres/conditioned_cost_function.h b/include/ceres/conditioned_cost_function.h
index 498d36e..3f0087c 100644
--- a/include/ceres/conditioned_cost_function.h
+++ b/include/ceres/conditioned_cost_function.h
@@ -39,6 +39,7 @@
#include "ceres/cost_function.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
+#include "ceres/internal/disable_warnings.h"
namespace ceres {
@@ -70,7 +71,7 @@ namespace ceres {
// ccf_residual[i] = f_i(my_cost_function_residual[i])
//
// and the Jacobian will be affected appropriately.
-class ConditionedCostFunction : public CostFunction {
+class CERES_EXPORT ConditionedCostFunction : public CostFunction {
public:
// Builds a cost function based on a wrapped cost function, and a
// per-residual conditioner. Takes ownership of all of the wrapped cost
@@ -93,5 +94,6 @@ class ConditionedCostFunction : public CostFunction {
} // namespace ceres
+#include "ceres/internal/reenable_warnings.h"
#endif // CERES_PUBLIC_CONDITIONED_COST_FUNCTION_H_
diff --git a/include/ceres/cost_function.h b/include/ceres/cost_function.h
index 8013e96..45292ec 100644
--- a/include/ceres/cost_function.h
+++ b/include/ceres/cost_function.h
@@ -48,6 +48,7 @@
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/types.h"
+#include "ceres/internal/disable_warnings.h"
namespace ceres {
@@ -60,7 +61,7 @@ namespace ceres {
// code inheriting from this class is expected to set these two members with the
// corresponding accessors. This information will be verified by the Problem
// when added with AddResidualBlock().
-class CostFunction {
+class CERES_EXPORT CostFunction {
public:
CostFunction() : num_residuals_(0) {}
@@ -115,7 +116,7 @@ class CostFunction {
double* residuals,
double** jacobians) const = 0;
- const vector<int16>& parameter_block_sizes() const {
+ const vector<int32>& parameter_block_sizes() const {
return parameter_block_sizes_;
}
@@ -124,7 +125,7 @@ class CostFunction {
}
protected:
- vector<int16>* mutable_parameter_block_sizes() {
+ vector<int32>* mutable_parameter_block_sizes() {
return &parameter_block_sizes_;
}
@@ -135,11 +136,13 @@ class CostFunction {
private:
// Cost function signature metadata: number of inputs & their sizes,
// number of outputs (residuals).
- vector<int16> parameter_block_sizes_;
+ vector<int32> parameter_block_sizes_;
int num_residuals_;
CERES_DISALLOW_COPY_AND_ASSIGN(CostFunction);
};
} // namespace ceres
+#include "ceres/internal/reenable_warnings.h"
+
#endif // CERES_PUBLIC_COST_FUNCTION_H_
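
To see where the int16 to int32 change above surfaces, here is a hypothetical hand-written CostFunction; parameter_block_sizes() now stores int32 entries:

    // Hypothetical cost function: one residual, one parameter block of size 3.
    class SumCost : public ceres::CostFunction {
     public:
      SumCost() {
        set_num_residuals(1);
        mutable_parameter_block_sizes()->push_back(3);  // vector<int32> now.
      }
      virtual bool Evaluate(double const* const* parameters,
                            double* residuals,
                            double** jacobians) const {
        const double* x = parameters[0];
        residuals[0] = x[0] + x[1] + x[2];
        if (jacobians != NULL && jacobians[0] != NULL) {
          jacobians[0][0] = jacobians[0][1] = jacobians[0][2] = 1.0;
        }
        return true;
      }
    };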
diff --git a/include/ceres/cost_function_to_functor.h b/include/ceres/cost_function_to_functor.h
index fa1012d..0d01f77 100644
--- a/include/ceres/cost_function_to_functor.h
+++ b/include/ceres/cost_function_to_functor.h
@@ -127,7 +127,7 @@ class CostFunctionToFunctor {
<< N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", "
<< N8 << ", " << N9;
- const vector<int16>& parameter_block_sizes =
+ const vector<int32>& parameter_block_sizes =
cost_function->parameter_block_sizes();
const int num_parameter_blocks =
(N0 > 0) + (N1 > 0) + (N2 > 0) + (N3 > 0) + (N4 > 0) +
@@ -679,7 +679,7 @@ class CostFunctionToFunctor {
template <typename JetT>
bool EvaluateWithJets(const JetT** inputs, JetT* output) const {
const int kNumParameters = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9;
- const vector<int16>& parameter_block_sizes =
+ const vector<int32>& parameter_block_sizes =
cost_function_->parameter_block_sizes();
const int num_parameter_blocks = parameter_block_sizes.size();
const int num_residuals = cost_function_->num_residuals();
@@ -732,7 +732,7 @@ class CostFunctionToFunctor {
output[i].v.setZero();
for (int j = 0; j < num_parameter_blocks; ++j) {
- const int16 block_size = parameter_block_sizes[j];
+ const int32 block_size = parameter_block_sizes[j];
for (int k = 0; k < parameter_block_sizes[j]; ++k) {
output[i].v +=
jacobian_blocks[j][i * block_size + k] * inputs[j][k].v;
diff --git a/include/ceres/covariance.h b/include/ceres/covariance.h
index 83126b5..35fde4d 100644
--- a/include/ceres/covariance.h
+++ b/include/ceres/covariance.h
@@ -36,6 +36,7 @@
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
+#include "ceres/internal/disable_warnings.h"
namespace ceres {
@@ -196,14 +197,14 @@ class CovarianceImpl;
// covariance.GetCovarianceBlock(y, y, covariance_yy)
// covariance.GetCovarianceBlock(x, y, covariance_xy)
//
-class Covariance {
+class CERES_EXPORT Covariance {
public:
- struct Options {
+ struct CERES_EXPORT Options {
Options()
#ifndef CERES_NO_SUITESPARSE
- : algorithm_type(SPARSE_QR),
+ : algorithm_type(SUITE_SPARSE_QR),
#else
- : algorithm_type(DENSE_SVD),
+ : algorithm_type(EIGEN_SPARSE_QR),
#endif
min_reciprocal_condition_number(1e-14),
null_space_rank(0),
@@ -228,47 +229,22 @@ class Covariance {
// for small to moderate sized problems. It can handle
// full-rank as well as rank deficient Jacobians.
//
- // 2. SPARSE_CHOLESKY uses the CHOLMOD sparse Cholesky
- // factorization library to compute the decomposition :
- //
- // R'R = J'J
- //
- // and then
- //
- // [J'J]^-1 = [R'R]^-1
- //
- // It a fast algorithm for sparse matrices that should be used
- // when the Jacobian matrix J is well conditioned. For
- // ill-conditioned matrices, this algorithm can fail
- // unpredictabily. This is because Cholesky factorization is
- // not a rank-revealing factorization, i.e., it cannot reliably
- // detect when the matrix being factorized is not of full
- // rank. SuiteSparse/CHOLMOD supplies a heuristic for checking
- // if the matrix is rank deficient (cholmod_rcond), but it is
- // only a heuristic and can have both false positive and false
- // negatives.
- //
- // Recent versions of SuiteSparse (>= 4.2.0) provide a much
- // more efficient method for solving for rows of the covariance
- // matrix. Therefore, if you are doing SPARSE_CHOLESKY, we
- // strongly recommend using a recent version of SuiteSparse.
- //
- // 3. SPARSE_QR uses the SuiteSparseQR sparse QR factorization
- // library to compute the decomposition
+ // 2. EIGEN_SPARSE_QR uses the sparse QR factorization algorithm
+ // in Eigen to compute the decomposition
//
// Q * R = J
//
// [J'J]^-1 = [R*R']^-1
//
- // It is a moderately fast algorithm for sparse matrices, which
- // at the price of more time and memory than the
- // SPARSE_CHOLESKY algorithm is numerically better behaved and
- // is rank revealing, i.e., it can reliably detect when the
- // Jacobian matrix is rank deficient.
+ // It is a moderately fast algorithm for sparse matrices.
//
- // Neither SPARSE_CHOLESKY or SPARSE_QR are capable of computing
- // the covariance if the Jacobian is rank deficient.
-
+ // 3. SUITE_SPARSE_QR uses the SuiteSparseQR sparse QR
+ // factorization algorithm. It uses dense linear algebra and is
+ // multi-threaded, so for large sparse matrices it is
+ // significantly faster than EIGEN_SPARSE_QR.
+ //
+ // Neither EIGEN_SPARSE_QR nor SUITE_SPARSE_QR are capable of
+ // computing the covariance if the Jacobian is rank deficient.
CovarianceAlgorithmType algorithm_type;
// If the Jacobian matrix is near singular, then inverting J'J
@@ -294,29 +270,13 @@ class Covariance {
// where min_sigma and max_sigma are the minimum and maximum
// singular values of J respectively.
//
- // 2. SPARSE_CHOLESKY
- //
- // cholmod_rcond < min_reciprocal_conditioner_number
- //
- // Here cholmod_rcond is a crude estimate of the reciprocal
- // condition number of J'J by using the maximum and minimum
- // diagonal entries of the Cholesky factor R. There are no
- // theoretical guarantees associated with this test. It can
- // give false positives and negatives. Use at your own
- // risk. The default value of min_reciprocal_condition_number
- // has been set to a conservative value, and sometimes the
- // Covariance::Compute may return false even if it is possible
- // to estimate the covariance reliably. In such cases, the user
- // should exercise their judgement before lowering the value of
- // min_reciprocal_condition_number.
- //
- // 3. SPARSE_QR
+ // 2. SUITE_SPARSE_QR and EIGEN_SPARSE_QR
//
// rank(J) < num_col(J)
//
// Here rank(J) is the estimate of the rank of J returned by the
- // SuiteSparseQR algorithm. It is a fairly reliable indication
- // of rank deficiency.
+ // sparse QR factorization algorithm. It is a fairly reliable
+ // indication of rank deficiency.
//
double min_reciprocal_condition_number;
@@ -351,8 +311,8 @@ class Covariance {
//
// lambda_i / lambda_max < min_reciprocal_condition_number.
//
- // This option has no effect on the SPARSE_CHOLESKY or SPARSE_QR
- // algorithms.
+ // This option has no effect on the SUITE_SPARSE_QR and
+ // EIGEN_SPARSE_QR algorithms.
int null_space_rank;
int num_threads;
@@ -419,4 +379,6 @@ class Covariance {
} // namespace ceres
+#include "ceres/internal/reenable_warnings.h"
+
#endif // CERES_PUBLIC_COVARIANCE_H_
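
A sketch of the Covariance workflow these comments refer to, using the new algorithm_type values; problem, x, and y are assumed to already exist:

    ceres::Covariance::Options options;
    options.algorithm_type = ceres::EIGEN_SPARSE_QR;  // or ceres::SUITE_SPARSE_QR
    ceres::Covariance covariance(options);

    std::vector<std::pair<const double*, const double*> > covariance_blocks;
    covariance_blocks.push_back(std::make_pair(x, x));
    covariance_blocks.push_back(std::make_pair(x, y));

    CHECK(covariance.Compute(covariance_blocks, &problem));

    double covariance_xx[3 * 3];  // Assuming x is a 3-vector.
    covariance.GetCovarianceBlock(x, x, covariance_xx);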
diff --git a/include/ceres/crs_matrix.h b/include/ceres/crs_matrix.h
index 8c470cd..d2d6289 100644
--- a/include/ceres/crs_matrix.h
+++ b/include/ceres/crs_matrix.h
@@ -33,12 +33,13 @@
#include <vector>
#include "ceres/internal/port.h"
+#include "ceres/internal/disable_warnings.h"
namespace ceres {
// A compressed row sparse matrix used primarily for communicating the
// Jacobian matrix to the user.
-struct CRSMatrix {
+struct CERES_EXPORT CRSMatrix {
CRSMatrix() : num_rows(0), num_cols(0) {}
int num_rows;
@@ -80,4 +81,6 @@ struct CRSMatrix {
} // namespace ceres
+#include "ceres/internal/reenable_warnings.h"
+
#endif // CERES_PUBLIC_CRS_MATRIX_H_
diff --git a/include/ceres/dynamic_autodiff_cost_function.h b/include/ceres/dynamic_autodiff_cost_function.h
index 5d8f188..f9342cd 100644
--- a/include/ceres/dynamic_autodiff_cost_function.h
+++ b/include/ceres/dynamic_autodiff_cost_function.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2012 Google Inc. All rights reserved.
+// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
@@ -26,18 +26,17 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
-// Author: mierle@gmail.com (Keir Mierle)
-// sameeragarwal@google.com (Sameer Agarwal)
-// thadh@gmail.com (Thad Hughes)
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+// mierle@gmail.com (Keir Mierle)
//
// This autodiff implementation differs from the one found in
-// autodiff_cost_function.h by supporting autodiff on cost functions with
-// variable numbers of parameters with variable sizes. With the other
-// implementation, all the sizes (both the number of parameter blocks and the
-// size of each block) must be fixed at compile time.
+// autodiff_cost_function.h by supporting autodiff on cost functions
+// with variable numbers of parameters with variable sizes. With the
+// other implementation, all the sizes (both the number of parameter
+// blocks and the size of each block) must be fixed at compile time.
//
-// The functor API differs slightly from the API for fixed size autodiff; the
-// expected interface for the cost functors is:
+// The functor API differs slightly from the API for fixed size
+// autodiff; the expected interface for the cost functors is:
//
// struct MyCostFunctor {
// template<typename T>
@@ -46,8 +45,9 @@
// }
// }
//
-// Since the sizing of the parameters is done at runtime, you must also specify
-// the sizes after creating the dynamic autodiff cost function. For example:
+// Since the sizing of the parameters is done at runtime, you must
+// also specify the sizes after creating the dynamic autodiff cost
+// function. For example:
//
// DynamicAutoDiffCostFunction<MyCostFunctor, 3> cost_function(
// new MyCostFunctor());
@@ -55,10 +55,11 @@
// cost_function.AddParameterBlock(10);
// cost_function.SetNumResiduals(21);
//
-// Under the hood, the implementation evaluates the cost function multiple
-// times, computing a small set of the derivatives (four by default, controlled
-// by the Stride template parameter) with each pass. There is a tradeoff with
-// the size of the passes; you may want to experiment with the stride.
+// Under the hood, the implementation evaluates the cost function
+// multiple times, computing a small set of the derivatives (four by
+// default, controlled by the Stride template parameter) with each
+// pass. There is a tradeoff with the size of the passes; you may want
+// to experiment with the stride.
#ifndef CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_
#define CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_
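
Putting the comment block above together into a sketch; the functor body is hypothetical, while the block sizes (5 and 10) and the 21 residuals follow the example in the comments:

    struct MyCostFunctor {
      template <typename T>
      bool operator()(T const* const* parameters, T* residuals) const {
        // parameters[0] has 5 entries, parameters[1] has 10 (set below).
        for (int i = 0; i < 21; ++i) {
          residuals[i] = parameters[0][i % 5] * parameters[1][i % 10];
        }
        return true;
      }
    };

    ceres::DynamicAutoDiffCostFunction<MyCostFunctor, 3> cost_function(
        new MyCostFunctor);
    cost_function.AddParameterBlock(5);
    cost_function.AddParameterBlock(10);
    cost_function.SetNumResiduals(21);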
diff --git a/include/ceres/dynamic_numeric_diff_cost_function.h b/include/ceres/dynamic_numeric_diff_cost_function.h
new file mode 100644
index 0000000..2b6e826
--- /dev/null
+++ b/include/ceres/dynamic_numeric_diff_cost_function.h
@@ -0,0 +1,265 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+// sameeragarwal@google.com (Sameer Agarwal)
+// thadh@gmail.com (Thad Hughes)
+//
+// This numeric diff implementation differs from the one found in
+// numeric_diff_cost_function.h by supporting numeric diff on cost
+// functions with variable numbers of parameters with variable
+// sizes. With the other implementation, all the sizes (both the
+// number of parameter blocks and the size of each block) must be
+// fixed at compile time.
+//
+// The functor API differs slightly from the API for fixed size
+// numeric diff; the expected interface for the cost functors is:
+//
+// struct MyCostFunctor {
+// template<typename T>
+// bool operator()(double const* const* parameters, double* residuals) const {
+// // Use parameters[i] to access the i'th parameter block.
+// }
+// }
+//
+// Since the sizing of the parameters is done at runtime, you must
+// also specify the sizes after creating the
+// DynamicNumericDiffCostFunction. For example:
+//
+// DynamicNumericDiffCostFunction<MyCostFunctor, CENTRAL> cost_function(
+// new MyCostFunctor());
+// cost_function.AddParameterBlock(5);
+// cost_function.AddParameterBlock(10);
+// cost_function.SetNumResiduals(21);
+
+#ifndef CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
+#define CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
+
+#include <cmath>
+#include <numeric>
+#include <vector>
+
+#include "ceres/cost_function.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/numeric_diff.h"
+#include "glog/logging.h"
+
+namespace ceres {
+
+template <typename CostFunctor, NumericDiffMethod method = CENTRAL>
+class DynamicNumericDiffCostFunction : public CostFunction {
+ public:
+ explicit DynamicNumericDiffCostFunction(const CostFunctor* functor,
+ Ownership ownership = TAKE_OWNERSHIP,
+ double relative_step_size = 1e-6)
+ : functor_(functor),
+ ownership_(ownership),
+ relative_step_size_(relative_step_size) {
+ }
+
+ virtual ~DynamicNumericDiffCostFunction() {
+ if (ownership_ != TAKE_OWNERSHIP) {
+ functor_.release();
+ }
+ }
+
+ void AddParameterBlock(int size) {
+ mutable_parameter_block_sizes()->push_back(size);
+ }
+
+ void SetNumResiduals(int num_residuals) {
+ set_num_residuals(num_residuals);
+ }
+
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ CHECK_GT(num_residuals(), 0)
+ << "You must call DynamicNumericDiffCostFunction::SetNumResiduals() "
+ << "before DynamicNumericDiffCostFunction::Evaluate().";
+
+ const vector<int32>& block_sizes = parameter_block_sizes();
+ CHECK(!block_sizes.empty())
+ << "You must call DynamicNumericDiffCostFunction::AddParameterBlock() "
+ << "before DynamicNumericDiffCostFunction::Evaluate().";
+
+ const bool status = EvaluateCostFunctor(parameters, residuals);
+ if (jacobians == NULL || !status) {
+ return status;
+ }
+
+ // Create local space for a copy of the parameters which will get mutated.
+ int parameters_size = accumulate(block_sizes.begin(), block_sizes.end(), 0);
+ vector<double> parameters_copy(parameters_size);
+ vector<double*> parameters_references_copy(block_sizes.size());
+ parameters_references_copy[0] = &parameters_copy[0];
+ for (int block = 1; block < block_sizes.size(); ++block) {
+ parameters_references_copy[block] = parameters_references_copy[block - 1]
+ + block_sizes[block - 1];
+ }
+
+ // Copy the parameters into the local temp space.
+ for (int block = 0; block < block_sizes.size(); ++block) {
+ memcpy(parameters_references_copy[block],
+ parameters[block],
+ block_sizes[block] * sizeof(*parameters[block]));
+ }
+
+ for (int block = 0; block < block_sizes.size(); ++block) {
+ if (jacobians[block] != NULL &&
+ !EvaluateJacobianForParameterBlock(block_sizes[block],
+ block,
+ relative_step_size_,
+ residuals,
+ &parameters_references_copy[0],
+ jacobians)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ private:
+ bool EvaluateJacobianForParameterBlock(const int parameter_block_size,
+ const int parameter_block,
+ const double relative_step_size,
+ double const* residuals_at_eval_point,
+ double** parameters,
+ double** jacobians) const {
+ using Eigen::Map;
+ using Eigen::Matrix;
+ using Eigen::Dynamic;
+ using Eigen::RowMajor;
+
+ typedef Matrix<double, Dynamic, 1> ResidualVector;
+ typedef Matrix<double, Dynamic, 1> ParameterVector;
+ typedef Matrix<double, Dynamic, Dynamic, RowMajor> JacobianMatrix;
+
+ int num_residuals = this->num_residuals();
+
+ Map<JacobianMatrix> parameter_jacobian(jacobians[parameter_block],
+ num_residuals,
+ parameter_block_size);
+
+ // Mutate one element at a time and then restore.
+ Map<ParameterVector> x_plus_delta(parameters[parameter_block],
+ parameter_block_size);
+ ParameterVector x(x_plus_delta);
+ ParameterVector step_size = x.array().abs() * relative_step_size;
+
+ // To handle cases where a parameter is exactly zero, instead use
+ // the mean step_size for the other dimensions.
+ double fallback_step_size = step_size.sum() / step_size.rows();
+ if (fallback_step_size == 0.0) {
+ // If all the parameters are zero, there's no good answer. Use the given
+ // relative step_size as absolute step_size and hope for the best.
+ fallback_step_size = relative_step_size;
+ }
+
+ // For each parameter in the parameter block, use finite
+ // differences to compute the derivative for that parameter.
+ for (int j = 0; j < parameter_block_size; ++j) {
+ if (step_size(j) == 0.0) {
+ // The parameter is exactly zero, so compromise and use the
+ // mean step_size from the other parameters. This can break in
+ // many cases, but it's hard to pick a good number without
+ // problem specific knowledge.
+ step_size(j) = fallback_step_size;
+ }
+ x_plus_delta(j) = x(j) + step_size(j);
+
+ ResidualVector residuals(num_residuals);
+ if (!EvaluateCostFunctor(parameters, &residuals[0])) {
+ // Something went wrong; bail.
+ return false;
+ }
+
+ // Compute this column of the jacobian in 3 steps:
+ // 1. Store residuals for the forward part.
+ // 2. Subtract residuals for the backward (or 0) part.
+ // 3. Divide out the run.
+ parameter_jacobian.col(j).matrix() = residuals;
+
+ double one_over_h = 1 / step_size(j);
+ if (method == CENTRAL) {
+ // Compute the function on the other side of x(j).
+ x_plus_delta(j) = x(j) - step_size(j);
+
+ if (!EvaluateCostFunctor(parameters, &residuals[0])) {
+ // Something went wrong; bail.
+ return false;
+ }
+
+ parameter_jacobian.col(j) -= residuals;
+ one_over_h /= 2;
+ } else {
+ // Forward difference only; reuse existing residuals evaluation.
+ parameter_jacobian.col(j) -=
+ Map<const ResidualVector>(residuals_at_eval_point, num_residuals);
+ }
+ x_plus_delta(j) = x(j); // Restore x_plus_delta.
+
+ // Divide out the run to get slope.
+ parameter_jacobian.col(j) *= one_over_h;
+ }
+ return true;
+ }
+
+ bool EvaluateCostFunctor(double const* const* parameters,
+ double* residuals) const {
+ return EvaluateCostFunctorImpl(functor_.get(),
+ parameters,
+ residuals,
+ functor_.get());
+ }
+
+ // Helper templates to allow evaluation of a functor or a
+ // CostFunction.
+ bool EvaluateCostFunctorImpl(const CostFunctor* functor,
+ double const* const* parameters,
+ double* residuals,
+ const void* /* NOT USED */) const {
+ return (*functor)(parameters, residuals);
+ }
+
+ bool EvaluateCostFunctorImpl(const CostFunctor* functor,
+ double const* const* parameters,
+ double* residuals,
+ const CostFunction* /* NOT USED */) const {
+ return functor->Evaluate(parameters, residuals, NULL);
+ }
+
+ internal::scoped_ptr<const CostFunctor> functor_;
+ Ownership ownership_;
+ const double relative_step_size_;
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
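
The same hypothetical setup, rewritten for the numeric diff variant added above; note that its functor takes plain doubles rather than a templated scalar:

    struct MyNumericCostFunctor {
      bool operator()(double const* const* parameters, double* residuals) const {
        for (int i = 0; i < 21; ++i) {
          residuals[i] = parameters[0][i % 5] * parameters[1][i % 10];
        }
        return true;
      }
    };

    ceres::DynamicNumericDiffCostFunction<MyNumericCostFunctor, ceres::CENTRAL>
        cost_function(new MyNumericCostFunctor);
    cost_function.AddParameterBlock(5);
    cost_function.AddParameterBlock(10);
    cost_function.SetNumResiduals(21);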
diff --git a/include/ceres/fpclassify.h b/include/ceres/fpclassify.h
index b730832..da8a4d0 100644
--- a/include/ceres/fpclassify.h
+++ b/include/ceres/fpclassify.h
@@ -46,25 +46,24 @@
namespace ceres {
#if defined(_MSC_VER)
-inline bool IsFinite (double x) { return _finite(x); }
-inline bool IsInfinite(double x) { return !_finite(x) && !_isnan(x); }
-inline bool IsNaN (double x) { return _isnan(x); }
+
+inline bool IsFinite (double x) { return _finite(x) != 0; }
+inline bool IsInfinite(double x) { return _finite(x) == 0 && _isnan(x) == 0; }
+inline bool IsNaN (double x) { return _isnan(x) != 0; }
inline bool IsNormal (double x) {
int classification = _fpclass(x);
return classification == _FPCLASS_NN ||
classification == _FPCLASS_PN;
}
-#elif defined(ANDROID)
-// On Android when using the GNU STL, the C++ fpclassify functions are not
-// available. Strictly speaking, the std functions are are not standard until
-// C++11. Instead use the C99 macros on Android.
+#elif defined(ANDROID) && defined(_STLPORT_VERSION)
+
+// On Android, when using the STLPort, the C++ isnan and isnormal functions
+// are defined as macros.
inline bool IsNaN (double x) { return isnan(x); }
inline bool IsNormal (double x) { return isnormal(x); }
-
// On Android NDK r6, when using STLPort, the isinf and isfinite functions are
// not available, so reimplement them.
-# if defined(_STLPORT_VERSION)
inline bool IsInfinite(double x) {
return x == std::numeric_limits<double>::infinity() ||
x == -std::numeric_limits<double>::infinity();
@@ -72,17 +71,15 @@ inline bool IsInfinite(double x) {
inline bool IsFinite(double x) {
return !isnan(x) && !IsInfinite(x);
}
-# else
-inline bool IsFinite (double x) { return isfinite(x); }
-inline bool IsInfinite(double x) { return isinf(x); }
-# endif // defined(_STLPORT_VERSION)
-#else
+
+# else
+
// These definitions are for the normal Unix suspects.
-// TODO(keir): Test the "else" with more platforms.
inline bool IsFinite (double x) { return std::isfinite(x); }
inline bool IsInfinite(double x) { return std::isinf(x); }
inline bool IsNaN (double x) { return std::isnan(x); }
inline bool IsNormal (double x) { return std::isnormal(x); }
+
#endif
} // namespace ceres
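
Whichever branch above is selected, the portable predicates behave the same way; a small usage sketch:

    #include <limits>
    #include "ceres/fpclassify.h"

    const double inf = std::numeric_limits<double>::infinity();
    const double nan = std::numeric_limits<double>::quiet_NaN();

    const bool a = ceres::IsFinite(1.0);    // true
    const bool b = ceres::IsInfinite(inf);  // true
    const bool c = ceres::IsNaN(nan);       // true
    const bool d = ceres::IsNormal(0.0);    // false: zero is not a normal number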
diff --git a/include/ceres/gradient_checker.h b/include/ceres/gradient_checker.h
index 3ec8056..79ebae5 100644
--- a/include/ceres/gradient_checker.h
+++ b/include/ceres/gradient_checker.h
@@ -119,7 +119,7 @@ class GradientChecker {
// Do a consistency check between the term and the template parameters.
CHECK_EQ(M, term->num_residuals());
const int num_residuals = M;
- const vector<int16>& block_sizes = term->parameter_block_sizes();
+ const vector<int32>& block_sizes = term->parameter_block_sizes();
const int num_blocks = block_sizes.size();
CHECK_LE(num_blocks, 5) << "Unable to test functions that take more "
diff --git a/include/ceres/internal/autodiff.h b/include/ceres/internal/autodiff.h
index cf21d7a..3a96625 100644
--- a/include/ceres/internal/autodiff.h
+++ b/include/ceres/internal/autodiff.h
@@ -172,7 +172,7 @@ inline void Make1stOrderPerturbation(int offset, const T* src, JetT* dst) {
for (int j = 0; j < N; ++j) {
dst[j].a = src[j];
dst[j].v.setZero();
- dst[j].v[offset + j] = 1.0;
+ dst[j].v[offset + j] = T(1.0);
}
}
diff --git a/include/ceres/internal/disable_warnings.h b/include/ceres/internal/disable_warnings.h
new file mode 100644
index 0000000..78924de
--- /dev/null
+++ b/include/ceres/internal/disable_warnings.h
@@ -0,0 +1,44 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// This file has the sole purpose to silence warnings when including Ceres.
+
+// This is not your usual header guard. The macro CERES_WARNINGS_DISABLED
+// shows up again in reenable_warnings.h.
+#ifndef CERES_WARNINGS_DISABLED
+#define CERES_WARNINGS_DISABLED
+
+#ifdef _MSC_VER
+#pragma warning( push )
+// Disable the warning C4251 which is triggered by stl classes in
+// Ceres' public interface. To quote MSDN: "C4251 can be ignored "
+// "if you are deriving from a type in the Standard C++ Library"
+#pragma warning( disable : 4251 )
+#endif
+
+#endif // CERES_WARNINGS_DISABLED
diff --git a/include/ceres/internal/fixed_array.h b/include/ceres/internal/fixed_array.h
index ee264d1..694070b 100644
--- a/include/ceres/internal/fixed_array.h
+++ b/include/ceres/internal/fixed_array.h
@@ -113,7 +113,6 @@ class FixedArray {
// REQUIRES: 0 <= i < size()
// Returns a reference to the "i"th element.
inline T& operator[](size_type i) {
- DCHECK_GE(i, 0);
DCHECK_LT(i, size_);
return array_[i].element;
}
@@ -121,7 +120,6 @@ class FixedArray {
// REQUIRES: 0 <= i < size()
// Returns a reference to the "i"th element.
inline const T& operator[](size_type i) const {
- DCHECK_GE(i, 0);
DCHECK_LT(i, size_);
return array_[i].element;
}
@@ -168,8 +166,6 @@ inline FixedArray<T, S>::FixedArray(typename FixedArray<T, S>::size_type n)
array_((n <= kInlineElements
? reinterpret_cast<InnerContainer*>(inline_space_)
: new InnerContainer[n])) {
- DCHECK_GE(n, size_t(0));
-
// Construct only the elements actually used.
if (array_ == reinterpret_cast<InnerContainer*>(inline_space_)) {
for (size_t i = 0; i != size_; ++i) {
diff --git a/include/ceres/internal/macros.h b/include/ceres/internal/macros.h
index 388cf30..1ed55be 100644
--- a/include/ceres/internal/macros.h
+++ b/include/ceres/internal/macros.h
@@ -145,12 +145,11 @@ char (&ArraySizeHelper(const T (&array)[N]))[N];
//
// Sprocket* AllocateSprocket() MUST_USE_RESULT;
//
-#undef MUST_USE_RESULT
#if (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) \
&& !defined(COMPILER_ICC)
-#define MUST_USE_RESULT __attribute__ ((warn_unused_result))
+#define CERES_MUST_USE_RESULT __attribute__ ((warn_unused_result))
#else
-#define MUST_USE_RESULT
+#define CERES_MUST_USE_RESULT
#endif
// Platform independent macros to get aligned memory allocations.
diff --git a/include/ceres/internal/numeric_diff.h b/include/ceres/internal/numeric_diff.h
index 4058366..5048348 100644
--- a/include/ceres/internal/numeric_diff.h
+++ b/include/ceres/internal/numeric_diff.h
@@ -90,6 +90,7 @@ struct NumericDiff {
const CostFunctor* functor,
double const* residuals_at_eval_point,
const double relative_step_size,
+ int num_residuals,
double **parameters,
double *jacobian) {
using Eigen::Map;
@@ -97,15 +98,21 @@ struct NumericDiff {
using Eigen::RowMajor;
using Eigen::ColMajor;
+ const int NUM_RESIDUALS =
+ (kNumResiduals != ceres::DYNAMIC ? kNumResiduals : num_residuals);
+
typedef Matrix<double, kNumResiduals, 1> ResidualVector;
typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
- typedef Matrix<double, kNumResiduals, kParameterBlockSize,
+ typedef Matrix<double,
+ kNumResiduals,
+ kParameterBlockSize,
(kParameterBlockSize == 1 &&
- kNumResiduals > 1) ? ColMajor : RowMajor> JacobianMatrix;
+ kNumResiduals > 1) ? ColMajor : RowMajor>
+ JacobianMatrix;
Map<JacobianMatrix> parameter_jacobian(jacobian,
- kNumResiduals,
+ NUM_RESIDUALS,
kParameterBlockSize);
// Mutate 1 element at a time and then restore.
@@ -125,16 +132,16 @@ struct NumericDiff {
// For each parameter in the parameter block, use finite differences to
// compute the derivative for that parameter.
+
+ ResidualVector residuals(NUM_RESIDUALS);
for (int j = 0; j < kParameterBlockSize; ++j) {
const double delta =
(step_size(j) == 0.0) ? fallback_step_size : step_size(j);
x_plus_delta(j) = x(j) + delta;
- double residuals[kNumResiduals]; // NOLINT
-
if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
- functor, parameters, residuals, functor)) {
+ functor, parameters, residuals.data(), functor)) {
return false;
}
@@ -142,8 +149,7 @@ struct NumericDiff {
// 1. Store residuals for the forward part.
// 2. Subtract residuals for the backward (or 0) part.
// 3. Divide out the run.
- parameter_jacobian.col(j) =
- Map<const ResidualVector>(residuals, kNumResiduals);
+ parameter_jacobian.col(j) = residuals;
double one_over_delta = 1.0 / delta;
if (kMethod == CENTRAL) {
@@ -151,17 +157,16 @@ struct NumericDiff {
x_plus_delta(j) = x(j) - delta;
if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
- functor, parameters, residuals, functor)) {
+ functor, parameters, residuals.data(), functor)) {
return false;
}
- parameter_jacobian.col(j) -=
- Map<ResidualVector>(residuals, kNumResiduals, 1);
+ parameter_jacobian.col(j) -= residuals;
one_over_delta /= 2;
} else {
// Forward difference only; reuse existing residuals evaluation.
parameter_jacobian.col(j) -=
- Map<const ResidualVector>(residuals_at_eval_point, kNumResiduals);
+ Map<const ResidualVector>(residuals_at_eval_point, NUM_RESIDUALS);
}
x_plus_delta(j) = x(j); // Restore x_plus_delta.
@@ -186,6 +191,7 @@ struct NumericDiff<CostFunctor, kMethod, kNumResiduals,
const CostFunctor* functor,
double const* residuals_at_eval_point,
const double relative_step_size,
+ const int num_residuals,
double **parameters,
double *jacobian) {
LOG(FATAL) << "Control should never reach here.";
diff --git a/include/ceres/internal/port.h b/include/ceres/internal/port.h
index a9fe247..e38eb71 100644
--- a/include/ceres/internal/port.h
+++ b/include/ceres/internal/port.h
@@ -31,8 +31,19 @@
#ifndef CERES_PUBLIC_INTERNAL_PORT_H_
#define CERES_PUBLIC_INTERNAL_PORT_H_
+// This file needs to compile as c code.
+#ifdef __cplusplus
+
#include <string>
+#include "ceres/internal/config.h"
+
+#if defined(CERES_TR1_MEMORY_HEADER)
+#include <tr1/memory>
+#else
+#include <memory>
+#endif
+
namespace ceres {
// It is unfortunate that this import of the entire standard namespace is
@@ -45,6 +56,33 @@ using namespace std;
// "string" implementation in the global namespace.
using std::string;
+#if defined(CERES_TR1_SHARED_PTR)
+using std::tr1::shared_ptr;
+#else
+using std::shared_ptr;
+#endif
+
} // namespace ceres
+#endif // __cplusplus
+
+// A macro to signal which functions and classes are exported when
+// building a DLL with MSVC.
+//
+// Note that the ordering here is important, CERES_BUILDING_SHARED_LIBRARY
+// is only defined locally when Ceres is compiled, it is never exported to
+// users. However, in order that we do not have to configure config.h
+// separately for building vs installing, if we are using MSVC and building
+// a shared library, then both CERES_BUILDING_SHARED_LIBRARY and
+// CERES_USING_SHARED_LIBRARY will be defined when Ceres is compiled.
+// Hence it is important that the check for CERES_BUILDING_SHARED_LIBRARY
+// happens first.
+#if defined(_MSC_VER) && defined(CERES_BUILDING_SHARED_LIBRARY)
+# define CERES_EXPORT __declspec(dllexport)
+#elif defined(_MSC_VER) && defined(CERES_USING_SHARED_LIBRARY)
+# define CERES_EXPORT __declspec(dllimport)
+#else
+# define CERES_EXPORT
+#endif
+
#endif // CERES_PUBLIC_INTERNAL_PORT_H_
diff --git a/include/ceres/internal/reenable_warnings.h b/include/ceres/internal/reenable_warnings.h
new file mode 100644
index 0000000..1f477d8
--- /dev/null
+++ b/include/ceres/internal/reenable_warnings.h
@@ -0,0 +1,38 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+// This is not your usual header guard. See disable_warnings.h
+#ifdef CERES_WARNINGS_DISABLED
+#undef CERES_WARNINGS_DISABLED
+
+#ifdef _MSC_VER
+#pragma warning( pop )
+#endif
+
+#endif // CERES_WARNINGS_DISABLED
diff --git a/include/ceres/iteration_callback.h b/include/ceres/iteration_callback.h
index 987c2d9..237ada6 100644
--- a/include/ceres/iteration_callback.h
+++ b/include/ceres/iteration_callback.h
@@ -36,12 +36,13 @@
#define CERES_PUBLIC_ITERATION_CALLBACK_H_
#include "ceres/types.h"
+#include "ceres/internal/disable_warnings.h"
namespace ceres {
// This struct describes the state of the optimizer after each
// iteration of the minimization.
-struct IterationSummary {
+struct CERES_EXPORT IterationSummary {
IterationSummary()
: iteration(0),
step_is_valid(false),
@@ -50,6 +51,7 @@ struct IterationSummary {
cost(0.0),
cost_change(0.0),
gradient_max_norm(0.0),
+ gradient_norm(0.0),
step_norm(0.0),
eta(0.0),
step_size(0.0),
@@ -100,6 +102,9 @@ struct IterationSummary {
// Infinity norm of the gradient vector.
double gradient_max_norm;
+ // 2-norm of the gradient vector.
+ double gradient_norm;
+
// 2-norm of the size of the step computed by the optimization
// algorithm.
double step_norm;
@@ -207,7 +212,7 @@ struct IterationSummary {
// const bool log_to_stdout_;
// };
//
-class IterationCallback {
+class CERES_EXPORT IterationCallback {
public:
virtual ~IterationCallback() {}
virtual CallbackReturnType operator()(const IterationSummary& summary) = 0;
@@ -215,4 +220,6 @@ class IterationCallback {
} // namespace ceres
+#include "ceres/internal/reenable_warnings.h"
+
#endif // CERES_PUBLIC_ITERATION_CALLBACK_H_
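
A sketch of a callback consuming the new gradient_norm field added above; the class name and the wiring into Solver::Options are illustrative:

    #include "ceres/iteration_callback.h"
    #include "glog/logging.h"

    class GradientNormLoggingCallback : public ceres::IterationCallback {
     public:
      virtual ceres::CallbackReturnType operator()(
          const ceres::IterationSummary& summary) {
        // gradient_max_norm existed before; gradient_norm (the 2-norm) is new.
        LOG(INFO) << "iter " << summary.iteration
                  << "  |g|_inf = " << summary.gradient_max_norm
                  << "  |g|_2 = " << summary.gradient_norm;
        return ceres::SOLVER_CONTINUE;
      }
    };

    // options.callbacks.push_back(new GradientNormLoggingCallback);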
diff --git a/include/ceres/jet.h b/include/ceres/jet.h
index 4d2a857..81f96c7 100644
--- a/include/ceres/jet.h
+++ b/include/ceres/jet.h
@@ -106,8 +106,8 @@
// Jet<double, 2> y(1); // Pick the 1st dual number for y.
// Jet<double, 2> z = f(x, y);
//
-// LG << "df/dx = " << z.a[0]
-// << "df/dy = " << z.a[1];
+// LOG(INFO) << "df/dx = " << z.a[0]
+// << "df/dy = " << z.a[1];
//
// Most users should not use Jet objects directly; a wrapper around Jet objects,
// which makes computing the derivative, gradient, or jacobian of templated
@@ -192,6 +192,17 @@ struct Jet {
v[k] = T(1.0);
}
+ // Constructor from scalar and vector part
+ // The use of Eigen::DenseBase allows Eigen expressions
+ // to be passed in without being fully evaluated until
+ // they are assigned to v
+ template<typename Derived>
+ Jet(const T& value, const Eigen::DenseBase<Derived> &vIn)
+ : a(value),
+ v(vIn)
+ {
+ }
+
// Compound operators
Jet<T, N>& operator+=(const Jet<T, N> &y) {
*this = *this + y;
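
A small sketch of the new scalar-plus-vector constructor added above; the numeric values are arbitrary:

    #include "Eigen/Core"
    #include "ceres/jet.h"

    // Value part 2.0, derivative part (1, 0); the vector part may be any
    // Eigen expression and is only evaluated when assigned to v.
    ceres::Jet<double, 2> x(2.0, Eigen::Vector2d(1.0, 0.0));
    ceres::Jet<double, 2> y(3.0, Eigen::Vector2d(0.0, 1.0));
    ceres::Jet<double, 2> z = x * y;  // z.a == 6, z.v == (3, 2)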
@@ -246,101 +257,70 @@ Jet<T, N> const& operator+(const Jet<T, N>& f) {
// Unary -
template<typename T, int N> inline
Jet<T, N> operator-(const Jet<T, N>&f) {
- Jet<T, N> g;
- g.a = -f.a;
- g.v = -f.v;
- return g;
+ return Jet<T, N>(-f.a, -f.v);
}
// Binary +
template<typename T, int N> inline
Jet<T, N> operator+(const Jet<T, N>& f,
const Jet<T, N>& g) {
- Jet<T, N> h;
- h.a = f.a + g.a;
- h.v = f.v + g.v;
- return h;
+ return Jet<T, N>(f.a + g.a, f.v + g.v);
}
// Binary + with a scalar: x + s
template<typename T, int N> inline
Jet<T, N> operator+(const Jet<T, N>& f, T s) {
- Jet<T, N> h;
- h.a = f.a + s;
- h.v = f.v;
- return h;
+ return Jet<T, N>(f.a + s, f.v);
}
// Binary + with a scalar: s + x
template<typename T, int N> inline
Jet<T, N> operator+(T s, const Jet<T, N>& f) {
- Jet<T, N> h;
- h.a = f.a + s;
- h.v = f.v;
- return h;
+ return Jet<T, N>(f.a + s, f.v);
}
// Binary -
template<typename T, int N> inline
Jet<T, N> operator-(const Jet<T, N>& f,
const Jet<T, N>& g) {
- Jet<T, N> h;
- h.a = f.a - g.a;
- h.v = f.v - g.v;
- return h;
+ return Jet<T, N>(f.a - g.a, f.v - g.v);
}
// Binary - with a scalar: x - s
template<typename T, int N> inline
Jet<T, N> operator-(const Jet<T, N>& f, T s) {
- Jet<T, N> h;
- h.a = f.a - s;
- h.v = f.v;
- return h;
+ return Jet<T, N>(f.a - s, f.v);
}
// Binary - with a scalar: s - x
template<typename T, int N> inline
Jet<T, N> operator-(T s, const Jet<T, N>& f) {
- Jet<T, N> h;
- h.a = s - f.a;
- h.v = -f.v;
- return h;
+ return Jet<T, N>(s - f.a, -f.v);
}
// Binary *
template<typename T, int N> inline
Jet<T, N> operator*(const Jet<T, N>& f,
const Jet<T, N>& g) {
- Jet<T, N> h;
- h.a = f.a * g.a;
- h.v = f.a * g.v + f.v * g.a;
- return h;
+ return Jet<T, N>(f.a * g.a, f.a * g.v + f.v * g.a);
}
// Binary * with a scalar: x * s
template<typename T, int N> inline
Jet<T, N> operator*(const Jet<T, N>& f, T s) {
- Jet<T, N> h;
- h.a = f.a * s;
- h.v = f.v * s;
- return h;
+ return Jet<T, N>(f.a * s, f.v * s);
}
// Binary * with a scalar: s * x
template<typename T, int N> inline
Jet<T, N> operator*(T s, const Jet<T, N>& f) {
- Jet<T, N> h;
- h.a = f.a * s;
- h.v = f.v * s;
- return h;
+ return Jet<T, N>(f.a * s, f.v * s);
}
// Binary /
template<typename T, int N> inline
Jet<T, N> operator/(const Jet<T, N>& f,
const Jet<T, N>& g) {
- Jet<T, N> h;
// This uses:
//
// a + u (a + u)(b - v) (a + u)(b - v)
@@ -349,32 +329,22 @@ Jet<T, N> operator/(const Jet<T, N>& f,
//
// which holds because v*v = 0.
const T g_a_inverse = T(1.0) / g.a;
- h.a = f.a * g_a_inverse;
const T f_a_by_g_a = f.a * g_a_inverse;
- for (int i = 0; i < N; ++i) {
- h.v[i] = (f.v[i] - f_a_by_g_a * g.v[i]) * g_a_inverse;
- }
- return h;
+ return Jet<T, N>(f.a * g_a_inverse, (f.v - f_a_by_g_a * g.v) * g_a_inverse);
}
// Binary / with a scalar: s / x
template<typename T, int N> inline
Jet<T, N> operator/(T s, const Jet<T, N>& g) {
- Jet<T, N> h;
- h.a = s / g.a;
const T minus_s_g_a_inverse2 = -s / (g.a * g.a);
- h.v = g.v * minus_s_g_a_inverse2;
- return h;
+ return Jet<T, N>(s / g.a, g.v * minus_s_g_a_inverse2);
}
// Binary / with a scalar: x / s
template<typename T, int N> inline
Jet<T, N> operator/(const Jet<T, N>& f, T s) {
- Jet<T, N> h;
const T s_inverse = 1.0 / s;
- h.a = f.a * s_inverse;
- h.v = f.v * s_inverse;
- return h;
+ return Jet<T, N>(f.a * s_inverse, f.v * s_inverse);
}
// Binary comparison operators for both scalars and jets.
@@ -433,122 +403,84 @@ Jet<T, N> abs(const Jet<T, N>& f) {
// log(a + h) ~= log(a) + h / a
template <typename T, int N> inline
Jet<T, N> log(const Jet<T, N>& f) {
- Jet<T, N> g;
- g.a = log(f.a);
const T a_inverse = T(1.0) / f.a;
- g.v = f.v * a_inverse;
- return g;
+ return Jet<T, N>(log(f.a), f.v * a_inverse);
}
// exp(a + h) ~= exp(a) + exp(a) h
template <typename T, int N> inline
Jet<T, N> exp(const Jet<T, N>& f) {
- Jet<T, N> g;
- g.a = exp(f.a);
- g.v = g.a * f.v;
- return g;
+ const T tmp = exp(f.a);
+ return Jet<T, N>(tmp, tmp * f.v);
}
// sqrt(a + h) ~= sqrt(a) + h / (2 sqrt(a))
template <typename T, int N> inline
Jet<T, N> sqrt(const Jet<T, N>& f) {
- Jet<T, N> g;
- g.a = sqrt(f.a);
- const T two_a_inverse = T(1.0) / (T(2.0) * g.a);
- g.v = f.v * two_a_inverse;
- return g;
+ const T tmp = sqrt(f.a);
+ const T two_a_inverse = T(1.0) / (T(2.0) * tmp);
+ return Jet<T, N>(tmp, f.v * two_a_inverse);
}
// cos(a + h) ~= cos(a) - sin(a) h
template <typename T, int N> inline
Jet<T, N> cos(const Jet<T, N>& f) {
- Jet<T, N> g;
- g.a = cos(f.a);
- const T sin_a = sin(f.a);
- g.v = - sin_a * f.v;
- return g;
+ return Jet<T, N>(cos(f.a), - sin(f.a) * f.v);
}
// acos(a + h) ~= acos(a) - 1 / sqrt(1 - a^2) h
template <typename T, int N> inline
Jet<T, N> acos(const Jet<T, N>& f) {
- Jet<T, N> g;
- g.a = acos(f.a);
const T tmp = - T(1.0) / sqrt(T(1.0) - f.a * f.a);
- g.v = tmp * f.v;
- return g;
+ return Jet<T, N>(acos(f.a), tmp * f.v);
}
// sin(a + h) ~= sin(a) + cos(a) h
template <typename T, int N> inline
Jet<T, N> sin(const Jet<T, N>& f) {
- Jet<T, N> g;
- g.a = sin(f.a);
- const T cos_a = cos(f.a);
- g.v = cos_a * f.v;
- return g;
+ return Jet<T, N>(sin(f.a), cos(f.a) * f.v);
}
// asin(a + h) ~= asin(a) + 1 / sqrt(1 - a^2) h
template <typename T, int N> inline
Jet<T, N> asin(const Jet<T, N>& f) {
- Jet<T, N> g;
- g.a = asin(f.a);
const T tmp = T(1.0) / sqrt(T(1.0) - f.a * f.a);
- g.v = tmp * f.v;
- return g;
+ return Jet<T, N>(asin(f.a), tmp * f.v);
}
// tan(a + h) ~= tan(a) + (1 + tan(a)^2) h
template <typename T, int N> inline
Jet<T, N> tan(const Jet<T, N>& f) {
- Jet<T, N> g;
- g.a = tan(f.a);
- double tan_a = tan(f.a);
+ const T tan_a = tan(f.a);
const T tmp = T(1.0) + tan_a * tan_a;
- g.v = tmp * f.v;
- return g;
+ return Jet<T, N>(tan_a, tmp * f.v);
}
// atan(a + h) ~= atan(a) + 1 / (1 + a^2) h
template <typename T, int N> inline
Jet<T, N> atan(const Jet<T, N>& f) {
- Jet<T, N> g;
- g.a = atan(f.a);
const T tmp = T(1.0) / (T(1.0) + f.a * f.a);
- g.v = tmp * f.v;
- return g;
+ return Jet<T, N>(atan(f.a), tmp * f.v);
}
// sinh(a + h) ~= sinh(a) + cosh(a) h
template <typename T, int N> inline
Jet<T, N> sinh(const Jet<T, N>& f) {
- Jet<T, N> g;
- g.a = sinh(f.a);
- const T cosh_a = cosh(f.a);
- g.v = cosh_a * f.v;
- return g;
+ return Jet<T, N>(sinh(f.a), cosh(f.a) * f.v);
}
// cosh(a + h) ~= cosh(a) + sinh(a) h
template <typename T, int N> inline
Jet<T, N> cosh(const Jet<T, N>& f) {
- Jet<T, N> g;
- g.a = cosh(f.a);
- const T sinh_a = sinh(f.a);
- g.v = sinh_a * f.v;
- return g;
+ return Jet<T, N>(cosh(f.a), sinh(f.a) * f.v);
}
// tanh(a + h) ~= tanh(a) + (1 - tanh(a)^2) h
template <typename T, int N> inline
Jet<T, N> tanh(const Jet<T, N>& f) {
- Jet<T, N> g;
- g.a = tanh(f.a);
- double tanh_a = tanh(f.a);
+ const T tanh_a = tanh(f.a);
const T tmp = T(1.0) - tanh_a * tanh_a;
- g.v = tmp * f.v;
- return g;
+ return Jet<T, N>(tanh_a, tmp * f.v);
}
// Jet Classification. It is not clear what the appropriate semantics are for
@@ -628,36 +560,25 @@ Jet<T, N> atan2(const Jet<T, N>& g, const Jet<T, N>& f) {
// f = a + da
// g = b + db
- Jet<T, N> out;
-
- out.a = atan2(g.a, f.a);
-
- T const temp = T(1.0) / (f.a * f.a + g.a * g.a);
- out.v = temp * (- g.a * f.v + f.a * g.v);
- return out;
+ T const tmp = T(1.0) / (f.a * f.a + g.a * g.a);
+ return Jet<T, N>(atan2(g.a, f.a), tmp * (- g.a * f.v + f.a * g.v));
}
-// pow -- base is a differentiatble function, exponent is a constant.
+// pow -- base is a differentiable function, exponent is a constant.
// (a+da)^p ~= a^p + p*a^(p-1) da
template <typename T, int N> inline
Jet<T, N> pow(const Jet<T, N>& f, double g) {
- Jet<T, N> out;
- out.a = pow(f.a, g);
- T const temp = g * pow(f.a, g - T(1.0));
- out.v = temp * f.v;
- return out;
+ T const tmp = g * pow(f.a, g - T(1.0));
+ return Jet<T, N>(pow(f.a, g), tmp * f.v);
}
// pow -- base is a constant, exponent is a differentiable function.
// (a)^(p+dp) ~= a^p + a^p log(a) dp
template <typename T, int N> inline
Jet<T, N> pow(double f, const Jet<T, N>& g) {
- Jet<T, N> out;
- out.a = pow(f, g.a);
- T const temp = log(f) * out.a;
- out.v = temp * g.v;
- return out;
+ T const tmp = pow(f, g.a);
+ return Jet<T, N>(tmp, log(f) * tmp * g.v);
}
@@ -665,15 +586,11 @@ Jet<T, N> pow(double f, const Jet<T, N>& g) {
// (a+da)^(b+db) ~= a^b + b * a^(b-1) da + a^b log(a) * db
template <typename T, int N> inline
Jet<T, N> pow(const Jet<T, N>& f, const Jet<T, N>& g) {
- Jet<T, N> out;
+ T const tmp1 = pow(f.a, g.a);
+ T const tmp2 = g.a * pow(f.a, g.a - T(1.0));
+ T const tmp3 = tmp1 * log(f.a);
- T const temp1 = pow(f.a, g.a);
- T const temp2 = g.a * pow(f.a, g.a - T(1.0));
- T const temp3 = temp1 * log(f.a);
-
- out.a = temp1;
- out.v = temp2 * f.v + temp3 * g.v;
- return out;
+ return Jet<T, N>(tmp1, tmp2 * f.v + tmp3 * g.v);
}
// Define the helper functions Eigen needs to embed Jet types.
@@ -732,6 +649,8 @@ struct NumTraits<ceres::Jet<T, N> > {
return ceres::Jet<T, N>(1e-12);
}
+ static inline Real epsilon() { return Real(std::numeric_limits<T>::epsilon()); }
+
enum {
IsComplex = 0,
IsInteger = 0,
@@ -740,7 +659,8 @@ struct NumTraits<ceres::Jet<T, N> > {
AddCost = 1,
// For Jet types, multiplication is more expensive than addition.
MulCost = 3,
- HasFloatingPoint = 1
+ HasFloatingPoint = 1,
+ RequireInitialization = 1
};
};
diff --git a/include/ceres/local_parameterization.h b/include/ceres/local_parameterization.h
index c0f7dc7..3ab21fc 100644
--- a/include/ceres/local_parameterization.h
+++ b/include/ceres/local_parameterization.h
@@ -34,6 +34,7 @@
#include <vector>
#include "ceres/internal/port.h"
+#include "ceres/internal/disable_warnings.h"
namespace ceres {
@@ -107,7 +108,7 @@ namespace ceres {
//
// The class LocalParameterization defines the function Plus and its
// Jacobian which is needed to compute the Jacobian of f w.r.t delta.
-class LocalParameterization {
+class CERES_EXPORT LocalParameterization {
public:
virtual ~LocalParameterization() {}
@@ -133,7 +134,7 @@ class LocalParameterization {
// Some basic parameterizations
// Identity Parameterization: Plus(x, delta) = x + delta
-class IdentityParameterization : public LocalParameterization {
+class CERES_EXPORT IdentityParameterization : public LocalParameterization {
public:
explicit IdentityParameterization(int size);
virtual ~IdentityParameterization() {}
@@ -150,7 +151,7 @@ class IdentityParameterization : public LocalParameterization {
};
// Hold a subset of the parameters inside a parameter block constant.
-class SubsetParameterization : public LocalParameterization {
+class CERES_EXPORT SubsetParameterization : public LocalParameterization {
public:
explicit SubsetParameterization(int size,
const vector<int>& constant_parameters);
@@ -160,7 +161,9 @@ class SubsetParameterization : public LocalParameterization {
double* x_plus_delta) const;
virtual bool ComputeJacobian(const double* x,
double* jacobian) const;
- virtual int GlobalSize() const { return constancy_mask_.size(); }
+ virtual int GlobalSize() const {
+ return static_cast<int>(constancy_mask_.size());
+ }
virtual int LocalSize() const { return local_size_; }
private:
@@ -172,7 +175,7 @@ class SubsetParameterization : public LocalParameterization {
// with * being the quaternion multiplication operator. Here we assume
// that the first element of the quaternion vector is the real (cos
// theta) part.
-class QuaternionParameterization : public LocalParameterization {
+class CERES_EXPORT QuaternionParameterization : public LocalParameterization {
public:
virtual ~QuaternionParameterization() {}
virtual bool Plus(const double* x,
@@ -186,4 +189,6 @@ class QuaternionParameterization : public LocalParameterization {
} // namespace ceres
+#include "ceres/internal/reenable_warnings.h"
+
#endif // CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_
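These parameterizations are attached to parameter blocks through Problem::SetParameterization. A hedged usage sketch for SubsetParameterization follows; the parameter array and the choice of constant index are illustrative, and the block is assumed to have been added to the problem already:

#include <vector>
#include "ceres/ceres.h"

void HoldZConstant(ceres::Problem* problem, double* point3) {
  // Keep the z component (index 2) of this 3-parameter block fixed while
  // x and y remain free; the local (tangent) size of the block becomes 2.
  std::vector<int> constant_indices;
  constant_indices.push_back(2);
  problem->SetParameterization(
      point3, new ceres::SubsetParameterization(3, constant_indices));
}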
diff --git a/include/ceres/loss_function.h b/include/ceres/loss_function.h
index b99c184..2c58500 100644
--- a/include/ceres/loss_function.h
+++ b/include/ceres/loss_function.h
@@ -75,14 +75,15 @@
#ifndef CERES_PUBLIC_LOSS_FUNCTION_H_
#define CERES_PUBLIC_LOSS_FUNCTION_H_
+#include "glog/logging.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
-#include "glog/logging.h"
+#include "ceres/internal/disable_warnings.h"
namespace ceres {
-class LossFunction {
+class CERES_EXPORT LossFunction {
public:
virtual ~LossFunction() {}
@@ -128,7 +129,7 @@ class LossFunction {
// It is not normally necessary to use this, as passing NULL for the
// loss function when building the problem accomplishes the same
// thing.
-class TrivialLoss : public LossFunction {
+class CERES_EXPORT TrivialLoss : public LossFunction {
public:
virtual void Evaluate(double, double*) const;
};
@@ -171,7 +172,7 @@ class TrivialLoss : public LossFunction {
//
// The scaling parameter 'a' corresponds to 'delta' on this page:
// http://en.wikipedia.org/wiki/Huber_Loss_Function
-class HuberLoss : public LossFunction {
+class CERES_EXPORT HuberLoss : public LossFunction {
public:
explicit HuberLoss(double a) : a_(a), b_(a * a) { }
virtual void Evaluate(double, double*) const;
@@ -187,7 +188,7 @@ class HuberLoss : public LossFunction {
// rho(s) = 2 (sqrt(1 + s) - 1).
//
// At s = 0: rho = [0, 1, -1/2].
-class SoftLOneLoss : public LossFunction {
+class CERES_EXPORT SoftLOneLoss : public LossFunction {
public:
explicit SoftLOneLoss(double a) : b_(a * a), c_(1 / b_) { }
virtual void Evaluate(double, double*) const;
@@ -204,7 +205,7 @@ class SoftLOneLoss : public LossFunction {
// rho(s) = log(1 + s).
//
// At s = 0: rho = [0, 1, -1].
-class CauchyLoss : public LossFunction {
+class CERES_EXPORT CauchyLoss : public LossFunction {
public:
explicit CauchyLoss(double a) : b_(a * a), c_(1 / b_) { }
virtual void Evaluate(double, double*) const;
@@ -225,7 +226,7 @@ class CauchyLoss : public LossFunction {
// rho(s) = a atan(s / a).
//
// At s = 0: rho = [0, 1, 0].
-class ArctanLoss : public LossFunction {
+class CERES_EXPORT ArctanLoss : public LossFunction {
public:
explicit ArctanLoss(double a) : a_(a), b_(1 / (a * a)) { }
virtual void Evaluate(double, double*) const;
@@ -264,7 +265,7 @@ class ArctanLoss : public LossFunction {
// concentrated in the range a - b to a + b.
//
// At s = 0: rho = [0, ~0, ~0].
-class TolerantLoss : public LossFunction {
+class CERES_EXPORT TolerantLoss : public LossFunction {
public:
explicit TolerantLoss(double a, double b);
virtual void Evaluate(double, double*) const;
@@ -305,7 +306,7 @@ class ComposedLoss : public LossFunction {
// function, rho = NULL is a valid input and will result in the input
// being scaled by a. This provides a simple way of implementing a
// scaled ResidualBlock.
-class ScaledLoss : public LossFunction {
+class CERES_EXPORT ScaledLoss : public LossFunction {
public:
// Constructs a ScaledLoss wrapping another loss function. Takes
// ownership of the wrapped loss function or not depending on the
@@ -362,7 +363,7 @@ class ScaledLoss : public LossFunction {
//
// Solve(options, &problem, &summary)
//
-class LossFunctionWrapper : public LossFunction {
+class CERES_EXPORT LossFunctionWrapper : public LossFunction {
public:
LossFunctionWrapper(LossFunction* rho, Ownership ownership)
: rho_(rho), ownership_(ownership) {
@@ -395,4 +396,6 @@ class LossFunctionWrapper : public LossFunction {
} // namespace ceres
+#include "ceres/internal/reenable_warnings.h"
+
#endif // CERES_PUBLIC_LOSS_FUNCTION_H_
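A loss function is passed alongside the cost function in Problem::AddResidualBlock; for example, HuberLoss(a) leaves small residuals quadratic and grows only linearly beyond |r| > a, which limits the influence of outliers. A sketch with an illustrative auto-diff functor (not taken from the Ceres sources):

#include "ceres/ceres.h"

// Hypothetical 1-residual, 1-parameter functor; only its sizes matter here.
struct ExampleResidual {
  template <typename T>
  bool operator()(const T* const x, T* residual) const {
    residual[0] = T(10.0) - x[0];
    return true;
  }
};

void AddRobustifiedBlock(ceres::Problem* problem, double* x) {
  ceres::CostFunction* cost =
      new ceres::AutoDiffCostFunction<ExampleResidual, 1, 1>(new ExampleResidual);
  // Residuals larger than 1.0 are downweighted by the Huber loss instead of
  // being squared outright.
  problem->AddResidualBlock(cost, new ceres::HuberLoss(1.0), x);
}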
diff --git a/include/ceres/normal_prior.h b/include/ceres/normal_prior.h
index 480a074..df66505 100644
--- a/include/ceres/normal_prior.h
+++ b/include/ceres/normal_prior.h
@@ -36,6 +36,7 @@
#include "ceres/cost_function.h"
#include "ceres/internal/eigen.h"
+#include "ceres/internal/disable_warnings.h"
namespace ceres {
@@ -56,7 +57,7 @@ namespace ceres {
// which would be the case if the covariance matrix S is rank
// deficient.
-class NormalPrior: public CostFunction {
+class CERES_EXPORT NormalPrior: public CostFunction {
public:
// Check that the number of rows in the vector b are the same as the
// number of columns in the matrix A, crash otherwise.
@@ -72,4 +73,6 @@ class NormalPrior: public CostFunction {
} // namespace ceres
+#include "ceres/internal/reenable_warnings.h"
+
#endif // CERES_PUBLIC_NORMAL_PRIOR_H_
diff --git a/include/ceres/numeric_diff_cost_function.h b/include/ceres/numeric_diff_cost_function.h
index a47a66d..de6b74a 100644
--- a/include/ceres/numeric_diff_cost_function.h
+++ b/include/ceres/numeric_diff_cost_function.h
@@ -95,6 +95,21 @@
// "MyScalarCostFunctor", "1, 2, 2", describe the functor as computing
// a 1-dimensional output from two arguments, both 2-dimensional.
//
+// NumericDiffCostFunction also supports cost functions with a
+// runtime-determined number of residuals. For example:
+//
+// CostFunction* cost_function
+//   CostFunction* cost_function
+//     = new NumericDiffCostFunction<MyScalarCostFunctor, CENTRAL, DYNAMIC, 2, 2>(
+//         new CostFunctorWithDynamicNumResiduals(1.0),                ^     ^  ^
+//         TAKE_OWNERSHIP,                                             |     |  |
+//         runtime_number_of_residuals); <----+                        |     |  |
+//                                            |                        |     |  |
+//                                            |                        |     |  |
+//           Actual number of residuals ------+                        |     |  |
+//           Indicate dynamic number of residuals ---------------------+     |  |
+//           Dimension of x -------------------------------------------------+  |
+//           Dimension of y ----------------------------------------------------+
+//
// The framework can currently accommodate cost functions of up to 10
// independent variables, and there is no limit on the dimensionality
// of each of them.
@@ -104,8 +119,6 @@
// central differences begin with, and only after that works, trying forward
// difference to improve performance.
//
-// TODO(sameeragarwal): Add support for dynamic number of residuals.
-//
// WARNING #1: A common beginner's error when first using
// NumericDiffCostFunction is to get the sizing wrong. In particular,
// there is a tendency to set the template parameters to (dimension of
@@ -177,17 +190,19 @@ class NumericDiffCostFunction
N5, N6, N7, N8, N9> {
public:
NumericDiffCostFunction(CostFunctor* functor,
+ Ownership ownership = TAKE_OWNERSHIP,
+ int num_residuals = kNumResiduals,
const double relative_step_size = 1e-6)
:functor_(functor),
- ownership_(TAKE_OWNERSHIP),
- relative_step_size_(relative_step_size) {}
-
- NumericDiffCostFunction(CostFunctor* functor,
- Ownership ownership,
- const double relative_step_size = 1e-6)
- : functor_(functor),
- ownership_(ownership),
- relative_step_size_(relative_step_size) {}
+ ownership_(ownership),
+ relative_step_size_(relative_step_size) {
+ if (kNumResiduals == DYNAMIC) {
+ SizedCostFunction<kNumResiduals,
+ N0, N1, N2, N3, N4,
+ N5, N6, N7, N8, N9>
+ ::set_num_residuals(num_residuals);
+ }
+ }
~NumericDiffCostFunction() {
if (ownership_ != TAKE_OWNERSHIP) {
@@ -216,7 +231,7 @@ class NumericDiffCostFunction
return false;
}
- if (!jacobians) {
+ if (jacobians == NULL) {
return true;
}
@@ -264,6 +279,9 @@ class NumericDiffCostFunction
functor_.get(), \
residuals, \
relative_step_size_, \
+ SizedCostFunction<kNumResiduals, \
+ N0, N1, N2, N3, N4, \
+ N5, N6, N7, N8, N9>::num_residuals(), \
parameters_reference_copy.get(), \
jacobians[block])) { \
return false; \
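With the extended constructor, a numerically differentiated cost function whose residual count is only known at runtime is built by passing DYNAMIC as the residual dimension and the actual count as the third constructor argument, mirroring the comment block above. A sketch with an illustrative functor (not part of the patch):

#include "ceres/ceres.h"

// Illustrative functor whose residual count is only known at runtime.
struct DynamicResiduals {
  explicit DynamicResiduals(int n) : n(n) {}
  bool operator()(const double* const x, double* residuals) const {
    for (int i = 0; i < n; ++i) residuals[i] = x[0] - static_cast<double>(i);
    return true;
  }
  int n;
};

ceres::CostFunction* MakeCost(int runtime_num_residuals) {
  // One parameter block of size 1; the residual dimension is set at runtime.
  return new ceres::NumericDiffCostFunction<DynamicResiduals, ceres::CENTRAL,
                                            ceres::DYNAMIC, 1>(
      new DynamicResiduals(runtime_num_residuals),
      ceres::TAKE_OWNERSHIP,
      runtime_num_residuals);
}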
diff --git a/include/ceres/numeric_diff_functor.h b/include/ceres/numeric_diff_functor.h
index 039e1a1..a29eb97 100644
--- a/include/ceres/numeric_diff_functor.h
+++ b/include/ceres/numeric_diff_functor.h
@@ -124,6 +124,8 @@ class NumericDiffFunctor {
kNumResiduals,
N0, N1, N2, N3, N4,
N5, N6, N7, N8, N9>(new Functor,
+ TAKE_OWNERSHIP,
+ kNumResiduals,
relative_step_size)) {
}
@@ -133,7 +135,10 @@ class NumericDiffFunctor {
kNumResiduals,
N0, N1, N2, N3, N4,
N5, N6, N7, N8, N9>(
- functor, relative_step_size)) {
+ functor,
+ TAKE_OWNERSHIP,
+ kNumResiduals,
+ relative_step_size)) {
}
bool operator()(const double* x0, double* residuals) const {
diff --git a/include/ceres/ordered_groups.h b/include/ceres/ordered_groups.h
index e373d35..c316d71 100644
--- a/include/ceres/ordered_groups.h
+++ b/include/ceres/ordered_groups.h
@@ -33,7 +33,9 @@
#include <map>
#include <set>
+#include <vector>
#include "ceres/internal/port.h"
+#include "glog/logging.h"
namespace ceres {
@@ -84,11 +86,8 @@ class OrderedGroups {
element_to_group_.clear();
}
- // Remove the element, no matter what group it is in. If the element
- // is not a member of any group, calling this method will result in
- // a crash.
- //
- // Return value indicates if the element was actually removed.
+ // Remove the element, no matter what group it is in. Return value
+ // indicates if the element was actually removed.
bool Remove(const T element) {
const int current_group = GroupId(element);
if (current_group < 0) {
@@ -106,6 +105,20 @@ class OrderedGroups {
return true;
}
+ // Bulk remove elements. The return value indicates the number of
+ // elements successfully removed.
+ int Remove(const vector<T>& elements) {
+ if (NumElements() == 0 || elements.size() == 0) {
+ return 0;
+ }
+
+ int num_removed = 0;
+ for (int i = 0; i < elements.size(); ++i) {
+ num_removed += Remove(elements[i]);
+ }
+ return num_removed;
+ }
+
// Reverse the order of the groups in place.
void Reverse() {
typename map<int, set<T> >::reverse_iterator it =
@@ -159,10 +172,22 @@ class OrderedGroups {
return group_to_elements_.size();
}
+ // The first group with one or more elements. Calling this when
+ // there are no groups with non-zero elements will result in a
+ // crash.
+ int MinNonZeroGroup() const {
+ CHECK_NE(NumGroups(), 0);
+ return group_to_elements_.begin()->first;
+ }
+
const map<int, set<T> >& group_to_elements() const {
return group_to_elements_;
}
+ const map<T, int>& element_to_group() const {
+ return element_to_group_;
+ }
+
private:
map<int, set<T> > group_to_elements_;
map<T, int> element_to_group_;
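The bulk Remove() overload added here complements the single-element version; a typical use is pruning several parameter blocks from a ParameterBlockOrdering (which is OrderedGroups<double*>) in one call. Illustrative sketch with made-up parameter pointers:

#include <vector>
#include "ceres/ordered_groups.h"

void PrunePoints(double* camera, double* point_a, double* point_b) {
  ceres::ParameterBlockOrdering ordering;
  ordering.AddElementToGroup(point_a, 0);   // eliminate points first
  ordering.AddElementToGroup(point_b, 0);
  ordering.AddElementToGroup(camera, 1);

  // Drop both points in one call; the return value is the number removed (2).
  std::vector<double*> to_remove;
  to_remove.push_back(point_a);
  to_remove.push_back(point_b);
  ordering.Remove(to_remove);
}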
diff --git a/include/ceres/problem.h b/include/ceres/problem.h
index 663616d..b1cb99a 100644
--- a/include/ceres/problem.h
+++ b/include/ceres/problem.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
@@ -39,11 +39,12 @@
#include <set>
#include <vector>
+#include "glog/logging.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
-#include "glog/logging.h"
+#include "ceres/internal/disable_warnings.h"
namespace ceres {
@@ -117,14 +118,14 @@ typedef internal::ResidualBlock* ResidualBlockId;
// problem.AddResidualBlock(new MyBinaryCostFunction(...), x2, x3);
//
// Please see cost_function.h for details of the CostFunction object.
-class Problem {
+class CERES_EXPORT Problem {
public:
- struct Options {
+ struct CERES_EXPORT Options {
Options()
: cost_function_ownership(TAKE_OWNERSHIP),
loss_function_ownership(TAKE_OWNERSHIP),
local_parameterization_ownership(TAKE_OWNERSHIP),
- enable_fast_parameter_block_removal(false),
+ enable_fast_removal(false),
disable_all_safety_checks(false) {}
// These flags control whether the Problem object owns the cost
@@ -138,17 +139,21 @@ class Problem {
Ownership loss_function_ownership;
Ownership local_parameterization_ownership;
- // If true, trades memory for a faster RemoveParameterBlock() operation.
+ // If true, trades memory for faster RemoveResidualBlock() and
+ // RemoveParameterBlock() operations.
//
- // RemoveParameterBlock() takes time proportional to the size of the entire
- // Problem. If you only remove parameter blocks from the Problem
- // occassionaly, this may be acceptable. However, if you are modifying the
- // Problem frequently, and have memory to spare, then flip this switch to
+ // By default, RemoveParameterBlock() and RemoveResidualBlock() take time
+ // proportional to the size of the entire problem. If you only ever remove
+ // parameters or residuals from the problem occasionally, this might be
+ // acceptable. However, if you have memory to spare, enable this option to
// make RemoveParameterBlock() take time proportional to the number of
- // residual blocks that depend on it. The increase in memory usage is an
- // additonal hash set per parameter block containing all the residuals that
- // depend on the parameter block.
- bool enable_fast_parameter_block_removal;
+ // residual blocks that depend on it, and RemoveResidualBlock() take (on
+ // average) constant time.
+ //
+ // The increase in memory usage is twofold: an additional hash set per
+ // parameter block containing all the residuals that depend on the parameter
+ // block; and a hash set in the problem containing all residuals.
+ bool enable_fast_removal;
// By default, Ceres performs a variety of safety checks when constructing
// the problem. There is a small but measurable performance penalty to
@@ -276,7 +281,7 @@ class Problem {
// residual blocks that depend on the parameter are also removed, as
// described above in RemoveResidualBlock().
//
- // If Problem::Options::enable_fast_parameter_block_removal is true, then the
+ // If Problem::Options::enable_fast_removal is true, then the
// removal is fast (almost constant time). Otherwise, removing a parameter
// block will incur a scan of the entire Problem object.
//
@@ -300,7 +305,7 @@ class Problem {
// Hold the indicated parameter block constant during optimization.
void SetParameterBlockConstant(double* values);
- // Allow the indicated parameter to vary during optimization.
+ // Allow the indicated parameter block to vary during optimization.
void SetParameterBlockVariable(double* values);
// Set the local parameterization for one of the parameter blocks.
@@ -312,6 +317,15 @@ class Problem {
void SetParameterization(double* values,
LocalParameterization* local_parameterization);
+ // Get the local parameterization object associated with this
+ // parameter block. If there is no parameterization object
+ // associated then NULL is returned.
+ const LocalParameterization* GetParameterization(double* values) const;
+
+ // Set the lower/upper bound for the parameter with position "index".
+ void SetParameterLowerBound(double* values, int index, double lower_bound);
+ void SetParameterUpperBound(double* values, int index, double upper_bound);
+
// Number of parameter blocks in the problem. Always equals
// parameter_blocks().size() and parameter_block_sizes().size().
int NumParameterBlocks() const;
@@ -336,11 +350,34 @@ class Problem {
// block, then ParameterBlockLocalSize = ParameterBlockSize.
int ParameterBlockLocalSize(const double* values) const;
+ // Is the given parameter block present in this problem or not?
+ bool HasParameterBlock(const double* values) const;
+
// Fills the passed parameter_blocks vector with pointers to the
// parameter blocks currently in the problem. After this call,
// parameter_block.size() == NumParameterBlocks.
void GetParameterBlocks(vector<double*>* parameter_blocks) const;
+ // Fills the passed residual_blocks vector with pointers to the
+ // residual blocks currently in the problem. After this call,
+ // residual_blocks.size() == NumResidualBlocks.
+ void GetResidualBlocks(vector<ResidualBlockId>* residual_blocks) const;
+
+ // Get all the parameter blocks that depend on the given residual block.
+ void GetParameterBlocksForResidualBlock(
+ const ResidualBlockId residual_block,
+ vector<double*>* parameter_blocks) const;
+
+ // Get all the residual blocks that depend on the given parameter block.
+ //
+ // If Problem::Options::enable_fast_removal is true, then
+ // getting the residual blocks is fast and depends only on the number of
+ // residual blocks. Otherwise, getting the residual blocks for a parameter
+ // block will incur a scan of the entire Problem object.
+ void GetResidualBlocksForParameterBlock(
+ const double* values,
+ vector<ResidualBlockId>* residual_blocks) const;
+
// Options struct to control Problem::Evaluate.
struct EvaluateOptions {
EvaluateOptions()
@@ -430,4 +467,6 @@ class Problem {
} // namespace ceres
+#include "ceres/internal/reenable_warnings.h"
+
#endif // CERES_PUBLIC_PROBLEM_H_
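The renamed enable_fast_removal option and the new query and bounds methods combine as in the following sketch (illustrative; residual blocks referencing x are assumed to be added where indicated):

#include "ceres/ceres.h"

void ConfigureProblem(double* x) {
  ceres::Problem::Options options;
  // Trade some extra memory so RemoveResidualBlock()/RemoveParameterBlock()
  // and GetResidualBlocksForParameterBlock() run fast.
  options.enable_fast_removal = true;
  ceres::Problem problem(options);

  // ... AddResidualBlock(...) calls referencing x go here ...

  if (problem.HasParameterBlock(x)) {
    // Constrain x[0] to lie in [0, 10] during optimization.
    problem.SetParameterLowerBound(x, 0, 0.0);
    problem.SetParameterUpperBound(x, 0, 10.0);
  }
}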
diff --git a/include/ceres/rotation.h b/include/ceres/rotation.h
index ea0b769..e3dbfe8 100644
--- a/include/ceres/rotation.h
+++ b/include/ceres/rotation.h
@@ -395,7 +395,7 @@ void AngleAxisToRotationMatrix(
const MatrixAdapter<T, row_stride, col_stride>& R) {
static const T kOne = T(1.0);
const T theta2 = DotProduct(angle_axis, angle_axis);
- if (theta2 > 0.0) {
+ if (theta2 > T(std::numeric_limits<double>::epsilon())) {
// We want to be careful to only evaluate the square root if the
// norm of the angle_axis vector is greater than zero. Otherwise
// we get a division by zero.
@@ -417,15 +417,15 @@ void AngleAxisToRotationMatrix(
R(1, 2) = -wx*sintheta + wy*wz*(kOne - costheta);
R(2, 2) = costheta + wz*wz*(kOne - costheta);
} else {
- // At zero, we switch to using the first order Taylor expansion.
+ // Near zero, we switch to using the first order Taylor expansion.
R(0, 0) = kOne;
- R(1, 0) = -angle_axis[2];
- R(2, 0) = angle_axis[1];
- R(0, 1) = angle_axis[2];
+ R(1, 0) = angle_axis[2];
+ R(2, 0) = -angle_axis[1];
+ R(0, 1) = -angle_axis[2];
R(1, 1) = kOne;
- R(2, 1) = -angle_axis[0];
- R(0, 2) = -angle_axis[1];
- R(1, 2) = angle_axis[0];
+ R(2, 1) = angle_axis[0];
+ R(0, 2) = angle_axis[1];
+ R(1, 2) = -angle_axis[0];
R(2, 2) = kOne;
}
}
@@ -580,12 +580,8 @@ T DotProduct(const T x[3], const T y[3]) {
template<typename T> inline
void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]) {
- T w[3];
- T sintheta;
- T costheta;
-
const T theta2 = DotProduct(angle_axis, angle_axis);
- if (theta2 > 0.0) {
+ if (theta2 > T(std::numeric_limits<double>::epsilon())) {
// Away from zero, use the rodriguez formula
//
// result = pt costheta +
@@ -597,19 +593,25 @@ void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]) {
// we get a division by zero.
//
const T theta = sqrt(theta2);
- w[0] = angle_axis[0] / theta;
- w[1] = angle_axis[1] / theta;
- w[2] = angle_axis[2] / theta;
- costheta = cos(theta);
- sintheta = sin(theta);
- T w_cross_pt[3];
- CrossProduct(w, pt, w_cross_pt);
- T w_dot_pt = DotProduct(w, pt);
- for (int i = 0; i < 3; ++i) {
- result[i] = pt[i] * costheta +
- w_cross_pt[i] * sintheta +
- w[i] * (T(1.0) - costheta) * w_dot_pt;
- }
+ const T costheta = cos(theta);
+ const T sintheta = sin(theta);
+ const T theta_inverse = 1.0 / theta;
+
+ const T w[3] = { angle_axis[0] * theta_inverse,
+ angle_axis[1] * theta_inverse,
+ angle_axis[2] * theta_inverse };
+
+ // Explicitly inlined evaluation of the cross product for
+ // performance reasons.
+ const T w_cross_pt[3] = { w[1] * pt[2] - w[2] * pt[1],
+ w[2] * pt[0] - w[0] * pt[2],
+ w[0] * pt[1] - w[1] * pt[0] };
+ const T tmp =
+ (w[0] * pt[0] + w[1] * pt[1] + w[2] * pt[2]) * (T(1.0) - costheta);
+
+ result[0] = pt[0] * costheta + w_cross_pt[0] * sintheta + w[0] * tmp;
+ result[1] = pt[1] * costheta + w_cross_pt[1] * sintheta + w[1] * tmp;
+ result[2] = pt[2] * costheta + w_cross_pt[2] * sintheta + w[2] * tmp;
} else {
// Near zero, the first order Taylor approximation of the rotation
// matrix R corresponding to a vector w and angle w is
@@ -623,13 +625,18 @@ void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]) {
// and actually performing multiplication with the point pt, gives us
// R * pt = pt + w x pt.
//
- // Switching to the Taylor expansion at zero helps avoid all sorts
- // of numerical nastiness.
- T w_cross_pt[3];
- CrossProduct(angle_axis, pt, w_cross_pt);
- for (int i = 0; i < 3; ++i) {
- result[i] = pt[i] + w_cross_pt[i];
- }
+ // Switching to the Taylor expansion near zero provides meaningful
+ // derivatives when evaluated using Jets.
+ //
+ // Explicitly inlined evaluation of the cross product for
+ // performance reasons.
+ const T w_cross_pt[3] = { angle_axis[1] * pt[2] - angle_axis[2] * pt[1],
+ angle_axis[2] * pt[0] - angle_axis[0] * pt[2],
+ angle_axis[0] * pt[1] - angle_axis[1] * pt[0] };
+
+ result[0] = pt[0] + w_cross_pt[0];
+ result[1] = pt[1] + w_cross_pt[1];
+ result[2] = pt[2] + w_cross_pt[2];
}
}
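The rewritten AngleAxisRotatePoint keeps the Rodrigues formula away from zero and switches to pt + w cross pt near zero, so Jet evaluations still produce meaningful derivatives. Minimal usage sketch with plain doubles (illustrative only):

#include <iostream>
#include "ceres/rotation.h"

int main() {
  // Rotate the point (1, 0, 0) by 90 degrees about the z axis.
  const double angle_axis[3] = {0.0, 0.0, 1.5707963267948966};
  const double pt[3] = {1.0, 0.0, 0.0};
  double result[3];
  ceres::AngleAxisRotatePoint(angle_axis, pt, result);
  // Expected result is approximately (0, 1, 0).
  std::cout << result[0] << " " << result[1] << " " << result[2] << "\n";
  return 0;
}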
diff --git a/include/ceres/solver.h b/include/ceres/solver.h
index 25b762a..fdc5457 100644
--- a/include/ceres/solver.h
+++ b/include/ceres/solver.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
@@ -40,13 +40,14 @@
#include "ceres/iteration_callback.h"
#include "ceres/ordered_groups.h"
#include "ceres/types.h"
+#include "ceres/internal/disable_warnings.h"
namespace ceres {
class Problem;
// Interface for non-linear least squares solvers.
-class Solver {
+class CERES_EXPORT Solver {
public:
virtual ~Solver();
@@ -55,7 +56,7 @@ class Solver {
// problems; however, better performance is often obtainable with tweaking.
//
// The constants are defined inside types.h
- struct Options {
+ struct CERES_EXPORT Options {
// Default constructor that sets up a generic sparse problem.
Options() {
minimizer_type = TRUST_REGION;
@@ -91,31 +92,40 @@ class Solver {
gradient_tolerance = 1e-10;
parameter_tolerance = 1e-8;
-#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
+#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE) && !defined(CERES_ENABLE_LGPL_CODE)
linear_solver_type = DENSE_QR;
#else
linear_solver_type = SPARSE_NORMAL_CHOLESKY;
#endif
preconditioner_type = JACOBI;
-
+ visibility_clustering_type = CANONICAL_VIEWS;
dense_linear_algebra_library_type = EIGEN;
+
+ // Choose a default sparse linear algebra library in the order:
+ //
+ // SUITE_SPARSE > CX_SPARSE > EIGEN_SPARSE
+#if !defined(CERES_NO_SUITESPARSE)
sparse_linear_algebra_library_type = SUITE_SPARSE;
-#if defined(CERES_NO_SUITESPARSE) && !defined(CERES_NO_CXSPARSE)
+#else
+ #if !defined(CERES_NO_CXSPARSE)
sparse_linear_algebra_library_type = CX_SPARSE;
+ #else
+ #if defined(CERES_USE_EIGEN_SPARSE)
+ sparse_linear_algebra_library_type = EIGEN_SPARSE;
+ #endif
+ #endif
#endif
-
num_linear_solver_threads = 1;
- linear_solver_ordering = NULL;
use_postordering = false;
+ dynamic_sparsity = false;
min_linear_solver_iterations = 1;
max_linear_solver_iterations = 500;
eta = 1e-1;
jacobi_scaling = true;
use_inner_iterations = false;
inner_iteration_tolerance = 1e-3;
- inner_iteration_ordering = NULL;
logging_type = PER_MINIMIZER_ITERATION;
minimizer_progress_to_stdout = false;
trust_region_problem_dump_directory = "/tmp";
@@ -126,7 +136,11 @@ class Solver {
update_state_every_iteration = false;
}
- ~Options();
+ // Returns true if the options struct has a valid
+ // configuration. Returns false otherwise, and fills in *error
+ // with a message describing the problem.
+ bool IsValid(string* error) const;
+
// Minimizer options ----------------------------------------
// Ceres supports the two major families of optimization strategies -
@@ -367,7 +381,7 @@ class Solver {
// Minimizer terminates when
//
- // max_i |gradient_i| < gradient_tolerance * max_i|initial_gradient_i|
+ // max_i |x - Project(Plus(x, -g(x)))| < gradient_tolerance
//
// This value should typically be 1e-4 * function_tolerance.
double gradient_tolerance;
@@ -385,6 +399,11 @@ class Solver {
// Type of preconditioner to use with the iterative linear solvers.
PreconditionerType preconditioner_type;
+ // Type of clustering algorithm to use for visibility based
+ // preconditioning. This option is used only when the
+ // preconditioner_type is CLUSTER_JACOBI or CLUSTER_TRIDIAGONAL.
+ VisibilityClusteringType visibility_clustering_type;
+
// Ceres supports using multiple dense linear algebra libraries
// for dense matrix factorizations. Currently EIGEN and LAPACK are
// the valid choices. EIGEN is always available, LAPACK refers to
@@ -475,10 +494,7 @@ class Solver {
// the parameter blocks into two groups, one for the points and one
// for the cameras, where the group containing the points has an id
// smaller than the group containing cameras.
- //
- // Once assigned, Solver::Options owns this pointer and will
- // deallocate the memory when destroyed.
- ParameterBlockOrdering* linear_solver_ordering;
+ shared_ptr<ParameterBlockOrdering> linear_solver_ordering;
// Sparse Cholesky factorization algorithms use a fill-reducing
// ordering to permute the columns of the Jacobian matrix. There
@@ -501,6 +517,21 @@ class Solver {
// matrix. Setting use_postordering to true enables this tradeoff.
bool use_postordering;
+ // Some non-linear least squares problems are symbolically dense but
+ // numerically sparse. i.e. at any given state only a small number
+ // of jacobian entries are non-zero, but the position and number of
+ // non-zeros is different depending on the state. For these problems
+ // it can be useful to factorize the sparse jacobian at each solver
+ // iteration instead of including all of the zero entries in a single
+ // general factorization.
+ //
+ // If your problem does not have this property (or you do not know),
+ // then it is probably best to keep this false, otherwise it will
+ // likely lead to worse performance.
+
+ // This setting affects the SPARSE_NORMAL_CHOLESKY solver.
+ bool dynamic_sparsity;
+
// Some non-linear least squares problems have additional
// structure in the way the parameter blocks interact that it is
// beneficial to modify the way the trust region step is computed.
@@ -571,7 +602,7 @@ class Solver {
// the lower numbered groups are optimized before the higher
// number groups. Each group must be an independent set. Not
// all parameter blocks need to be present in the ordering.
- ParameterBlockOrdering* inner_iteration_ordering;
+ shared_ptr<ParameterBlockOrdering> inner_iteration_ordering;
// Generally speaking, inner iterations make significant progress
// in the early stages of the solve and then their contribution
@@ -692,13 +723,9 @@ class Solver {
//
// The solver does NOT take ownership of these pointers.
vector<IterationCallback*> callbacks;
-
- // If non-empty, a summary of the execution of the solver is
- // recorded to this file.
- string solver_log;
};
- struct Summary {
+ struct CERES_EXPORT Summary {
Summary();
// A brief one line description of the state of the solver after
@@ -709,18 +736,22 @@ class Solver {
// termination.
string FullReport() const;
+ bool IsSolutionUsable() const;
+
// Minimizer summary -------------------------------------------------
MinimizerType minimizer_type;
- SolverTerminationType termination_type;
+ TerminationType termination_type;
- // If the solver did not run, or there was a failure, a
- // description of the error.
- string error;
+ // Reason why the solver terminated.
+ string message;
- // Cost of the problem before and after the optimization. See
- // problem.h for definition of the cost of a problem.
+ // Cost of the problem (value of the objective function) before
+ // the optimization.
double initial_cost;
+
+ // Cost of the problem (value of the objective function) after the
+ // optimization.
double final_cost;
// The part of the total cost that comes from residual blocks that
@@ -728,10 +759,21 @@ class Solver {
// blocks that they depend on were fixed.
double fixed_cost;
+ // IterationSummary for each minimizer iteration in order.
vector<IterationSummary> iterations;
+ // Number of minimizer iterations in which the step was
+ // accepted. Unless use_non_monotonic_steps is true this is also
+ // the number of steps in which the objective function value/cost
+ // went down.
int num_successful_steps;
+
+ // Number of minimizer iterations in which the step was rejected
+ // either because it did not reduce the cost enough or the step
+ // was not numerically valid.
int num_unsuccessful_steps;
+
+ // Number of times inner iterations were performed.
int num_inner_iteration_steps;
// All times reported below are wall times.
@@ -753,58 +795,160 @@ class Solver {
// Sum total of all time spent inside Ceres when Solve is called.
double total_time_in_seconds;
+ // Time (in seconds) spent in the linear solver computing the
+ // trust region step.
double linear_solver_time_in_seconds;
+
+ // Time (in seconds) spent evaluating the residual vector.
double residual_evaluation_time_in_seconds;
+
+ // Time (in seconds) spent evaluating the jacobian matrix.
double jacobian_evaluation_time_in_seconds;
+
+ // Time (in seconds) spent doing inner iterations.
double inner_iteration_time_in_seconds;
- // Preprocessor summary.
+ // Number of parameter blocks in the problem.
int num_parameter_blocks;
+
+ // Number of parameters in the problem.
int num_parameters;
+
+ // Dimension of the tangent space of the problem (or the number of
+ // columns in the Jacobian for the problem). This is different
+ // from num_parameters if a parameter block is associated with a
+ // LocalParameterization.
int num_effective_parameters;
+
+ // Number of residual blocks in the problem.
int num_residual_blocks;
+
+ // Number of residuals in the problem.
int num_residuals;
+ // Number of parameter blocks in the problem after the inactive
+ // and constant parameter blocks have been removed. A parameter
+ // block is inactive if no residual block refers to it.
int num_parameter_blocks_reduced;
+
+ // Number of parameters in the reduced problem.
int num_parameters_reduced;
+
+ // Dimension of the tangent space of the reduced problem (or the
+ // number of columns in the Jacobian for the reduced
+ // problem). This is different from num_parameters_reduced if a
+ // parameter block in the reduced problem is associated with a
+ // LocalParameterization.
int num_effective_parameters_reduced;
+
+ // Number of residual blocks in the reduced problem.
int num_residual_blocks_reduced;
- int num_residuals_reduced;
- int num_eliminate_blocks_given;
- int num_eliminate_blocks_used;
+ // Number of residuals in the reduced problem.
+ int num_residuals_reduced;
+ // Number of threads specified by the user for Jacobian and
+ // residual evaluation.
int num_threads_given;
+
+ // Number of threads actually used by the solver for Jacobian and
+ // residual evaluation. This number is not equal to
+ // num_threads_given if OpenMP is not available.
int num_threads_used;
+ // Number of threads specified by the user for solving the trust
+ // region problem.
int num_linear_solver_threads_given;
+
+ // Number of threads actually used by the solver for solving the
+ // trust region problem. This number is not equal to
+ // num_threads_given if OpenMP is not available.
int num_linear_solver_threads_used;
+ // Type of the linear solver requested by the user.
LinearSolverType linear_solver_type_given;
+
+ // Type of the linear solver actually used. This may be different
+ // from linear_solver_type_given if Ceres determines that the
+ // problem structure is not compatible with the linear solver
+ // requested or if the linear solver requested by the user is not
+ // available, e.g. the user requested SPARSE_NORMAL_CHOLESKY but
+ // no sparse linear algebra library was available.
LinearSolverType linear_solver_type_used;
+ // Size of the elimination groups given by the user as hints to
+ // the linear solver.
vector<int> linear_solver_ordering_given;
+
+ // Size of the parameter groups used by the solver when ordering
+ // the columns of the Jacobian. This may be different from
+ // linear_solver_ordering_given if the user left
+ // linear_solver_ordering_given blank and asked for an automatic
+ // ordering, or if the problem contains some constant or inactive
+ // parameter blocks.
vector<int> linear_solver_ordering_used;
+ // True if the user asked for inner iterations to be used as part
+ // of the optimization.
bool inner_iterations_given;
+
+ // True if the user asked for inner iterations to be used as part
+ // of the optimization and the problem structure was such that
+ // they were actually performed. e.g., in a problem with just one
+ // parameter block, inner iterations are not performed.
bool inner_iterations_used;
+ // Size of the parameter groups given by the user for performing
+ // inner iterations.
vector<int> inner_iteration_ordering_given;
+
+ // Size of the parameter groups used by the solver for
+ // performing inner iterations. This may be different from
+ // inner_iteration_ordering_given if the user left
+ // inner_iteration_ordering_given blank and asked for an automatic
+ // ordering, or if the problem contains some constant or inactive
+ // parameter blocks.
vector<int> inner_iteration_ordering_used;
+ // Type of preconditioner used for solving the trust region
+ // step. Only meaningful when an iterative linear solver is used.
PreconditionerType preconditioner_type;
+ // Type of clustering algorithm used for visibility based
+ // preconditioning. Only meaningful when the preconditioner_type
+ // is CLUSTER_JACOBI or CLUSTER_TRIDIAGONAL.
+ VisibilityClusteringType visibility_clustering_type;
+
+ // Type of trust region strategy.
TrustRegionStrategyType trust_region_strategy_type;
+
+ // Type of dogleg strategy used for solving the trust region
+ // problem.
DoglegType dogleg_type;
+ // Type of the dense linear algebra library used.
DenseLinearAlgebraLibraryType dense_linear_algebra_library_type;
+
+ // Type of the sparse linear algebra library used.
SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type;
+ // Type of line search direction used.
LineSearchDirectionType line_search_direction_type;
+
+ // Type of the line search algorithm used.
LineSearchType line_search_type;
+
+ // When performing line search, the degree of the polynomial used
+ // to approximate the objective function.
LineSearchInterpolationType line_search_interpolation_type;
+
+ // If the line search direction is NONLINEAR_CONJUGATE_GRADIENT,
+ // then this indicates the particular variant of non-linear
+ // conjugate gradient used.
NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;
+ // If the type of the line search direction is LBFGS, then this
+ // indicates the rank of the Hessian approximation.
int max_lbfgs_rank;
};
@@ -819,10 +963,12 @@ class Solver {
};
// Helper function which avoids going through the interface.
-void Solve(const Solver::Options& options,
+CERES_EXPORT void Solve(const Solver::Options& options,
Problem* problem,
Solver::Summary* summary);
} // namespace ceres
+#include "ceres/internal/reenable_warnings.h"
+
#endif // CERES_PUBLIC_SOLVER_H_
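With the API changes above, a typical solve now validates the options explicitly and checks the new TerminationType. Hedged sketch, assuming the problem has already been populated elsewhere:

#include <string>
#include "ceres/ceres.h"

void SolveProblem(ceres::Problem* problem) {
  ceres::Solver::Options options;
  options.linear_solver_type = ceres::DENSE_QR;
  options.minimizer_progress_to_stdout = true;

  std::string error;
  if (!options.IsValid(&error)) {
    // Invalid configuration; 'error' describes what is wrong.
    return;
  }

  ceres::Solver::Summary summary;
  ceres::Solve(options, problem, &summary);
  if (summary.termination_type == ceres::CONVERGENCE) {
    // The parameter blocks now hold the converged solution.
  }
}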
diff --git a/include/ceres/types.h b/include/ceres/types.h
index ffa743a..a07c893 100644
--- a/include/ceres/types.h
+++ b/include/ceres/types.h
@@ -40,12 +40,12 @@
#include <string>
#include "ceres/internal/port.h"
+#include "ceres/internal/disable_warnings.h"
namespace ceres {
// Basic integer types. These typedefs are in the Ceres namespace to avoid
// conflicts with other packages having similar typedefs.
-typedef short int16;
typedef int int32;
// Argument type used in interfaces that can optionally take ownership
@@ -102,28 +102,62 @@ enum PreconditionerType {
// Block diagonal of the Gauss-Newton Hessian.
JACOBI,
+ // Note: The following three preconditioners can only be used with
+ // the ITERATIVE_SCHUR solver. They are well suited for Structure
+ // from Motion problems.
+
// Block diagonal of the Schur complement. This preconditioner may
// only be used with the ITERATIVE_SCHUR solver.
SCHUR_JACOBI,
// Visibility clustering based preconditioners.
//
- // These preconditioners are well suited for Structure from Motion
- // problems, particularly problems arising from community photo
- // collections. These preconditioners use the visibility structure
- // of the scene to determine the sparsity structure of the
- // preconditioner. Requires SuiteSparse/CHOLMOD.
+ // The following two preconditioners use the visibility structure of
+ // the scene to determine the sparsity structure of the
+ // preconditioner. This is done using a clustering algorithm. The
+ // available visibility clustering algorithms are described below.
+ //
+ // Note: Requires SuiteSparse.
CLUSTER_JACOBI,
CLUSTER_TRIDIAGONAL
};
+enum VisibilityClusteringType {
+ // Canonical views algorithm as described in
+ //
+ // "Scene Summarization for Online Image Collections", Ian Simon, Noah
+ // Snavely, Steven M. Seitz, ICCV 2007.
+ //
+ // This clustering algorithm can be quite slow, but gives high
+ // quality clusters. The original visibility based clustering paper
+ // used this algorithm.
+ CANONICAL_VIEWS,
+
+ // The classic single linkage algorithm. It is extremely fast as
+ // compared to CANONICAL_VIEWS, but can give slightly poorer
+ // results. For problems with large number of cameras though, this
+ // is generally a pretty good option.
+ //
+ // If you are using SCHUR_JACOBI preconditioner and have SuiteSparse
+ // available, CLUSTER_JACOBI and CLUSTER_TRIDIAGONAL in combination
+ // with the SINGLE_LINKAGE algorithm will generally give better
+ // results.
+ SINGLE_LINKAGE
+};
+
enum SparseLinearAlgebraLibraryType {
// High performance sparse Cholesky factorization and approximate
// minimum degree ordering.
SUITE_SPARSE,
- // A lightweight replacment for SuiteSparse.
- CX_SPARSE
+ // A lightweight replacement for SuiteSparse, which does not require
+ // a LAPACK/BLAS implementation. Consequently, its performance is
+ // also a bit lower than SuiteSparse.
+ CX_SPARSE,
+
+ // Eigen's sparse linear algebra routines. In particular Ceres uses
+ // the Simplicial LDLT routines.
+ EIGEN_SPARSE
};
enum DenseLinearAlgebraLibraryType {
@@ -131,26 +165,6 @@ enum DenseLinearAlgebraLibraryType {
LAPACK
};
-enum LinearSolverTerminationType {
- // Termination criterion was met. For factorization based solvers
- // the tolerance is assumed to be zero. Any user provided values are
- // ignored.
- TOLERANCE,
-
- // Solver ran for max_num_iterations and terminated before the
- // termination tolerance could be satified.
- MAX_ITERATIONS,
-
- // Solver is stuck and further iterations will not result in any
- // measurable progress.
- STAGNATION,
-
- // Solver failed. Solver was terminated due to numerical errors. The
- // exact cause of failure depends on the particular solver being
- // used.
- FAILURE
-};
-
// Logging options
// The options get progressively noisier.
enum LoggingType {
@@ -239,7 +253,7 @@ enum LineSearchDirectionType {
// details see Numerical Optimization by Nocedal & Wright.
enum NonlinearConjugateGradientType {
FLETCHER_REEVES,
- POLAK_RIBIRERE,
+ POLAK_RIBIERE,
HESTENES_STIEFEL,
};
@@ -293,41 +307,42 @@ enum DoglegType {
SUBSPACE_DOGLEG
};
-enum SolverTerminationType {
- // The minimizer did not run at all; usually due to errors in the user's
- // Problem or the solver options.
- DID_NOT_RUN,
+enum TerminationType {
+ // Minimizer terminated because one of the convergence criterion set
+ // by the user was satisfied.
+ //
+ // 1. (new_cost - old_cost) < function_tolerance * old_cost;
+ // 2. max_i |gradient_i| < gradient_tolerance
+ // 3. |step|_2 <= parameter_tolerance * ( |x|_2 + parameter_tolerance)
+ //
+ // The user's parameter blocks will be updated with the solution.
+ CONVERGENCE,
- // The solver ran for maximum number of iterations specified by the
- // user, but none of the convergence criterion specified by the user
- // were met.
+ // The solver ran for maximum number of iterations or maximum amount
+ // of time specified by the user, but none of the convergence
+ // criterion specified by the user were met. The user's parameter
+ // blocks will be updated with the solution found so far.
NO_CONVERGENCE,
- // Minimizer terminated because
- // (new_cost - old_cost) < function_tolerance * old_cost;
- FUNCTION_TOLERANCE,
-
- // Minimizer terminated because
- // max_i |gradient_i| < gradient_tolerance * max_i|initial_gradient_i|
- GRADIENT_TOLERANCE,
-
- // Minimized terminated because
- // |step|_2 <= parameter_tolerance * ( |x|_2 + parameter_tolerance)
- PARAMETER_TOLERANCE,
-
- // The minimizer terminated because it encountered a numerical error
- // that it could not recover from.
- NUMERICAL_FAILURE,
+ // The minimizer terminated because of an error. The user's
+ // parameter blocks will not be updated.
+ FAILURE,
// Using an IterationCallback object, user code can control the
// minimizer. The following enums indicate that the user code was
// responsible for termination.
+ //
+ // Minimizer terminated successfully because a user
+ // IterationCallback returned SOLVER_TERMINATE_SUCCESSFULLY.
+ //
+ // The user's parameter blocks will be updated with the solution.
+ USER_SUCCESS,
- // User's IterationCallback returned SOLVER_ABORT.
- USER_ABORT,
-
- // User's IterationCallback returned SOLVER_TERMINATE_SUCCESSFULLY
- USER_SUCCESS
+ // Minimizer terminated because a user IterationCallback
+ // returned SOLVER_ABORT.
+ //
+ // The user's parameter blocks will not be updated.
+ USER_FAILURE
};
// Enums used by the IterationCallback instances to indicate to the
@@ -370,9 +385,9 @@ enum DumpFormatType {
TEXTFILE
};
-// For SizedCostFunction and AutoDiffCostFunction, DYNAMIC can be specified for
-// the number of residuals. If specified, then the number of residuas for that
-// cost function can vary at runtime.
+// For SizedCostFunction and AutoDiffCostFunction, DYNAMIC can be
+// specified for the number of residuals. If specified, then the
+ // number of residuals for that cost function can vary at runtime.
enum DimensionType {
DYNAMIC = -1
};
@@ -390,74 +405,83 @@ enum LineSearchInterpolationType {
enum CovarianceAlgorithmType {
DENSE_SVD,
- SPARSE_CHOLESKY,
- SPARSE_QR
+ SUITE_SPARSE_QR,
+ EIGEN_SPARSE_QR
};
-const char* LinearSolverTypeToString(LinearSolverType type);
-bool StringToLinearSolverType(string value, LinearSolverType* type);
+CERES_EXPORT const char* LinearSolverTypeToString(
+ LinearSolverType type);
+CERES_EXPORT bool StringToLinearSolverType(string value,
+ LinearSolverType* type);
-const char* PreconditionerTypeToString(PreconditionerType type);
-bool StringToPreconditionerType(string value, PreconditionerType* type);
+CERES_EXPORT const char* PreconditionerTypeToString(PreconditionerType type);
+CERES_EXPORT bool StringToPreconditionerType(string value,
+ PreconditionerType* type);
-const char* SparseLinearAlgebraLibraryTypeToString(
+CERES_EXPORT const char* VisibilityClusteringTypeToString(
+ VisibilityClusteringType type);
+CERES_EXPORT bool StringToVisibilityClusteringType(string value,
+ VisibilityClusteringType* type);
+
+CERES_EXPORT const char* SparseLinearAlgebraLibraryTypeToString(
SparseLinearAlgebraLibraryType type);
-bool StringToSparseLinearAlgebraLibraryType(
+CERES_EXPORT bool StringToSparseLinearAlgebraLibraryType(
string value,
SparseLinearAlgebraLibraryType* type);
-const char* DenseLinearAlgebraLibraryTypeToString(
+CERES_EXPORT const char* DenseLinearAlgebraLibraryTypeToString(
DenseLinearAlgebraLibraryType type);
-bool StringToDenseLinearAlgebraLibraryType(
+CERES_EXPORT bool StringToDenseLinearAlgebraLibraryType(
string value,
DenseLinearAlgebraLibraryType* type);
-const char* TrustRegionStrategyTypeToString(TrustRegionStrategyType type);
-bool StringToTrustRegionStrategyType(string value,
+CERES_EXPORT const char* TrustRegionStrategyTypeToString(
+ TrustRegionStrategyType type);
+CERES_EXPORT bool StringToTrustRegionStrategyType(string value,
TrustRegionStrategyType* type);
-const char* DoglegTypeToString(DoglegType type);
-bool StringToDoglegType(string value, DoglegType* type);
+CERES_EXPORT const char* DoglegTypeToString(DoglegType type);
+CERES_EXPORT bool StringToDoglegType(string value, DoglegType* type);
-const char* MinimizerTypeToString(MinimizerType type);
-bool StringToMinimizerType(string value, MinimizerType* type);
+CERES_EXPORT const char* MinimizerTypeToString(MinimizerType type);
+CERES_EXPORT bool StringToMinimizerType(string value, MinimizerType* type);
-const char* LineSearchDirectionTypeToString(LineSearchDirectionType type);
-bool StringToLineSearchDirectionType(string value,
+CERES_EXPORT const char* LineSearchDirectionTypeToString(
+ LineSearchDirectionType type);
+CERES_EXPORT bool StringToLineSearchDirectionType(string value,
LineSearchDirectionType* type);
-const char* LineSearchTypeToString(LineSearchType type);
-bool StringToLineSearchType(string value, LineSearchType* type);
+CERES_EXPORT const char* LineSearchTypeToString(LineSearchType type);
+CERES_EXPORT bool StringToLineSearchType(string value, LineSearchType* type);
-const char* NonlinearConjugateGradientTypeToString(
+CERES_EXPORT const char* NonlinearConjugateGradientTypeToString(
NonlinearConjugateGradientType type);
-bool StringToNonlinearConjugateGradientType(
+CERES_EXPORT bool StringToNonlinearConjugateGradientType(
string value,
NonlinearConjugateGradientType* type);
-const char* LineSearchInterpolationTypeToString(
+CERES_EXPORT const char* LineSearchInterpolationTypeToString(
LineSearchInterpolationType type);
-bool StringToLineSearchInterpolationType(
+CERES_EXPORT bool StringToLineSearchInterpolationType(
string value,
LineSearchInterpolationType* type);
-const char* CovarianceAlgorithmTypeToString(
+CERES_EXPORT const char* CovarianceAlgorithmTypeToString(
CovarianceAlgorithmType type);
-bool StringToCovarianceAlgorithmType(
+CERES_EXPORT bool StringToCovarianceAlgorithmType(
string value,
CovarianceAlgorithmType* type);
-const char* LinearSolverTerminationTypeToString(
- LinearSolverTerminationType type);
-
-const char* SolverTerminationTypeToString(SolverTerminationType type);
+CERES_EXPORT const char* TerminationTypeToString(TerminationType type);
-bool IsSchurType(LinearSolverType type);
-bool IsSparseLinearAlgebraLibraryTypeAvailable(
+CERES_EXPORT bool IsSchurType(LinearSolverType type);
+CERES_EXPORT bool IsSparseLinearAlgebraLibraryTypeAvailable(
SparseLinearAlgebraLibraryType type);
-bool IsDenseLinearAlgebraLibraryTypeAvailable(
+CERES_EXPORT bool IsDenseLinearAlgebraLibraryTypeAvailable(
DenseLinearAlgebraLibraryType type);
} // namespace ceres
+#include "ceres/internal/reenable_warnings.h"
+
#endif // CERES_PUBLIC_TYPES_H_
diff --git a/include/ceres/version.h b/include/ceres/version.h
new file mode 100644
index 0000000..370b08a
--- /dev/null
+++ b/include/ceres/version.h
@@ -0,0 +1,49 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#ifndef CERES_PUBLIC_VERSION_H_
+#define CERES_PUBLIC_VERSION_H_
+
+#define CERES_VERSION_MAJOR 1
+#define CERES_VERSION_MINOR 10
+#define CERES_VERSION_REVISION 0
+#define CERES_VERSION_ABI 1
+
+// Classic CPP stringification; the extra level of indirection allows the
+// preprocessor to expand the macro before being converted to a string.
+#define CERES_TO_STRING_HELPER(x) #x
+#define CERES_TO_STRING(x) CERES_TO_STRING_HELPER(x)
+
+// The Ceres version as a string; for example "1.9.0".
+#define CERES_VERSION_STRING CERES_TO_STRING(CERES_VERSION_MAJOR) "." \
+ CERES_TO_STRING(CERES_VERSION_MINOR) "." \
+ CERES_TO_STRING(CERES_VERSION_REVISION)
+
+#endif // CERES_PUBLIC_VERSION_H_
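The two-level CERES_TO_STRING indirection forces the numeric macros to be expanded before stringification, so the pieces concatenate into an ordinary string literal. Illustrative check (not part of the patch):

#include <iostream>
#include "ceres/version.h"

int main() {
  // With the values defined above this prints "1.10.0".
  std::cout << CERES_VERSION_STRING << "\n";
  return 0;
}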