aboutsummaryrefslogtreecommitdiff
path: root/internal
diff options
context:
space:
mode:
Diffstat (limited to 'internal')
-rw-r--r--internal/ceres/CMakeLists.txt121
-rw-r--r--internal/ceres/array_utils.cc46
-rw-r--r--internal/ceres/array_utils.h23
-rw-r--r--internal/ceres/array_utils_test.cc64
-rw-r--r--internal/ceres/autodiff_local_parameterization_test.cc42
-rw-r--r--internal/ceres/blas.cc1
-rw-r--r--internal/ceres/blas_test.cc303
-rw-r--r--internal/ceres/block_jacobi_preconditioner.cc4
-rw-r--r--internal/ceres/block_random_access_crs_matrix.cc170
-rw-r--r--internal/ceres/block_random_access_diagonal_matrix.cc120
-rw-r--r--internal/ceres/block_random_access_diagonal_matrix.h (renamed from internal/ceres/block_random_access_crs_matrix.h)52
-rw-r--r--internal/ceres/block_random_access_diagonal_matrix_test.cc (renamed from internal/ceres/block_random_access_crs_matrix_test.cc)114
-rw-r--r--internal/ceres/block_random_access_sparse_matrix.h2
-rw-r--r--internal/ceres/block_structure.cc3
-rw-r--r--internal/ceres/block_structure.h14
-rw-r--r--internal/ceres/callbacks.cc109
-rw-r--r--internal/ceres/callbacks.h71
-rw-r--r--internal/ceres/canonical_views_clustering.cc13
-rw-r--r--internal/ceres/canonical_views_clustering.h8
-rw-r--r--internal/ceres/canonical_views_clustering_test.cc5
-rw-r--r--internal/ceres/cgnr_solver.cc18
-rw-r--r--internal/ceres/collections_port.h62
-rw-r--r--internal/ceres/compressed_row_jacobian_writer.cc74
-rw-r--r--internal/ceres/compressed_row_jacobian_writer.h40
-rw-r--r--internal/ceres/compressed_row_sparse_matrix.cc180
-rw-r--r--internal/ceres/compressed_row_sparse_matrix.h29
-rw-r--r--internal/ceres/compressed_row_sparse_matrix_test.cc246
-rw-r--r--internal/ceres/conjugate_gradients_solver.cc65
-rw-r--r--internal/ceres/coordinate_descent_minimizer.cc38
-rw-r--r--internal/ceres/coordinate_descent_minimizer.h14
-rw-r--r--internal/ceres/corrector.cc29
-rw-r--r--internal/ceres/corrector_test.cc4
-rw-r--r--internal/ceres/cost_function_to_functor_test.cc4
-rw-r--r--internal/ceres/covariance_impl.cc376
-rw-r--r--internal/ceres/covariance_impl.h4
-rw-r--r--internal/ceres/covariance_test.cc43
-rw-r--r--internal/ceres/cxsparse.cc7
-rw-r--r--internal/ceres/cxsparse.h10
-rw-r--r--internal/ceres/dense_normal_cholesky_solver.cc28
-rw-r--r--internal/ceres/dense_qr_solver.cc25
-rw-r--r--internal/ceres/dogleg_strategy.cc24
-rw-r--r--internal/ceres/dogleg_strategy_test.cc10
-rw-r--r--internal/ceres/dynamic_compressed_row_finalizer.h51
-rw-r--r--internal/ceres/dynamic_compressed_row_jacobian_writer.cc107
-rw-r--r--internal/ceres/dynamic_compressed_row_jacobian_writer.h83
-rw-r--r--internal/ceres/dynamic_compressed_row_sparse_matrix.cc107
-rw-r--r--internal/ceres/dynamic_compressed_row_sparse_matrix.h99
-rw-r--r--internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc217
-rw-r--r--internal/ceres/dynamic_numeric_diff_cost_function_test.cc519
-rw-r--r--internal/ceres/evaluator.cc16
-rw-r--r--internal/ceres/evaluator.h4
-rw-r--r--internal/ceres/evaluator_test.cc71
-rw-r--r--internal/ceres/generate_eliminator_specialization.py6
-rw-r--r--internal/ceres/generate_partitioned_matrix_view_specializations.py231
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_2_2_2.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_2_2_3.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_2_2_4.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_2_2_d.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_2_3_3.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_2_3_4.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_2_3_9.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_2_3_d.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_2_4_3.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_2_4_4.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_2_4_8.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_2_4_9.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_2_4_d.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_2_d_d.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_4_4_2.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_4_4_3.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_4_4_4.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_4_4_d.cc59
-rw-r--r--internal/ceres/generated/partitioned_matrix_view_d_d_d.cc53
-rw-r--r--internal/ceres/generated/schur_eliminator_2_2_2.cc5
-rw-r--r--internal/ceres/generated/schur_eliminator_2_2_3.cc5
-rw-r--r--internal/ceres/generated/schur_eliminator_2_2_4.cc5
-rw-r--r--internal/ceres/generated/schur_eliminator_2_2_d.cc5
-rw-r--r--internal/ceres/generated/schur_eliminator_2_3_3.cc5
-rw-r--r--internal/ceres/generated/schur_eliminator_2_3_4.cc5
-rw-r--r--internal/ceres/generated/schur_eliminator_2_3_9.cc5
-rw-r--r--internal/ceres/generated/schur_eliminator_2_3_d.cc5
-rw-r--r--internal/ceres/generated/schur_eliminator_2_4_3.cc5
-rw-r--r--internal/ceres/generated/schur_eliminator_2_4_4.cc5
-rw-r--r--internal/ceres/generated/schur_eliminator_2_4_8.cc59
-rw-r--r--internal/ceres/generated/schur_eliminator_2_4_9.cc59
-rw-r--r--internal/ceres/generated/schur_eliminator_2_4_d.cc5
-rw-r--r--internal/ceres/generated/schur_eliminator_2_d_d.cc59
-rw-r--r--internal/ceres/generated/schur_eliminator_4_4_2.cc5
-rw-r--r--internal/ceres/generated/schur_eliminator_4_4_3.cc5
-rw-r--r--internal/ceres/generated/schur_eliminator_4_4_4.cc5
-rw-r--r--internal/ceres/generated/schur_eliminator_4_4_d.cc5
-rw-r--r--internal/ceres/generated/schur_eliminator_d_d_d.cc2
-rw-r--r--internal/ceres/gradient_checking_cost_function.cc24
-rw-r--r--internal/ceres/gradient_checking_cost_function_test.cc12
-rw-r--r--internal/ceres/implicit_schur_complement.cc21
-rw-r--r--internal/ceres/implicit_schur_complement.h8
-rw-r--r--internal/ceres/implicit_schur_complement_test.cc5
-rw-r--r--internal/ceres/integral_types.h1
-rw-r--r--internal/ceres/iterative_schur_complement_solver.cc38
-rw-r--r--internal/ceres/jet_quaternion_integration_test.cc201
-rw-r--r--internal/ceres/lapack.cc76
-rw-r--r--internal/ceres/lapack.h34
-rw-r--r--internal/ceres/levenberg_marquardt_strategy.cc9
-rw-r--r--internal/ceres/levenberg_marquardt_strategy_test.cc2
-rw-r--r--internal/ceres/line_search.cc290
-rw-r--r--internal/ceres/line_search.h6
-rw-r--r--internal/ceres/line_search_direction.cc55
-rw-r--r--internal/ceres/line_search_direction.h3
-rw-r--r--internal/ceres/line_search_minimizer.cc200
-rw-r--r--internal/ceres/line_search_minimizer.h3
-rw-r--r--internal/ceres/linear_solver.cc30
-rw-r--r--internal/ceres/linear_solver.h37
-rw-r--r--internal/ceres/loss_function.cc34
-rw-r--r--internal/ceres/low_rank_inverse_hessian.cc108
-rw-r--r--internal/ceres/low_rank_inverse_hessian.h8
-rw-r--r--internal/ceres/miniglog/glog/logging.cc39
-rw-r--r--internal/ceres/miniglog/glog/logging.h303
-rw-r--r--internal/ceres/minimizer.cc15
-rw-r--r--internal/ceres/minimizer.h9
-rw-r--r--internal/ceres/minimizer_test.cc39
-rw-r--r--internal/ceres/mutex.h6
-rw-r--r--internal/ceres/numeric_diff_cost_function_test.cc13
-rw-r--r--internal/ceres/ordered_groups_test.cc66
-rw-r--r--internal/ceres/parameter_block.h85
-rw-r--r--internal/ceres/parameter_block_ordering.cc16
-rw-r--r--internal/ceres/parameter_block_ordering.h5
-rw-r--r--internal/ceres/parameter_block_test.cc40
-rw-r--r--internal/ceres/partitioned_matrix_view.cc388
-rw-r--r--internal/ceres/partitioned_matrix_view.h87
-rw-r--r--internal/ceres/partitioned_matrix_view_impl.h380
-rw-r--r--internal/ceres/partitioned_matrix_view_test.cc95
-rw-r--r--internal/ceres/polynomial.cc101
-rw-r--r--internal/ceres/polynomial.h1
-rw-r--r--internal/ceres/preconditioner.cc10
-rw-r--r--internal/ceres/preconditioner.h12
-rw-r--r--internal/ceres/problem.cc42
-rw-r--r--internal/ceres/problem_impl.cc162
-rw-r--r--internal/ceres/problem_impl.h27
-rw-r--r--internal/ceres/problem_test.cc286
-rw-r--r--internal/ceres/program.cc285
-rw-r--r--internal/ceres/program.h62
-rw-r--r--internal/ceres/program_evaluator.h23
-rw-r--r--internal/ceres/program_test.cc431
-rw-r--r--internal/ceres/reorder_program.cc434
-rw-r--r--internal/ceres/reorder_program.h101
-rw-r--r--internal/ceres/reorder_program_test.cc170
-rw-r--r--internal/ceres/residual_block_test.cc6
-rw-r--r--internal/ceres/residual_block_utils.cc18
-rw-r--r--internal/ceres/rotation_test.cc35
-rw-r--r--internal/ceres/runtime_numeric_diff_cost_function.cc217
-rw-r--r--internal/ceres/runtime_numeric_diff_cost_function.h87
-rw-r--r--internal/ceres/runtime_numeric_diff_cost_function_test.cc222
-rw-r--r--internal/ceres/schur_complement_solver.cc282
-rw-r--r--internal/ceres/schur_complement_solver.h31
-rw-r--r--internal/ceres/schur_complement_solver_test.cc18
-rw-r--r--internal/ceres/schur_eliminator.cc17
-rw-r--r--internal/ceres/schur_eliminator_impl.h3
-rw-r--r--internal/ceres/schur_jacobi_preconditioner.cc24
-rw-r--r--internal/ceres/schur_jacobi_preconditioner.h4
-rw-r--r--internal/ceres/schur_ordering.cc101
-rw-r--r--internal/ceres/schur_ordering_test.cc177
-rw-r--r--internal/ceres/single_linkage_clustering.cc110
-rw-r--r--internal/ceres/single_linkage_clustering.h (renamed from internal/ceres/schur_ordering.h)64
-rw-r--r--internal/ceres/single_linkage_clustering_test.cc132
-rw-r--r--internal/ceres/small_blas.h39
-rw-r--r--internal/ceres/solver.cc392
-rw-r--r--internal/ceres/solver_impl.cc1187
-rw-r--r--internal/ceres/solver_impl.h104
-rw-r--r--internal/ceres/solver_impl_test.cc939
-rw-r--r--internal/ceres/solver_test.cc298
-rw-r--r--internal/ceres/sparse_normal_cholesky_solver.cc353
-rw-r--r--internal/ceres/sparse_normal_cholesky_solver.h32
-rw-r--r--internal/ceres/stringprintf.cc4
-rw-r--r--internal/ceres/suitesparse.cc139
-rw-r--r--internal/ceres/suitesparse.h58
-rw-r--r--internal/ceres/summary_utils.cc66
-rw-r--r--internal/ceres/summary_utils.h49
-rw-r--r--internal/ceres/symmetric_linear_solver_test.cc4
-rw-r--r--internal/ceres/system_test.cc71
-rw-r--r--internal/ceres/test_util.cc1
-rw-r--r--internal/ceres/trust_region_minimizer.cc358
-rw-r--r--internal/ceres/trust_region_strategy.h4
-rw-r--r--internal/ceres/types.cc46
-rw-r--r--internal/ceres/unsymmetric_linear_solver_test.cc70
-rw-r--r--internal/ceres/visibility.cc3
-rw-r--r--internal/ceres/visibility.h3
-rw-r--r--internal/ceres/visibility_based_preconditioner.cc102
-rw-r--r--internal/ceres/visibility_based_preconditioner.h3
-rw-r--r--internal/ceres/visibility_based_preconditioner_test.cc3
-rw-r--r--internal/ceres/visibility_test.cc3
190 files changed, 10563 insertions, 5749 deletions
diff --git a/internal/ceres/CMakeLists.txt b/internal/ceres/CMakeLists.txt
index 610e816..4d4f873 100644
--- a/internal/ceres/CMakeLists.txt
+++ b/internal/ceres/CMakeLists.txt
@@ -34,8 +34,8 @@ SET(CERES_INTERNAL_SRC
block_evaluate_preparer.cc
block_jacobi_preconditioner.cc
block_jacobian_writer.cc
- block_random_access_crs_matrix.cc
block_random_access_dense_matrix.cc
+ block_random_access_diagonal_matrix.cc
block_random_access_matrix.cc
block_random_access_sparse_matrix.cc
block_sparse_matrix.cc
@@ -43,6 +43,7 @@ SET(CERES_INTERNAL_SRC
c_api.cc
canonical_views_clustering.cc
cgnr_solver.cc
+ callbacks.cc
compressed_col_sparse_matrix_utils.cc
compressed_row_jacobian_writer.cc
compressed_row_sparse_matrix.cc
@@ -58,6 +59,8 @@ SET(CERES_INTERNAL_SRC
dense_sparse_matrix.cc
detect_structure.cc
dogleg_strategy.cc
+ dynamic_compressed_row_jacobian_writer.cc
+ dynamic_compressed_row_sparse_matrix.cc
evaluator.cc
file.cc
gradient_checking_cost_function.cc
@@ -84,13 +87,14 @@ SET(CERES_INTERNAL_SRC
problem.cc
problem_impl.cc
program.cc
+ reorder_program.cc
residual_block.cc
residual_block_utils.cc
- runtime_numeric_diff_cost_function.cc
schur_complement_solver.cc
schur_eliminator.cc
schur_jacobi_preconditioner.cc
scratch_evaluate_preparer.cc
+ single_linkage_clustering.cc
solver.cc
solver_impl.cc
sparse_matrix.cc
@@ -98,6 +102,7 @@ SET(CERES_INTERNAL_SRC
split.cc
stringprintf.cc
suitesparse.cc
+ summary_utils.cc
triplet_sparse_matrix.cc
trust_region_minimizer.cc
trust_region_strategy.cc
@@ -128,62 +133,31 @@ IF (SCHUR_SPECIALIZATIONS)
FILE(GLOB CERES_INTERNAL_SCHUR_FILES generated/*.cc)
ELSE (SCHUR_SPECIALIZATIONS)
# Only the fully dynamic solver. The build is much faster this way.
- FILE(GLOB CERES_INTERNAL_SCHUR_FILES generated/schur_eliminator_d_d_d.cc)
+ FILE(GLOB CERES_INTERNAL_SCHUR_FILES generated/*_d_d_d.cc)
ENDIF (SCHUR_SPECIALIZATIONS)
-# For Android, use the internal Glog implementation.
-IF (MINIGLOG)
- ADD_LIBRARY(miniglog STATIC miniglog/glog/logging.cc)
- INSTALL(TARGETS miniglog
- EXPORT CeresExport
- RUNTIME DESTINATION bin
- LIBRARY DESTINATION lib${LIB_SUFFIX}
- ARCHIVE DESTINATION lib${LIB_SUFFIX})
-ENDIF (MINIGLOG)
-
-SET(CERES_LIBRARY_DEPENDENCIES ${GLOG_LIB})
-
-IF (GFLAGS)
- LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${GFLAGS_LIB})
-ENDIF (GFLAGS)
-
-IF (SUITESPARSE_FOUND)
- LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${SUITESPARSEQR_LIB})
- LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${CHOLMOD_LIB})
- LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${CCOLAMD_LIB})
- LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${CAMD_LIB})
- LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${COLAMD_LIB})
- LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${AMD_LIB})
- IF (EXISTS ${SUITESPARSE_CONFIG_LIB})
- LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${SUITESPARSE_CONFIG_LIB})
- ENDIF (EXISTS ${SUITESPARSE_CONFIG_LIB})
-
- IF (EXISTS ${METIS_LIB})
- LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${METIS_LIB})
- ENDIF (EXISTS ${METIS_LIB})
+# Build the list of dependencies for Ceres based on the current configuration.
+IF (NOT MINIGLOG AND GLOG_FOUND)
+ LIST(APPEND CERES_LIBRARY_PUBLIC_DEPENDENCIES ${GLOG_LIBRARIES})
+ENDIF (NOT MINIGLOG AND GLOG_FOUND)
- IF (TBB_FOUND)
- LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${TBB_LIB})
- LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${TBB_MALLOC_LIB})
- ENDIF (TBB_FOUND)
-ENDIF (SUITESPARSE_FOUND)
+IF (SUITESPARSE AND SUITESPARSE_FOUND)
+ LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${SUITESPARSE_LIBRARIES})
+ENDIF (SUITESPARSE AND SUITESPARSE_FOUND)
-IF (CXSPARSE_FOUND)
- LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${CXSPARSE_LIB})
-ENDIF (CXSPARSE_FOUND)
+IF (CXSPARSE AND CXSPARSE_FOUND)
+ LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${CXSPARSE_LIBRARIES})
+ENDIF (CXSPARSE AND CXSPARSE_FOUND)
-IF (BLAS_AND_LAPACK_FOUND)
- LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${LAPACK_LIBRARIES})
- LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${BLAS_LIBRARIES})
-ENDIF (BLAS_AND_LAPACK_FOUND)
-
-IF (CXSPARSE_FOUND)
- LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${CXSPARSE_LIB})
-ENDIF (CXSPARSE_FOUND)
+IF (BLAS_FOUND AND LAPACK_FOUND)
+ LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${LAPACK_LIBRARIES})
+ LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${BLAS_LIBRARIES})
+ENDIF (BLAS_FOUND AND LAPACK_FOUND)
IF (OPENMP_FOUND)
IF (NOT MSVC)
- LIST(APPEND CERES_LIBRARY_DEPENDENCIES gomp)
+ LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES gomp)
+ LIST(APPEND CERES_LIBRARY_PRIVATE_DEPENDENCIES ${CMAKE_THREAD_LIBS_INIT})
ENDIF (NOT MSVC)
ENDIF (OPENMP_FOUND)
@@ -192,12 +166,33 @@ SET(CERES_LIBRARY_SOURCE
${CERES_INTERNAL_HDRS}
${CERES_INTERNAL_SCHUR_FILES})
+# Primarily for Android, but optionally for others, compile the minimal
+# glog implementation into Ceres.
+IF (MINIGLOG)
+ LIST(APPEND CERES_LIBRARY_SOURCE miniglog/glog/logging.cc)
+ENDIF (MINIGLOG)
+
ADD_LIBRARY(ceres ${CERES_LIBRARY_SOURCE})
SET_TARGET_PROPERTIES(ceres PROPERTIES
VERSION ${CERES_VERSION}
SOVERSION ${CERES_VERSION_MAJOR}
)
-TARGET_LINK_LIBRARIES(ceres ${CERES_LIBRARY_DEPENDENCIES})
+
+IF (BUILD_SHARED_LIBS)
+ # When building a shared library, mark all external libraries as
+ # PRIVATE so they don't show up as a dependency.
+ TARGET_LINK_LIBRARIES(ceres
+ LINK_PUBLIC ${CERES_LIBRARY_PUBLIC_DEPENDENCIES}
+ LINK_PRIVATE ${CERES_LIBRARY_PRIVATE_DEPENDENCIES})
+ELSE (BUILD_SHARED_LIBS)
+ # When building a static library, all external libraries are
+ # PUBLIC(default) since the user needs to link to them.
+ # They will be listed in CeresTargets.cmake.
+ SET(CERES_LIBRARY_DEPENDENCIES
+ ${CERES_LIBRARY_PUBLIC_DEPENDENCIES}
+ ${CERES_LIBRARY_PRIVATE_DEPENDENCIES})
+ TARGET_LINK_LIBRARIES(ceres ${CERES_LIBRARY_DEPENDENCIES})
+ENDIF (BUILD_SHARED_LIBS)
INSTALL(TARGETS ceres
EXPORT CeresExport
@@ -212,8 +207,15 @@ IF (BUILD_TESTING AND GFLAGS)
numeric_diff_test_utils.cc
test_util.cc)
- TARGET_LINK_LIBRARIES(gtest ${GFLAGS_LIB} ${GLOG_LIB})
- TARGET_LINK_LIBRARIES(test_util ceres gtest ${GLOG_LIB})
+ IF (MINIGLOG)
+ # When using miniglog, it is compiled into Ceres, thus Ceres becomes
+ # the library against which other libraries should link for logging.
+ TARGET_LINK_LIBRARIES(gtest ${GFLAGS_LIBRARIES} ceres)
+ TARGET_LINK_LIBRARIES(test_util ceres gtest)
+ ELSE (MINIGLOG)
+ TARGET_LINK_LIBRARIES(gtest ${GFLAGS_LIBRARIES} ${GLOG_LIBRARIES})
+ TARGET_LINK_LIBRARIES(test_util ceres gtest ${GLOG_LIBRARIES})
+ ENDIF (MINIGLOG)
MACRO (CERES_TEST NAME)
ADD_EXECUTABLE(${NAME}_test ${NAME}_test.cc)
@@ -228,8 +230,8 @@ IF (BUILD_TESTING AND GFLAGS)
CERES_TEST(autodiff)
CERES_TEST(autodiff_cost_function)
CERES_TEST(autodiff_local_parameterization)
- CERES_TEST(block_random_access_crs_matrix)
CERES_TEST(block_random_access_dense_matrix)
+ CERES_TEST(block_random_access_diagonal_matrix)
CERES_TEST(block_random_access_sparse_matrix)
CERES_TEST(block_sparse_matrix)
CERES_TEST(c_api)
@@ -241,6 +243,8 @@ IF (BUILD_TESTING AND GFLAGS)
CERES_TEST(covariance)
CERES_TEST(dense_sparse_matrix)
CERES_TEST(dynamic_autodiff_cost_function)
+ CERES_TEST(dynamic_compressed_row_sparse_matrix)
+ CERES_TEST(dynamic_numeric_diff_cost_function)
CERES_TEST(evaluator)
CERES_TEST(gradient_checker)
CERES_TEST(gradient_checking_cost_function)
@@ -264,20 +268,23 @@ IF (BUILD_TESTING AND GFLAGS)
CERES_TEST(partitioned_matrix_view)
CERES_TEST(polynomial)
CERES_TEST(problem)
+ CERES_TEST(program)
+ CERES_TEST(reorder_program)
CERES_TEST(residual_block)
CERES_TEST(residual_block_utils)
CERES_TEST(rotation)
- CERES_TEST(runtime_numeric_diff_cost_function)
CERES_TEST(schur_complement_solver)
CERES_TEST(schur_eliminator)
+ CERES_TEST(single_linkage_clustering)
CERES_TEST(small_blas)
+ CERES_TEST(solver)
CERES_TEST(solver_impl)
# TODO(sameeragarwal): This test should ultimately be made
# independent of SuiteSparse.
- IF (SUITESPARSE_FOUND)
+ IF (SUITESPARSE AND SUITESPARSE_FOUND)
CERES_TEST(compressed_col_sparse_matrix_utils)
- ENDIF (SUITESPARSE_FOUND)
+ ENDIF (SUITESPARSE AND SUITESPARSE_FOUND)
CERES_TEST(symmetric_linear_solver)
CERES_TEST(triplet_sparse_matrix)
diff --git a/internal/ceres/array_utils.cc b/internal/ceres/array_utils.cc
index 673baa4..205ddaf 100644
--- a/internal/ceres/array_utils.cc
+++ b/internal/ceres/array_utils.cc
@@ -30,9 +30,13 @@
#include "ceres/array_utils.h"
+#include <algorithm>
#include <cmath>
#include <cstddef>
+#include <string>
+#include <vector>
#include "ceres/fpclassify.h"
+#include "ceres/stringprintf.h"
namespace ceres {
namespace internal {
@@ -55,6 +59,20 @@ bool IsArrayValid(const int size, const double* x) {
return true;
}
+int FindInvalidValue(const int size, const double* x) {
+ if (x == NULL) {
+ return size;
+ }
+
+ for (int i = 0; i < size; ++i) {
+ if (!IsFinite(x[i]) || (x[i] == kImpossibleValue)) {
+ return i;
+ }
+ }
+
+ return size;
+};
+
void InvalidateArray(const int size, double* x) {
if (x != NULL) {
for (int i = 0; i < size; ++i) {
@@ -63,5 +81,33 @@ void InvalidateArray(const int size, double* x) {
}
}
+void AppendArrayToString(const int size, const double* x, string* result) {
+ for (int i = 0; i < size; ++i) {
+ if (x == NULL) {
+ StringAppendF(result, "Not Computed ");
+ } else {
+ if (x[i] == kImpossibleValue) {
+ StringAppendF(result, "Uninitialized ");
+ } else {
+ StringAppendF(result, "%12g ", x[i]);
+ }
+ }
+ }
+}
+
+void MapValuesToContiguousRange(const int size, int* array) {
+ std::vector<int> unique_values(array, array + size);
+ std::sort(unique_values.begin(), unique_values.end());
+ unique_values.erase(std::unique(unique_values.begin(),
+ unique_values.end()),
+ unique_values.end());
+
+ for (int i = 0; i < size; ++i) {
+ array[i] = std::lower_bound(unique_values.begin(),
+ unique_values.end(),
+ array[i]) - unique_values.begin();
+ }
+}
+
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/array_utils.h b/internal/ceres/array_utils.h
index 742f439..7f56947 100644
--- a/internal/ceres/array_utils.h
+++ b/internal/ceres/array_utils.h
@@ -57,8 +57,31 @@ void InvalidateArray(int size, double* x);
// equal to the "impossible" value used by InvalidateArray.
bool IsArrayValid(int size, const double* x);
+// If the array contains an invalid value, return the index for it,
+// otherwise return size.
+int FindInvalidValue(const int size, const double* x);
+
+// Utility routine to print an array of doubles to a string. If the
+// array pointer is NULL, it is treated as an array of zeros.
+void AppendArrayToString(const int size, const double* x, string* result);
+
extern const double kImpossibleValue;
+// This routine takes an array of integer values, sorts and uniques
+// them and then maps each value in the array to its position in the
+// sorted+uniqued array. By doing this, if there are are k unique
+// values in the array, each value is replaced by an integer in the
+// range [0, k-1], while preserving their relative order.
+//
+// For example
+//
+// [1 0 3 5 0 1 5]
+//
+// gets mapped to
+//
+// [1 0 2 3 0 1 3]
+void MapValuesToContiguousRange(int size, int* array);
+
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/array_utils_test.cc b/internal/ceres/array_utils_test.cc
index c19a44a..203a301 100644
--- a/internal/ceres/array_utils_test.cc
+++ b/internal/ceres/array_utils_test.cc
@@ -32,6 +32,7 @@
#include <limits>
#include <cmath>
+#include <vector>
#include "gtest/gtest.h"
namespace ceres {
@@ -54,5 +55,68 @@ TEST(ArrayUtils, IsArrayValid) {
EXPECT_FALSE(IsArrayValid(3, x));
}
+TEST(ArrayUtils, FindInvalidIndex) {
+ double x[3];
+ x[0] = 0.0;
+ x[1] = 1.0;
+ x[2] = 2.0;
+ EXPECT_EQ(FindInvalidValue(3, x), 3);
+ x[1] = std::numeric_limits<double>::infinity();
+ EXPECT_EQ(FindInvalidValue(3, x), 1);
+ x[1] = std::numeric_limits<double>::quiet_NaN();
+ EXPECT_EQ(FindInvalidValue(3, x), 1);
+ x[1] = std::numeric_limits<double>::signaling_NaN();
+ EXPECT_EQ(FindInvalidValue(3, x), 1);
+ EXPECT_EQ(FindInvalidValue(1, NULL), 1);
+ InvalidateArray(3, x);
+ EXPECT_EQ(FindInvalidValue(3, x), 0);
+}
+
+TEST(MapValuesToContiguousRange, ContiguousEntries) {
+ vector<int> array;
+ array.push_back(0);
+ array.push_back(1);
+ vector<int> expected = array;
+ MapValuesToContiguousRange(array.size(), &array[0]);
+ EXPECT_EQ(array, expected);
+ array.clear();
+
+ array.push_back(1);
+ array.push_back(0);
+ expected = array;
+ MapValuesToContiguousRange(array.size(), &array[0]);
+ EXPECT_EQ(array, expected);
+}
+
+TEST(MapValuesToContiguousRange, NonContiguousEntries) {
+ vector<int> array;
+ array.push_back(0);
+ array.push_back(2);
+ vector<int> expected;
+ expected.push_back(0);
+ expected.push_back(1);
+ MapValuesToContiguousRange(array.size(), &array[0]);
+ EXPECT_EQ(array, expected);
+}
+
+TEST(MapValuesToContiguousRange, NonContiguousRepeatingEntries) {
+ vector<int> array;
+ array.push_back(3);
+ array.push_back(1);
+ array.push_back(0);
+ array.push_back(0);
+ array.push_back(0);
+ array.push_back(5);
+ vector<int> expected;
+ expected.push_back(2);
+ expected.push_back(1);
+ expected.push_back(0);
+ expected.push_back(0);
+ expected.push_back(0);
+ expected.push_back(3);
+ MapValuesToContiguousRange(array.size(), &array[0]);
+ EXPECT_EQ(array, expected);
+}
+
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/autodiff_local_parameterization_test.cc b/internal/ceres/autodiff_local_parameterization_test.cc
index 7e90177..a0f705d 100644
--- a/internal/ceres/autodiff_local_parameterization_test.cc
+++ b/internal/ceres/autodiff_local_parameterization_test.cc
@@ -48,7 +48,6 @@ struct IdentityPlus {
}
};
-
TEST(AutoDiffLocalParameterizationTest, IdentityParameterization) {
AutoDiffLocalParameterization<IdentityPlus, 3, 3>
parameterization;
@@ -72,6 +71,47 @@ TEST(AutoDiffLocalParameterizationTest, IdentityParameterization) {
}
}
+struct ScaledPlus {
+ ScaledPlus(const double &scale_factor)
+ : scale_factor_(scale_factor)
+ {}
+
+ template <typename T>
+ bool operator()(const T* x, const T* delta, T* x_plus_delta) const {
+ for (int i = 0; i < 3; ++i) {
+ x_plus_delta[i] = x[i] + T(scale_factor_) * delta[i];
+ }
+ return true;
+ }
+
+ const double scale_factor_;
+};
+
+TEST(AutoDiffLocalParameterizationTest, ScaledParameterization) {
+ const double kTolerance = 1e-14;
+
+ AutoDiffLocalParameterization<ScaledPlus, 3, 3>
+ parameterization(new ScaledPlus(1.2345));
+
+ double x[3] = {1.0, 2.0, 3.0};
+ double delta[3] = {0.0, 1.0, 2.0};
+ double x_plus_delta[3] = {0.0, 0.0, 0.0};
+ parameterization.Plus(x, delta, x_plus_delta);
+
+ EXPECT_NEAR(x_plus_delta[0], 1.0, kTolerance);
+ EXPECT_NEAR(x_plus_delta[1], 3.2345, kTolerance);
+ EXPECT_NEAR(x_plus_delta[2], 5.469, kTolerance);
+
+ double jacobian[9];
+ parameterization.ComputeJacobian(x, jacobian);
+ int k = 0;
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 3; ++j, ++k) {
+ EXPECT_NEAR(jacobian[k], (i == j) ? 1.2345 : 0.0, kTolerance);
+ }
+ }
+}
+
struct QuaternionPlus {
template<typename T>
bool operator()(const T* x, const T* delta, T* x_plus_delta) const {
diff --git a/internal/ceres/blas.cc b/internal/ceres/blas.cc
index f79b1eb..b919e13 100644
--- a/internal/ceres/blas.cc
+++ b/internal/ceres/blas.cc
@@ -29,6 +29,7 @@
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include "ceres/blas.h"
+#include "ceres/internal/port.h"
#include "glog/logging.h"
extern "C" void dsyrk_(char* uplo,
diff --git a/internal/ceres/blas_test.cc b/internal/ceres/blas_test.cc
deleted file mode 100644
index efa7e7b..0000000
--- a/internal/ceres/blas_test.cc
+++ /dev/null
@@ -1,303 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2013 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: keir@google.com (Keir Mierle)
-
-#include "ceres/blas.h"
-
-#include "gtest/gtest.h"
-#include "ceres/internal/eigen.h"
-
-namespace ceres {
-namespace internal {
-
-TEST(BLAS, MatrixMatrixMultiply) {
- const double kTolerance = 1e-16;
- const int kRowA = 3;
- const int kColA = 5;
- Matrix A(kRowA, kColA);
- A.setOnes();
-
- const int kRowB = 5;
- const int kColB = 7;
- Matrix B(kRowB, kColB);
- B.setOnes();
-
- for (int row_stride_c = kRowA; row_stride_c < 3 * kRowA; ++row_stride_c) {
- for (int col_stride_c = kColB; col_stride_c < 3 * kColB; ++col_stride_c) {
- Matrix C(row_stride_c, col_stride_c);
- C.setOnes();
-
- Matrix C_plus = C;
- Matrix C_minus = C;
- Matrix C_assign = C;
-
- Matrix C_plus_ref = C;
- Matrix C_minus_ref = C;
- Matrix C_assign_ref = C;
- for (int start_row_c = 0; start_row_c + kRowA < row_stride_c; ++start_row_c) {
- for (int start_col_c = 0; start_col_c + kColB < col_stride_c; ++start_col_c) {
- C_plus_ref.block(start_row_c, start_col_c, kRowA, kColB) +=
- A * B;
-
- MatrixMatrixMultiply<kRowA, kColA, kRowB, kColB, 1>(
- A.data(), kRowA, kColA,
- B.data(), kRowB, kColB,
- C_plus.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
-
- EXPECT_NEAR((C_plus_ref - C_plus).norm(), 0.0, kTolerance)
- << "C += A * B \n"
- << "row_stride_c : " << row_stride_c << "\n"
- << "col_stride_c : " << col_stride_c << "\n"
- << "start_row_c : " << start_row_c << "\n"
- << "start_col_c : " << start_col_c << "\n"
- << "Cref : \n" << C_plus_ref << "\n"
- << "C: \n" << C_plus;
-
-
- C_minus_ref.block(start_row_c, start_col_c, kRowA, kColB) -=
- A * B;
-
- MatrixMatrixMultiply<kRowA, kColA, kRowB, kColB, -1>(
- A.data(), kRowA, kColA,
- B.data(), kRowB, kColB,
- C_minus.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
-
- EXPECT_NEAR((C_minus_ref - C_minus).norm(), 0.0, kTolerance)
- << "C -= A * B \n"
- << "row_stride_c : " << row_stride_c << "\n"
- << "col_stride_c : " << col_stride_c << "\n"
- << "start_row_c : " << start_row_c << "\n"
- << "start_col_c : " << start_col_c << "\n"
- << "Cref : \n" << C_minus_ref << "\n"
- << "C: \n" << C_minus;
-
- C_assign_ref.block(start_row_c, start_col_c, kRowA, kColB) =
- A * B;
-
- MatrixMatrixMultiply<kRowA, kColA, kRowB, kColB, 0>(
- A.data(), kRowA, kColA,
- B.data(), kRowB, kColB,
- C_assign.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
-
- EXPECT_NEAR((C_assign_ref - C_assign).norm(), 0.0, kTolerance)
- << "C = A * B \n"
- << "row_stride_c : " << row_stride_c << "\n"
- << "col_stride_c : " << col_stride_c << "\n"
- << "start_row_c : " << start_row_c << "\n"
- << "start_col_c : " << start_col_c << "\n"
- << "Cref : \n" << C_assign_ref << "\n"
- << "C: \n" << C_assign;
- }
- }
- }
- }
-}
-
-TEST(BLAS, MatrixTransposeMatrixMultiply) {
- const double kTolerance = 1e-16;
- const int kRowA = 5;
- const int kColA = 3;
- Matrix A(kRowA, kColA);
- A.setOnes();
-
- const int kRowB = 5;
- const int kColB = 7;
- Matrix B(kRowB, kColB);
- B.setOnes();
-
- for (int row_stride_c = kColA; row_stride_c < 3 * kColA; ++row_stride_c) {
- for (int col_stride_c = kColB; col_stride_c < 3 * kColB; ++col_stride_c) {
- Matrix C(row_stride_c, col_stride_c);
- C.setOnes();
-
- Matrix C_plus = C;
- Matrix C_minus = C;
- Matrix C_assign = C;
-
- Matrix C_plus_ref = C;
- Matrix C_minus_ref = C;
- Matrix C_assign_ref = C;
- for (int start_row_c = 0; start_row_c + kColA < row_stride_c; ++start_row_c) {
- for (int start_col_c = 0; start_col_c + kColB < col_stride_c; ++start_col_c) {
- C_plus_ref.block(start_row_c, start_col_c, kColA, kColB) +=
- A.transpose() * B;
-
- MatrixTransposeMatrixMultiply<kRowA, kColA, kRowB, kColB, 1>(
- A.data(), kRowA, kColA,
- B.data(), kRowB, kColB,
- C_plus.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
-
- EXPECT_NEAR((C_plus_ref - C_plus).norm(), 0.0, kTolerance)
- << "C += A' * B \n"
- << "row_stride_c : " << row_stride_c << "\n"
- << "col_stride_c : " << col_stride_c << "\n"
- << "start_row_c : " << start_row_c << "\n"
- << "start_col_c : " << start_col_c << "\n"
- << "Cref : \n" << C_plus_ref << "\n"
- << "C: \n" << C_plus;
-
- C_minus_ref.block(start_row_c, start_col_c, kColA, kColB) -=
- A.transpose() * B;
-
- MatrixTransposeMatrixMultiply<kRowA, kColA, kRowB, kColB, -1>(
- A.data(), kRowA, kColA,
- B.data(), kRowB, kColB,
- C_minus.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
-
- EXPECT_NEAR((C_minus_ref - C_minus).norm(), 0.0, kTolerance)
- << "C -= A' * B \n"
- << "row_stride_c : " << row_stride_c << "\n"
- << "col_stride_c : " << col_stride_c << "\n"
- << "start_row_c : " << start_row_c << "\n"
- << "start_col_c : " << start_col_c << "\n"
- << "Cref : \n" << C_minus_ref << "\n"
- << "C: \n" << C_minus;
-
- C_assign_ref.block(start_row_c, start_col_c, kColA, kColB) =
- A.transpose() * B;
-
- MatrixTransposeMatrixMultiply<kRowA, kColA, kRowB, kColB, 0>(
- A.data(), kRowA, kColA,
- B.data(), kRowB, kColB,
- C_assign.data(), start_row_c, start_col_c, row_stride_c, col_stride_c);
-
- EXPECT_NEAR((C_assign_ref - C_assign).norm(), 0.0, kTolerance)
- << "C = A' * B \n"
- << "row_stride_c : " << row_stride_c << "\n"
- << "col_stride_c : " << col_stride_c << "\n"
- << "start_row_c : " << start_row_c << "\n"
- << "start_col_c : " << start_col_c << "\n"
- << "Cref : \n" << C_assign_ref << "\n"
- << "C: \n" << C_assign;
- }
- }
- }
- }
-}
-
-TEST(BLAS, MatrixVectorMultiply) {
- const double kTolerance = 1e-16;
- const int kRowA = 5;
- const int kColA = 3;
- Matrix A(kRowA, kColA);
- A.setOnes();
-
- Vector b(kColA);
- b.setOnes();
-
- Vector c(kRowA);
- c.setOnes();
-
- Vector c_plus = c;
- Vector c_minus = c;
- Vector c_assign = c;
-
- Vector c_plus_ref = c;
- Vector c_minus_ref = c;
- Vector c_assign_ref = c;
-
- c_plus_ref += A * b;
- MatrixVectorMultiply<kRowA, kColA, 1>(A.data(), kRowA, kColA,
- b.data(),
- c_plus.data());
- EXPECT_NEAR((c_plus_ref - c_plus).norm(), 0.0, kTolerance)
- << "c += A * b \n"
- << "c_ref : \n" << c_plus_ref << "\n"
- << "c: \n" << c_plus;
-
- c_minus_ref -= A * b;
- MatrixVectorMultiply<kRowA, kColA, -1>(A.data(), kRowA, kColA,
- b.data(),
- c_minus.data());
- EXPECT_NEAR((c_minus_ref - c_minus).norm(), 0.0, kTolerance)
- << "c += A * b \n"
- << "c_ref : \n" << c_minus_ref << "\n"
- << "c: \n" << c_minus;
-
- c_assign_ref = A * b;
- MatrixVectorMultiply<kRowA, kColA, 0>(A.data(), kRowA, kColA,
- b.data(),
- c_assign.data());
- EXPECT_NEAR((c_assign_ref - c_assign).norm(), 0.0, kTolerance)
- << "c += A * b \n"
- << "c_ref : \n" << c_assign_ref << "\n"
- << "c: \n" << c_assign;
-}
-
-TEST(BLAS, MatrixTransposeVectorMultiply) {
- const double kTolerance = 1e-16;
- const int kRowA = 5;
- const int kColA = 3;
- Matrix A(kRowA, kColA);
- A.setRandom();
-
- Vector b(kRowA);
- b.setRandom();
-
- Vector c(kColA);
- c.setOnes();
-
- Vector c_plus = c;
- Vector c_minus = c;
- Vector c_assign = c;
-
- Vector c_plus_ref = c;
- Vector c_minus_ref = c;
- Vector c_assign_ref = c;
-
- c_plus_ref += A.transpose() * b;
- MatrixTransposeVectorMultiply<kRowA, kColA, 1>(A.data(), kRowA, kColA,
- b.data(),
- c_plus.data());
- EXPECT_NEAR((c_plus_ref - c_plus).norm(), 0.0, kTolerance)
- << "c += A' * b \n"
- << "c_ref : \n" << c_plus_ref << "\n"
- << "c: \n" << c_plus;
-
- c_minus_ref -= A.transpose() * b;
- MatrixTransposeVectorMultiply<kRowA, kColA, -1>(A.data(), kRowA, kColA,
- b.data(),
- c_minus.data());
- EXPECT_NEAR((c_minus_ref - c_minus).norm(), 0.0, kTolerance)
- << "c += A' * b \n"
- << "c_ref : \n" << c_minus_ref << "\n"
- << "c: \n" << c_minus;
-
- c_assign_ref = A.transpose() * b;
- MatrixTransposeVectorMultiply<kRowA, kColA, 0>(A.data(), kRowA, kColA,
- b.data(),
- c_assign.data());
- EXPECT_NEAR((c_assign_ref - c_assign).norm(), 0.0, kTolerance)
- << "c += A' * b \n"
- << "c_ref : \n" << c_assign_ref << "\n"
- << "c: \n" << c_assign;
-}
-
-} // namespace internal
-} // namespace ceres
diff --git a/internal/ceres/block_jacobi_preconditioner.cc b/internal/ceres/block_jacobi_preconditioner.cc
index 29974d4..19b749b 100644
--- a/internal/ceres/block_jacobi_preconditioner.cc
+++ b/internal/ceres/block_jacobi_preconditioner.cc
@@ -94,7 +94,9 @@ bool BlockJacobiPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
//
// MatrixRef(blocks_[cells[c].block_id],
// col_block_size,
- // col_block_size).selfadjointView<Eigen::Upper>().rankUpdate(m);
+ // col_block_size)
+ // .selfadjointView<Eigen::Upper>()
+ // .rankUpdate(m);
//
}
}
diff --git a/internal/ceres/block_random_access_crs_matrix.cc b/internal/ceres/block_random_access_crs_matrix.cc
deleted file mode 100644
index 5b008e2..0000000
--- a/internal/ceres/block_random_access_crs_matrix.cc
+++ /dev/null
@@ -1,170 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2013 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: sameeragarwal@google.com (Sameer Agarwal)
-
-#include "ceres/block_random_access_crs_matrix.h"
-
-#include <algorithm>
-#include <set>
-#include <utility>
-#include <vector>
-#include "ceres/compressed_row_sparse_matrix.h"
-#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "ceres/mutex.h"
-#include "ceres/triplet_sparse_matrix.h"
-#include "ceres/types.h"
-#include "glog/logging.h"
-
-namespace ceres {
-namespace internal {
-
-BlockRandomAccessCRSMatrix::BlockRandomAccessCRSMatrix(
- const vector<int>& blocks,
- const set<pair<int, int> >& block_pairs)
- : kMaxRowBlocks(10 * 1000 * 1000),
- blocks_(blocks) {
- CHECK_LT(blocks.size(), kMaxRowBlocks);
-
- col_layout_.resize(blocks_.size(), 0);
- row_strides_.resize(blocks_.size(), 0);
-
- // Build the row/column layout vector and count the number of scalar
- // rows/columns.
- int num_cols = 0;
- for (int i = 0; i < blocks_.size(); ++i) {
- col_layout_[i] = num_cols;
- num_cols += blocks_[i];
- }
-
- // Walk the sparsity pattern and count the number of non-zeros.
- int num_nonzeros = 0;
- for (set<pair<int, int> >::const_iterator it = block_pairs.begin();
- it != block_pairs.end();
- ++it) {
- const int row_block_size = blocks_[it->first];
- const int col_block_size = blocks_[it->second];
- num_nonzeros += row_block_size * col_block_size;
- }
-
- VLOG(2) << "Matrix Size [" << num_cols
- << "," << num_cols
- << "] " << num_nonzeros;
-
- crsm_.reset(new CompressedRowSparseMatrix(num_cols, num_cols, num_nonzeros));
- int* rows = crsm_->mutable_rows();
- int* cols = crsm_->mutable_cols();
- double* values = crsm_->mutable_values();
-
- // Iterate over the sparsity pattern and fill the scalar sparsity
- // pattern of the underlying compressed sparse row matrix. Along the
- // way also fill out the Layout object which will allow random
- // access into the CRS Matrix.
- set<pair<int, int> >::const_iterator it = block_pairs.begin();
- vector<int> col_blocks;
- int row_pos = 0;
- rows[0] = 0;
- while (it != block_pairs.end()) {
- // Add entries to layout_ for all the blocks for this row.
- col_blocks.clear();
- const int row_block_id = it->first;
- const int row_block_size = blocks_[row_block_id];
- int num_cols = 0;
- while ((it != block_pairs.end()) && (it->first == row_block_id)) {
- layout_[IntPairToLong(it->first, it->second)] =
- new CellInfo(values + num_cols);
- col_blocks.push_back(it->second);
- num_cols += blocks_[it->second];
- ++it;
- };
-
- // Count the number of non-zeros in the row block.
- for (int j = 0; j < row_block_size; ++j) {
- rows[row_pos + j + 1] = rows[row_pos + j] + num_cols;
- }
-
- // Fill out the sparsity pattern for each row.
- int col_pos = 0;
- for (int j = 0; j < col_blocks.size(); ++j) {
- const int col_block_id = col_blocks[j];
- const int col_block_size = blocks_[col_block_id];
- for (int r = 0; r < row_block_size; ++r) {
- const int column_block_begin = rows[row_pos + r] + col_pos;
- for (int c = 0; c < col_block_size; ++c) {
- cols[column_block_begin + c] = col_layout_[col_block_id] + c;
- }
- }
- col_pos += col_block_size;
- }
-
- row_pos += row_block_size;
- values += row_block_size * num_cols;
- row_strides_[row_block_id] = num_cols;
- }
-}
-
-// Assume that the user does not hold any locks on any cell blocks
-// when they are calling SetZero.
-BlockRandomAccessCRSMatrix::~BlockRandomAccessCRSMatrix() {
- // TODO(sameeragarwal) this should be rationalized going forward and
- // perhaps moved into BlockRandomAccessMatrix.
- for (LayoutType::iterator it = layout_.begin();
- it != layout_.end();
- ++it) {
- delete it->second;
- }
-}
-
-CellInfo* BlockRandomAccessCRSMatrix::GetCell(int row_block_id,
- int col_block_id,
- int* row,
- int* col,
- int* row_stride,
- int* col_stride) {
- const LayoutType::iterator it =
- layout_.find(IntPairToLong(row_block_id, col_block_id));
- if (it == layout_.end()) {
- return NULL;
- }
-
- *row = 0;
- *col = 0;
- *row_stride = blocks_[row_block_id];
- *col_stride = row_strides_[row_block_id];
- return it->second;
-}
-
-// Assume that the user does not hold any locks on any cell blocks
-// when they are calling SetZero.
-void BlockRandomAccessCRSMatrix::SetZero() {
- crsm_->SetZero();
-}
-
-} // namespace internal
-} // namespace ceres
diff --git a/internal/ceres/block_random_access_diagonal_matrix.cc b/internal/ceres/block_random_access_diagonal_matrix.cc
new file mode 100644
index 0000000..d8bf4ef
--- /dev/null
+++ b/internal/ceres/block_random_access_diagonal_matrix.cc
@@ -0,0 +1,120 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/block_random_access_diagonal_matrix.h"
+
+#include <algorithm>
+#include <set>
+#include <utility>
+#include <vector>
+#include "ceres/internal/port.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "ceres/stl_util.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+BlockRandomAccessDiagonalMatrix::BlockRandomAccessDiagonalMatrix(
+ const vector<int>& blocks)
+ : blocks_(blocks) {
+ // Build the row/column layout vector and count the number of scalar
+ // rows/columns.
+ int num_cols = 0;
+ int num_nonzeros = 0;
+ vector<int> col_layout;
+ for (int i = 0; i < blocks_.size(); ++i) {
+ col_layout.push_back(num_cols);
+ num_cols += blocks_[i];
+ num_nonzeros += blocks_[i] * blocks_[i];
+ }
+
+ VLOG(1) << "Matrix Size [" << num_cols
+ << "," << num_cols
+ << "] " << num_nonzeros;
+
+ tsm_.reset(new TripletSparseMatrix(num_cols, num_cols, num_nonzeros));
+ tsm_->set_num_nonzeros(num_nonzeros);
+ int* rows = tsm_->mutable_rows();
+ int* cols = tsm_->mutable_cols();
+ double* values = tsm_->mutable_values();
+
+ int pos = 0;
+ for (int i = 0; i < blocks_.size(); ++i) {
+ const int block_size = blocks_[i];
+ layout_.push_back(new CellInfo(values + pos));
+ const int block_begin = col_layout[i];
+ for (int r = 0; r < block_size; ++r) {
+ for (int c = 0; c < block_size; ++c, ++pos) {
+ rows[pos] = block_begin + r;
+ cols[pos] = block_begin + c;
+ }
+ }
+ }
+}
+
+// Assume that the user does not hold any locks on any cell blocks
+// when they are calling SetZero.
+BlockRandomAccessDiagonalMatrix::~BlockRandomAccessDiagonalMatrix() {
+ STLDeleteContainerPointers(layout_.begin(), layout_.end());
+}
+
+CellInfo* BlockRandomAccessDiagonalMatrix::GetCell(int row_block_id,
+ int col_block_id,
+ int* row,
+ int* col,
+ int* row_stride,
+ int* col_stride) {
+ if (row_block_id != col_block_id) {
+ return NULL;
+ }
+ const int stride = blocks_[row_block_id];
+
+ // Each cell is stored contiguously as its own little dense matrix.
+ *row = 0;
+ *col = 0;
+ *row_stride = stride;
+ *col_stride = stride;
+ return layout_[row_block_id];
+}
+
+// Assume that the user does not hold any locks on any cell blocks
+// when they are calling SetZero.
+void BlockRandomAccessDiagonalMatrix::SetZero() {
+ if (tsm_->num_nonzeros()) {
+ VectorRef(tsm_->mutable_values(),
+ tsm_->num_nonzeros()).setZero();
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/block_random_access_crs_matrix.h b/internal/ceres/block_random_access_diagonal_matrix.h
index 11a203b..6b3cff2 100644
--- a/internal/ceres/block_random_access_crs_matrix.h
+++ b/internal/ceres/block_random_access_diagonal_matrix.h
@@ -28,16 +28,16 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
-#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_CRS_MATRIX_H_
-#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_CRS_MATRIX_H_
+#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DIAGONAL_MATRIX_H_
+#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DIAGONAL_MATRIX_H_
#include <set>
#include <vector>
#include <utility>
#include "ceres/mutex.h"
#include "ceres/block_random_access_matrix.h"
-#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/collections_port.h"
+#include "ceres/triplet_sparse_matrix.h"
#include "ceres/integral_types.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
@@ -47,19 +47,16 @@
namespace ceres {
namespace internal {
-// A square BlockRandomAccessMatrix where the underlying storage is a
-// compressed row sparse matrix. The matrix need not be symmetric.
-class BlockRandomAccessCRSMatrix : public BlockRandomAccessMatrix {
+// A thread safe block diagonal matrix implementation of
+// BlockRandomAccessMatrix.
+class BlockRandomAccessDiagonalMatrix : public BlockRandomAccessMatrix {
public:
- // blocks is an array of block sizes. block_pairs is a set of
- // <row_block_id, col_block_id> pairs to identify the non-zero cells
- // of this matrix.
- BlockRandomAccessCRSMatrix(const vector<int>& blocks,
- const set<pair<int, int> >& block_pairs);
+ // blocks is an array of block sizes.
+ BlockRandomAccessDiagonalMatrix(const vector<int>& blocks);
// The destructor is not thread safe. It assumes that no one is
// modifying any cells when the matrix is being destroyed.
- virtual ~BlockRandomAccessCRSMatrix();
+ virtual ~BlockRandomAccessDiagonalMatrix();
// BlockRandomAccessMatrix Interface.
virtual CellInfo* GetCell(int row_block_id,
@@ -74,35 +71,26 @@ class BlockRandomAccessCRSMatrix : public BlockRandomAccessMatrix {
virtual void SetZero();
// Since the matrix is square, num_rows() == num_cols().
- virtual int num_rows() const { return crsm_->num_rows(); }
- virtual int num_cols() const { return crsm_->num_cols(); }
+ virtual int num_rows() const { return tsm_->num_rows(); }
+ virtual int num_cols() const { return tsm_->num_cols(); }
- // Access to the underlying matrix object.
- const CompressedRowSparseMatrix* matrix() const { return crsm_.get(); }
- CompressedRowSparseMatrix* mutable_matrix() { return crsm_.get(); }
+ // Access to the underlying matrix object.
+ const TripletSparseMatrix* matrix() const { return tsm_.get(); }
+ TripletSparseMatrix* mutable_matrix() { return tsm_.get(); }
private:
- int64 IntPairToLong(int a, int b) {
- return a * kMaxRowBlocks + b;
- }
-
- const int64 kMaxRowBlocks;
// row/column block sizes.
const vector<int> blocks_;
- vector<int> col_layout_;
- vector<int> row_strides_;
+ vector<CellInfo*> layout_;
- // A mapping from <row_block_id, col_block_id> to the position in
- // the values array of tsm_ where the block is stored.
- typedef HashMap<long int, CellInfo* > LayoutType;
- LayoutType layout_;
+ // The underlying matrix object which actually stores the cells.
+ scoped_ptr<TripletSparseMatrix> tsm_;
- scoped_ptr<CompressedRowSparseMatrix> crsm_;
- friend class BlockRandomAccessCRSMatrixTest;
- CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessCRSMatrix);
+ friend class BlockRandomAccessDiagonalMatrixTest;
+ CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessDiagonalMatrix);
};
} // namespace internal
} // namespace ceres
-#endif // CERES_INTERNAL_BLOCK_RANDOM_ACCESS_CRS_MATRIX_H_
+#endif // CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DIAGONAL_MATRIX_H_
diff --git a/internal/ceres/block_random_access_crs_matrix_test.cc b/internal/ceres/block_random_access_diagonal_matrix_test.cc
index 1266c4f..e19268b 100644
--- a/internal/ceres/block_random_access_crs_matrix_test.cc
+++ b/internal/ceres/block_random_access_diagonal_matrix_test.cc
@@ -30,7 +30,8 @@
#include <limits>
#include <vector>
-#include "ceres/block_random_access_crs_matrix.h"
+
+#include "ceres/block_random_access_diagonal_matrix.h"
#include "ceres/internal/eigen.h"
#include "glog/logging.h"
#include "gtest/gtest.h"
@@ -38,59 +39,57 @@
namespace ceres {
namespace internal {
-TEST(BlockRandomAccessCRSMatrix, GetCell) {
+TEST(BlockRandomAccessDiagonalMatrix, GetCell) {
vector<int> blocks;
blocks.push_back(3);
blocks.push_back(4);
blocks.push_back(5);
const int num_rows = 3 + 4 + 5;
+ const int num_nonzeros = 3 * 3 + 4 * 4 + 5 * 5;
- set< pair<int, int> > block_pairs;
- int num_nonzeros = 0;
- block_pairs.insert(make_pair(0, 0));
- num_nonzeros += blocks[0] * blocks[0];
-
- block_pairs.insert(make_pair(1, 1));
- num_nonzeros += blocks[1] * blocks[1];
-
- block_pairs.insert(make_pair(1, 2));
- num_nonzeros += blocks[1] * blocks[2];
-
- block_pairs.insert(make_pair(2, 0));
- num_nonzeros += blocks[2] * blocks[0];
-
- BlockRandomAccessCRSMatrix m(blocks, block_pairs);
+ BlockRandomAccessDiagonalMatrix m(blocks);
EXPECT_EQ(m.num_rows(), num_rows);
EXPECT_EQ(m.num_cols(), num_rows);
- for (set<pair<int, int> >::const_iterator it = block_pairs.begin();
- it != block_pairs.end();
- ++it) {
- const int row_block_id = it->first;
- const int col_block_id = it->second;
+ for (int i = 0; i < blocks.size(); ++i) {
+ const int row_block_id = i;
+ int col_block_id;
int row;
int col;
int row_stride;
int col_stride;
- CellInfo* cell = m.GetCell(row_block_id, col_block_id,
- &row, &col,
- &row_stride, &col_stride);
- EXPECT_TRUE(cell != NULL);
- EXPECT_EQ(row, 0);
- EXPECT_EQ(col, 0);
-
- // Write into the block.
- MatrixRef(cell->values, row_stride, col_stride).block(
- row, col, blocks[row_block_id], blocks[col_block_id]) =
- (row_block_id + 1) * (col_block_id +1) *
- Matrix::Ones(blocks[row_block_id], blocks[col_block_id]);
+
+ for (int j = 0; j < blocks.size(); ++j) {
+ col_block_id = j;
+ CellInfo* cell = m.GetCell(row_block_id, col_block_id,
+ &row, &col,
+ &row_stride, &col_stride);
+ // Off diagonal entries are not present.
+ if (i != j) {
+ EXPECT_TRUE(cell == NULL);
+ continue;
+ }
+
+ EXPECT_TRUE(cell != NULL);
+ EXPECT_EQ(row, 0);
+ EXPECT_EQ(col, 0);
+ EXPECT_EQ(row_stride, blocks[row_block_id]);
+ EXPECT_EQ(col_stride, blocks[col_block_id]);
+
+ // Write into the block
+ MatrixRef(cell->values, row_stride, col_stride).block(
+ row, col, blocks[row_block_id], blocks[col_block_id]) =
+ (row_block_id + 1) * (col_block_id +1) *
+ Matrix::Ones(blocks[row_block_id], blocks[col_block_id]);
+ }
}
- const CompressedRowSparseMatrix* crsm = m.matrix();
- EXPECT_EQ(crsm->num_nonzeros(), num_nonzeros);
+ const TripletSparseMatrix* tsm = m.matrix();
+ EXPECT_EQ(tsm->num_nonzeros(), num_nonzeros);
+ EXPECT_EQ(tsm->max_num_nonzeros(), num_nonzeros);
Matrix dense;
- crsm->ToDenseMatrix(&dense);
+ tsm->ToDenseMatrix(&dense);
double kTolerance = 1e-14;
@@ -98,51 +97,20 @@ TEST(BlockRandomAccessCRSMatrix, GetCell) {
EXPECT_NEAR((dense.block(0, 0, 3, 3) - Matrix::Ones(3, 3)).norm(),
0.0,
kTolerance);
+
// (1,1)
EXPECT_NEAR((dense.block(3, 3, 4, 4) - 2 * 2 * Matrix::Ones(4, 4)).norm(),
0.0,
kTolerance);
- // (1,2)
- EXPECT_NEAR((dense.block(3, 3 + 4, 4, 5) - 2 * 3 * Matrix::Ones(4, 5)).norm(),
- 0.0,
- kTolerance);
- // (2,0)
- EXPECT_NEAR((dense.block(3 + 4, 0, 5, 3) - 3 * 1 * Matrix::Ones(5, 3)).norm(),
+
+ // (1,1)
+ EXPECT_NEAR((dense.block(7, 7, 5, 5) - 3 * 3 * Matrix::Ones(5, 5)).norm(),
0.0,
kTolerance);
// There is nothing else in the matrix besides these four blocks.
- EXPECT_NEAR(dense.norm(), sqrt(9. + 16. * 16. + 36. * 20. + 9. * 15.),
- kTolerance);
-}
-
-// IntPairToLong is private, thus this fixture is needed to access and
-// test it.
-class BlockRandomAccessCRSMatrixTest : public ::testing::Test {
- public:
- virtual void SetUp() {
- vector<int> blocks;
- blocks.push_back(1);
- set< pair<int, int> > block_pairs;
- block_pairs.insert(make_pair(0, 0));
- m_.reset(new BlockRandomAccessCRSMatrix(blocks, block_pairs));
- }
-
- void CheckIntPair(int a, int b) {
- int64 value = m_->IntPairToLong(a, b);
- EXPECT_GT(value, 0) << "Overflow a = " << a << " b = " << b;
- EXPECT_GT(value, a) << "Overflow a = " << a << " b = " << b;
- EXPECT_GT(value, b) << "Overflow a = " << a << " b = " << b;
- }
-
- private:
- scoped_ptr<BlockRandomAccessCRSMatrix> m_;
-};
-
-TEST_F(BlockRandomAccessCRSMatrixTest, IntPairToLongOverflow) {
- CheckIntPair(numeric_limits<int>::max(), numeric_limits<int>::max());
+ EXPECT_NEAR(dense.norm(), sqrt(9.0 + 16. * 16. + 81.0 * 25.), kTolerance);
}
-
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/block_random_access_sparse_matrix.h b/internal/ceres/block_random_access_sparse_matrix.h
index a6b5f39..27b1029 100644
--- a/internal/ceres/block_random_access_sparse_matrix.h
+++ b/internal/ceres/block_random_access_sparse_matrix.h
@@ -47,7 +47,7 @@
namespace ceres {
namespace internal {
-// A threaf safe square block sparse implementation of
+// A thread safe square block sparse implementation of
// BlockRandomAccessMatrix. Internally a TripletSparseMatrix is used
// for doing the actual storage. This class augments this matrix with
// an unordered_map that allows random read/write access.
diff --git a/internal/ceres/block_structure.cc b/internal/ceres/block_structure.cc
index 5a1a5e1..00c4ce2 100644
--- a/internal/ceres/block_structure.cc
+++ b/internal/ceres/block_structure.cc
@@ -34,6 +34,9 @@ namespace ceres {
namespace internal {
bool CellLessThan(const Cell& lhs, const Cell& rhs) {
+ if (lhs.block_id == rhs.block_id) {
+ return (lhs.position < rhs.position);
+ }
return (lhs.block_id < rhs.block_id);
}
diff --git a/internal/ceres/block_structure.h b/internal/ceres/block_structure.h
index f509067..656716e 100644
--- a/internal/ceres/block_structure.h
+++ b/internal/ceres/block_structure.h
@@ -45,9 +45,7 @@
namespace ceres {
namespace internal {
-class BlockStructureProto;
-
-typedef int16 BlockSize;
+typedef int32 BlockSize;
struct Block {
Block() : size(-1), position(-1) {}
@@ -89,16 +87,6 @@ struct CompressedColumnBlockStructure {
vector<CompressedColumn> cols;
};
-// Deserialize the given block structure proto to the given block structure.
-// Destroys previous contents of block_structure.
-void ProtoToBlockStructure(const BlockStructureProto &proto,
- CompressedRowBlockStructure *block_structure);
-
-// Serialize the given block structure to the given proto. Destroys previous
-// contents of proto.
-void BlockStructureToProto(const CompressedRowBlockStructure &block_structure,
- BlockStructureProto *proto);
-
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/callbacks.cc b/internal/ceres/callbacks.cc
new file mode 100644
index 0000000..d223633
--- /dev/null
+++ b/internal/ceres/callbacks.cc
@@ -0,0 +1,109 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <iostream> // NO LINT
+#include "ceres/callbacks.h"
+#include "ceres/program.h"
+#include "ceres/stringprintf.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+StateUpdatingCallback::StateUpdatingCallback(Program* program,
+ double* parameters)
+ : program_(program), parameters_(parameters) {}
+
+StateUpdatingCallback::~StateUpdatingCallback() {}
+
+CallbackReturnType StateUpdatingCallback::operator()(
+ const IterationSummary& summary) {
+ if (summary.step_is_successful) {
+ program_->StateVectorToParameterBlocks(parameters_);
+ program_->CopyParameterBlockStateToUserState();
+ }
+ return SOLVER_CONTINUE;
+}
+
+LoggingCallback::LoggingCallback(const MinimizerType minimizer_type,
+ const bool log_to_stdout)
+ : minimizer_type(minimizer_type),
+ log_to_stdout_(log_to_stdout) {}
+
+LoggingCallback::~LoggingCallback() {}
+
+CallbackReturnType LoggingCallback::operator()(
+ const IterationSummary& summary) {
+ string output;
+ if (minimizer_type == LINE_SEARCH) {
+ const char* kReportRowFormat =
+ "% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e "
+ "s:% 3.2e e:% 3d it:% 3.2e tt:% 3.2e";
+ output = StringPrintf(kReportRowFormat,
+ summary.iteration,
+ summary.cost,
+ summary.cost_change,
+ summary.gradient_max_norm,
+ summary.step_norm,
+ summary.step_size,
+ summary.line_search_function_evaluations,
+ summary.iteration_time_in_seconds,
+ summary.cumulative_time_in_seconds);
+ } else if (minimizer_type == TRUST_REGION) {
+ if (summary.iteration == 0) {
+ output = "iter cost cost_change |gradient| |step| tr_ratio tr_radius ls_iter iter_time total_time\n";
+ }
+ const char* kReportRowFormat =
+ "% 4d % 8e % 3.2e % 3.2e % 3.2e % 3.2e % 3.2e % 3d % 3.2e % 3.2e";
+ output += StringPrintf(kReportRowFormat,
+ summary.iteration,
+ summary.cost,
+ summary.cost_change,
+ summary.gradient_max_norm,
+ summary.step_norm,
+ summary.relative_decrease,
+ summary.trust_region_radius,
+ summary.linear_solver_iterations,
+ summary.iteration_time_in_seconds,
+ summary.cumulative_time_in_seconds);
+ } else {
+ LOG(FATAL) << "Unknown minimizer type.";
+ }
+
+ if (log_to_stdout_) {
+ cout << output << endl;
+ } else {
+ VLOG(1) << output;
+ }
+ return SOLVER_CONTINUE;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/callbacks.h b/internal/ceres/callbacks.h
new file mode 100644
index 0000000..93704df
--- /dev/null
+++ b/internal/ceres/callbacks.h
@@ -0,0 +1,71 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_CALLBACKS_H_
+#define CERES_INTERNAL_CALLBACKS_H_
+
+#include <string>
+#include "ceres/iteration_callback.h"
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+class Program;
+
+// Callback for updating the externally visible state of parameter
+// blocks.
+class StateUpdatingCallback : public IterationCallback {
+ public:
+ StateUpdatingCallback(Program* program, double* parameters);
+ virtual ~StateUpdatingCallback();
+ virtual CallbackReturnType operator()(const IterationSummary& summary);
+ private:
+ Program* program_;
+ double* parameters_;
+};
+
+// Callback for logging the state of the minimizer to STDERR or
+// STDOUT depending on the user's preferences and logging level.
+class LoggingCallback : public IterationCallback {
+ public:
+ LoggingCallback(MinimizerType minimizer_type, bool log_to_stdout);
+ virtual ~LoggingCallback();
+ virtual CallbackReturnType operator()(const IterationSummary& summary);
+
+ private:
+ const MinimizerType minimizer_type;
+ const bool log_to_stdout_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_CALLBACKS_H_
diff --git a/internal/ceres/canonical_views_clustering.cc b/internal/ceres/canonical_views_clustering.cc
index 6531945..2f032e6 100644
--- a/internal/ceres/canonical_views_clustering.cc
+++ b/internal/ceres/canonical_views_clustering.cc
@@ -29,6 +29,9 @@
// Author: David Gallup (dgallup@google.com)
// Sameer Agarwal (sameeragarwal@google.com)
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_NO_SUITESPARSE
#include "ceres/canonical_views_clustering.h"
@@ -57,8 +60,8 @@ class CanonicalViewsClustering {
// configuration of the clustering algorithm that some of the
// vertices may not be assigned to any cluster. In this case they
// are assigned to a cluster with id = kInvalidClusterId.
- void ComputeClustering(const Graph<int>& graph,
- const CanonicalViewsClusteringOptions& options,
+ void ComputeClustering(const CanonicalViewsClusteringOptions& options,
+ const Graph<int>& graph,
vector<int>* centers,
IntMap* membership);
@@ -81,21 +84,21 @@ class CanonicalViewsClustering {
};
void ComputeCanonicalViewsClustering(
- const Graph<int>& graph,
const CanonicalViewsClusteringOptions& options,
+ const Graph<int>& graph,
vector<int>* centers,
IntMap* membership) {
time_t start_time = time(NULL);
CanonicalViewsClustering cv;
- cv.ComputeClustering(graph, options, centers, membership);
+ cv.ComputeClustering(options, graph, centers, membership);
VLOG(2) << "Canonical views clustering time (secs): "
<< time(NULL) - start_time;
}
// Implementation of CanonicalViewsClustering
void CanonicalViewsClustering::ComputeClustering(
- const Graph<int>& graph,
const CanonicalViewsClusteringOptions& options,
+ const Graph<int>& graph,
vector<int>* centers,
IntMap* membership) {
options_ = options;
diff --git a/internal/ceres/canonical_views_clustering.h b/internal/ceres/canonical_views_clustering.h
index 48d1ed2..1b4c4ee 100644
--- a/internal/ceres/canonical_views_clustering.h
+++ b/internal/ceres/canonical_views_clustering.h
@@ -41,15 +41,15 @@
#ifndef CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_
#define CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_NO_SUITESPARSE
#include <vector>
#include "ceres/collections_port.h"
#include "ceres/graph.h"
-#include "ceres/internal/macros.h"
-#include "ceres/map_util.h"
-#include "glog/logging.h"
namespace ceres {
namespace internal {
@@ -100,8 +100,8 @@ struct CanonicalViewsClusteringOptions;
// algorithm that some of the vertices may not be assigned to any
// cluster. In this case they are assigned to a cluster with id = -1;
void ComputeCanonicalViewsClustering(
- const Graph<int>& graph,
const CanonicalViewsClusteringOptions& options,
+ const Graph<int>& graph,
vector<int>* centers,
HashMap<int, int>* membership);
diff --git a/internal/ceres/canonical_views_clustering_test.cc b/internal/ceres/canonical_views_clustering_test.cc
index 78d5635..f86084a 100644
--- a/internal/ceres/canonical_views_clustering_test.cc
+++ b/internal/ceres/canonical_views_clustering_test.cc
@@ -29,6 +29,9 @@
// Author: Sameer Agarwal (sameeragarwal@google.com)
// David Gallup (dgallup@google.com)
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_NO_SUITESPARSE
#include "ceres/canonical_views_clustering.h"
@@ -69,7 +72,7 @@ class CanonicalViewsTest : public ::testing::Test {
}
void ComputeClustering() {
- ComputeCanonicalViewsClustering(graph_, options_, &centers_, &membership_);
+ ComputeCanonicalViewsClustering(options_, graph_, &centers_, &membership_);
}
Graph<int> graph_;
diff --git a/internal/ceres/cgnr_solver.cc b/internal/ceres/cgnr_solver.cc
index 9b8f980..88e61d9 100644
--- a/internal/ceres/cgnr_solver.cc
+++ b/internal/ceres/cgnr_solver.cc
@@ -33,6 +33,7 @@
#include "ceres/block_jacobi_preconditioner.h"
#include "ceres/cgnr_linear_operator.h"
#include "ceres/conjugate_gradients_solver.h"
+#include "ceres/internal/eigen.h"
#include "ceres/linear_solver.h"
#include "ceres/wall_time.h"
#include "glog/logging.h"
@@ -43,6 +44,10 @@ namespace internal {
CgnrSolver::CgnrSolver(const LinearSolver::Options& options)
: options_(options),
preconditioner_(NULL) {
+ if (options_.preconditioner_type != JACOBI &&
+ options_.preconditioner_type != IDENTITY) {
+ LOG(FATAL) << "CGNR only supports IDENTITY and JACOBI preconditioners.";
+ }
}
LinearSolver::Summary CgnrSolver::SolveImpl(
@@ -53,9 +58,9 @@ LinearSolver::Summary CgnrSolver::SolveImpl(
EventLogger event_logger("CgnrSolver::Solve");
// Form z = Atb.
- scoped_array<double> z(new double[A->num_cols()]);
- std::fill(z.get(), z.get() + A->num_cols(), 0.0);
- A->LeftMultiply(b, z.get());
+ Vector z(A->num_cols());
+ z.setZero();
+ A->LeftMultiply(b, z.data());
// Precondition if necessary.
LinearSolver::PerSolveOptions cg_per_solve_options = per_solve_options;
@@ -65,20 +70,17 @@ LinearSolver::Summary CgnrSolver::SolveImpl(
}
preconditioner_->Update(*A, per_solve_options.D);
cg_per_solve_options.preconditioner = preconditioner_.get();
- } else if (options_.preconditioner_type != IDENTITY) {
- LOG(FATAL) << "CGNR only supports IDENTITY and JACOBI preconditioners.";
}
// Solve (AtA + DtD)x = z (= Atb).
- std::fill(x, x + A->num_cols(), 0.0);
+ VectorRef(x, A->num_cols()).setZero();
CgnrLinearOperator lhs(*A, per_solve_options.D);
event_logger.AddEvent("Setup");
ConjugateGradientsSolver conjugate_gradient_solver(options_);
LinearSolver::Summary summary =
- conjugate_gradient_solver.Solve(&lhs, z.get(), cg_per_solve_options, x);
+ conjugate_gradient_solver.Solve(&lhs, z.data(), cg_per_solve_options, x);
event_logger.AddEvent("Solve");
-
return summary;
}
diff --git a/internal/ceres/collections_port.h b/internal/ceres/collections_port.h
index 715c975..3f976b9 100644
--- a/internal/ceres/collections_port.h
+++ b/internal/ceres/collections_port.h
@@ -33,26 +33,48 @@
#ifndef CERES_INTERNAL_COLLECTIONS_PORT_H_
#define CERES_INTERNAL_COLLECTIONS_PORT_H_
-#if defined(CERES_NO_TR1)
+#include "ceres/internal/port.h"
+
+#if defined(CERES_NO_UNORDERED_MAP)
# include <map>
# include <set>
-#else
-# if defined(_MSC_VER)
-# include <unordered_map>
-# include <unordered_set>
-# else
-# include <tr1/unordered_map>
-# include <tr1/unordered_set>
-# endif
#endif
+
+#if defined(CERES_TR1_UNORDERED_MAP)
+# include <tr1/unordered_map>
+# include <tr1/unordered_set>
+# define CERES_HASH_NAMESPACE_START namespace std { namespace tr1 {
+# define CERES_HASH_NAMESPACE_END } }
+#endif
+
+#if defined(CERES_STD_UNORDERED_MAP)
+# include <unordered_map>
+# include <unordered_set>
+# define CERES_HASH_NAMESPACE_START namespace std {
+# define CERES_HASH_NAMESPACE_END }
+#endif
+
+#if defined(CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE)
+# include <unordered_map>
+# include <unordered_set>
+# define CERES_HASH_NAMESPACE_START namespace std { namespace tr1 {
+# define CERES_HASH_NAMESPACE_END } }
+#endif
+
+#if !defined(CERES_NO_UNORDERED_MAP) && !defined(CERES_TR1_UNORDERED_MAP) && \
+ !defined(CERES_STD_UNORDERED_MAP) && !defined(CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE) // NOLINT
+# error One of: CERES_NO_UNORDERED_MAP, CERES_TR1_UNORDERED_MAP,\
+ CERES_STD_UNORDERED_MAP, CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE must be defined! // NOLINT
+#endif
+
#include <utility>
#include "ceres/integral_types.h"
#include "ceres/internal/port.h"
-// Some systems don't have access to TR1. In that case, substitute the hash
-// map/set with normal map/set. The price to pay is slightly slower speed for
-// some operations.
-#if defined(CERES_NO_TR1)
+// Some systems don't have access to unordered_map/unordered_set. In
+// that case, substitute the hash map/set with normal map/set. The
+// price to pay is slower speed for some operations.
+#if defined(CERES_NO_UNORDERED_MAP)
namespace ceres {
namespace internal {
@@ -71,11 +93,20 @@ struct HashSet : set<K> {};
namespace ceres {
namespace internal {
+#if defined(CERES_TR1_UNORDERED_MAP) || \
+ defined(CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE)
template<typename K, typename V>
struct HashMap : std::tr1::unordered_map<K, V> {};
-
template<typename K>
struct HashSet : std::tr1::unordered_set<K> {};
+#endif
+
+#if defined(CERES_STD_UNORDERED_MAP)
+template<typename K, typename V>
+struct HashMap : std::unordered_map<K, V> {};
+template<typename K>
+struct HashSet : std::unordered_set<K> {};
+#endif
#if defined(_WIN32) && !defined(__MINGW64__) && !defined(__MINGW32__)
#define GG_LONGLONG(x) x##I64
@@ -162,6 +193,5 @@ struct hash<pair<T, T> > {
CERES_HASH_NAMESPACE_END
-#endif // CERES_NO_TR1
-
+#endif // CERES_NO_UNORDERED_MAP
#endif // CERES_INTERNAL_COLLECTIONS_PORT_H_
diff --git a/internal/ceres/compressed_row_jacobian_writer.cc b/internal/ceres/compressed_row_jacobian_writer.cc
index bbadb77..ed8db14 100644
--- a/internal/ceres/compressed_row_jacobian_writer.cc
+++ b/internal/ceres/compressed_row_jacobian_writer.cc
@@ -40,6 +40,44 @@
namespace ceres {
namespace internal {
+void CompressedRowJacobianWriter::PopulateJacobianRowAndColumnBlockVectors(
+ const Program* program, CompressedRowSparseMatrix* jacobian) {
+ const vector<ParameterBlock*>& parameter_blocks =
+ program->parameter_blocks();
+ vector<int>& col_blocks = *(jacobian->mutable_col_blocks());
+ col_blocks.resize(parameter_blocks.size());
+ for (int i = 0; i < parameter_blocks.size(); ++i) {
+ col_blocks[i] = parameter_blocks[i]->LocalSize();
+ }
+
+ const vector<ResidualBlock*>& residual_blocks =
+ program->residual_blocks();
+ vector<int>& row_blocks = *(jacobian->mutable_row_blocks());
+ row_blocks.resize(residual_blocks.size());
+ for (int i = 0; i < residual_blocks.size(); ++i) {
+ row_blocks[i] = residual_blocks[i]->NumResiduals();
+ }
+}
+
+void CompressedRowJacobianWriter::GetOrderedParameterBlocks(
+ const Program* program,
+ int residual_id,
+ vector<pair<int, int> >* evaluated_jacobian_blocks) {
+ const ResidualBlock* residual_block =
+ program->residual_blocks()[residual_id];
+ const int num_parameter_blocks = residual_block->NumParameterBlocks();
+
+ for (int j = 0; j < num_parameter_blocks; ++j) {
+ const ParameterBlock* parameter_block =
+ residual_block->parameter_blocks()[j];
+ if (!parameter_block->IsConstant()) {
+ evaluated_jacobian_blocks->push_back(
+ make_pair(parameter_block->index(), j));
+ }
+ }
+ sort(evaluated_jacobian_blocks->begin(), evaluated_jacobian_blocks->end());
+}
+
SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
const vector<ResidualBlock*>& residual_blocks =
program_->residual_blocks();
@@ -71,7 +109,7 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
total_num_effective_parameters,
num_jacobian_nonzeros + total_num_effective_parameters);
- // At this stage, the CompressedSparseMatrix is an invalid state. But this
+ // At this stage, the CompressedRowSparseMatrix is an invalid state. But this
// seems to be the only way to construct it without doing a memory copy.
int* rows = jacobian->mutable_rows();
int* cols = jacobian->mutable_cols();
@@ -132,22 +170,7 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
}
CHECK_EQ(num_jacobian_nonzeros, rows[total_num_residuals]);
- // Populate the row and column block vectors for use by block
- // oriented ordering algorithms. This is useful when
- // Solver::Options::use_block_amd = true.
- const vector<ParameterBlock*>& parameter_blocks =
- program_->parameter_blocks();
- vector<int>& col_blocks = *(jacobian->mutable_col_blocks());
- col_blocks.resize(parameter_blocks.size());
- for (int i = 0; i < parameter_blocks.size(); ++i) {
- col_blocks[i] = parameter_blocks[i]->LocalSize();
- }
-
- vector<int>& row_blocks = *(jacobian->mutable_row_blocks());
- row_blocks.resize(residual_blocks.size());
- for (int i = 0; i < residual_blocks.size(); ++i) {
- row_blocks[i] = residual_blocks[i]->NumResiduals();
- }
+ PopulateJacobianRowAndColumnBlockVectors(program_, jacobian);
return jacobian;
}
@@ -164,25 +187,10 @@ void CompressedRowJacobianWriter::Write(int residual_id,
const ResidualBlock* residual_block =
program_->residual_blocks()[residual_id];
- const int num_parameter_blocks = residual_block->NumParameterBlocks();
const int num_residuals = residual_block->NumResiduals();
- // It is necessary to determine the order of the jacobian blocks before
- // copying them into the CompressedRowSparseMatrix. Just because a cost
- // function uses parameter blocks 1 after 2 in its arguments does not mean
- // that the block 1 occurs before block 2 in the column layout of the
- // jacobian. Thus, determine the order by sorting the jacobian blocks by their
- // position in the state vector.
vector<pair<int, int> > evaluated_jacobian_blocks;
- for (int j = 0; j < num_parameter_blocks; ++j) {
- const ParameterBlock* parameter_block =
- residual_block->parameter_blocks()[j];
- if (!parameter_block->IsConstant()) {
- evaluated_jacobian_blocks.push_back(
- make_pair(parameter_block->index(), j));
- }
- }
- sort(evaluated_jacobian_blocks.begin(), evaluated_jacobian_blocks.end());
+ GetOrderedParameterBlocks(program_, residual_id, &evaluated_jacobian_blocks);
// Where in the current row does the jacobian for a parameter block begin.
int col_pos = 0;
diff --git a/internal/ceres/compressed_row_jacobian_writer.h b/internal/ceres/compressed_row_jacobian_writer.h
index c103165..a722a7c 100644
--- a/internal/ceres/compressed_row_jacobian_writer.h
+++ b/internal/ceres/compressed_row_jacobian_writer.h
@@ -39,6 +39,7 @@
namespace ceres {
namespace internal {
+class CompressedRowSparseMatrix;
class Program;
class SparseMatrix;
@@ -49,11 +50,44 @@ class CompressedRowJacobianWriter {
: program_(program) {
}
+ // PopulateJacobianRowAndColumnBlockVectors sets col_blocks and
+ // row_blocks for a CompressedRowSparseMatrix, based on the
+ // parameter block sizes and residual sizes respectively from the
+ // program. This is useful when Solver::Options::use_block_amd =
+ // true;
+ //
+ // This function is static so that it is available to other jacobian
+ // writers which use CompressedRowSparseMatrix (or derived types).
+ // (Jacobian writers do not fall under any type hierarchy; they only
+ // have to provide an interface as specified in program_evaluator.h).
+ static void PopulateJacobianRowAndColumnBlockVectors(
+ const Program* program,
+ CompressedRowSparseMatrix* jacobian);
+
+ // It is necessary to determine the order of the jacobian blocks
+ // before copying them into a CompressedRowSparseMatrix (or derived
+ // type). Just because a cost function uses parameter blocks 1
+ // after 2 in its arguments does not mean that the block 1 occurs
+ // before block 2 in the column layout of the jacobian. Thus,
+ // GetOrderedParameterBlocks determines the order by sorting the
+ // jacobian blocks by their position in the state vector.
+ //
+ // This function is static so that it is available to other jacobian
+ // writers which use CompressedRowSparseMatrix (or derived types).
+ // (Jacobian writers do not fall under any type hierarchy; they only
+ // have to provide an interface as specified in
+ // program_evaluator.h).
+ static void GetOrderedParameterBlocks(
+ const Program* program,
+ int residual_id,
+ vector<pair<int, int> >* evaluated_jacobian_blocks);
+
// JacobianWriter interface.
- // Since the compressed row matrix has different layout than that assumed by
- // the cost functions, use scratch space to store the jacobians temporarily
- // then copy them over to the larger jacobian in the Write() function.
+ // Since the compressed row matrix has different layout than that
+ // assumed by the cost functions, use scratch space to store the
+ // jacobians temporarily then copy them over to the larger jacobian
+ // in the Write() function.
ScratchEvaluatePreparer* CreateEvaluatePreparers(int num_threads) {
return ScratchEvaluatePreparer::Create(*program_, num_threads);
}
diff --git a/internal/ceres/compressed_row_sparse_matrix.cc b/internal/ceres/compressed_row_sparse_matrix.cc
index e200c92..7993ed6 100644
--- a/internal/ceres/compressed_row_sparse_matrix.cc
+++ b/internal/ceres/compressed_row_sparse_matrix.cc
@@ -31,6 +31,7 @@
#include "ceres/compressed_row_sparse_matrix.h"
#include <algorithm>
+#include <numeric>
#include <vector>
#include "ceres/crs_matrix.h"
#include "ceres/internal/port.h"
@@ -124,7 +125,7 @@ CompressedRowSparseMatrix::CompressedRowSparseMatrix(
// Find the cumulative sum of the row counts.
for (int i = 1; i < num_rows_ + 1; ++i) {
- rows_[i] += rows_[i-1];
+ rows_[i] += rows_[i - 1];
}
CHECK_EQ(num_nonzeros(), m.num_nonzeros());
@@ -215,11 +216,28 @@ void CompressedRowSparseMatrix::DeleteRows(int delta_rows) {
num_rows_ -= delta_rows;
rows_.resize(num_rows_ + 1);
+
+ // Walk the list of row blocks until we reach the new number of rows
+ // and the drop the rest of the row blocks.
+ int num_row_blocks = 0;
+ int num_rows = 0;
+ while (num_row_blocks < row_blocks_.size() && num_rows < num_rows_) {
+ num_rows += row_blocks_[num_row_blocks];
+ ++num_row_blocks;
+ }
+
+ row_blocks_.resize(num_row_blocks);
}
void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
CHECK_EQ(m.num_cols(), num_cols_);
+ CHECK(row_blocks_.size() == 0 || m.row_blocks().size() !=0)
+ << "Cannot append a matrix with row blocks to one without and vice versa."
+ << "This matrix has : " << row_blocks_.size() << " row blocks."
+ << "The matrix being appended has: " << m.row_blocks().size()
+ << " row blocks.";
+
if (cols_.size() < num_nonzeros() + m.num_nonzeros()) {
cols_.resize(num_nonzeros() + m.num_nonzeros());
values_.resize(num_nonzeros() + m.num_nonzeros());
@@ -239,6 +257,7 @@ void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
}
num_rows_ += m.num_rows();
+ row_blocks_.insert(row_blocks_.end(), m.row_blocks().begin(), m.row_blocks().end());
}
void CompressedRowSparseMatrix::ToTextFile(FILE* file) const {
@@ -267,6 +286,13 @@ void CompressedRowSparseMatrix::ToCRSMatrix(CRSMatrix* matrix) const {
matrix->values.resize(matrix->rows[matrix->num_rows]);
}
+void CompressedRowSparseMatrix::SetMaxNumNonZeros(int num_nonzeros) {
+ CHECK_GE(num_nonzeros, 0);
+
+ cols_.resize(num_nonzeros);
+ values_.resize(num_nonzeros);
+}
+
void CompressedRowSparseMatrix::SolveLowerTriangularInPlace(
double* solution) const {
for (int r = 0; r < num_rows_; ++r) {
@@ -358,9 +384,161 @@ CompressedRowSparseMatrix* CompressedRowSparseMatrix::Transpose() const {
}
transpose_rows[0] = 0;
+ *(transpose->mutable_row_blocks()) = col_blocks_;
+ *(transpose->mutable_col_blocks()) = row_blocks_;
+
return transpose;
}
+namespace {
+// A ProductTerm is a term in the outer product of a matrix with
+// itself.
+struct ProductTerm {
+ ProductTerm(const int row, const int col, const int index)
+ : row(row), col(col), index(index) {
+ }
+
+ bool operator<(const ProductTerm& right) const {
+ if (row == right.row) {
+ if (col == right.col) {
+ return index < right.index;
+ }
+ return col < right.col;
+ }
+ return row < right.row;
+ }
+
+ int row;
+ int col;
+ int index;
+};
+
+CompressedRowSparseMatrix*
+CompressAndFillProgram(const int num_rows,
+ const int num_cols,
+ const vector<ProductTerm>& product,
+ vector<int>* program) {
+ CHECK_GT(product.size(), 0);
+
+ // Count the number of unique product term, which in turn is the
+ // number of non-zeros in the outer product.
+ int num_nonzeros = 1;
+ for (int i = 1; i < product.size(); ++i) {
+ if (product[i].row != product[i - 1].row ||
+ product[i].col != product[i - 1].col) {
+ ++num_nonzeros;
+ }
+ }
+
+ CompressedRowSparseMatrix* matrix =
+ new CompressedRowSparseMatrix(num_rows, num_cols, num_nonzeros);
+
+ int* crsm_rows = matrix->mutable_rows();
+ std::fill(crsm_rows, crsm_rows + num_rows + 1, 0);
+ int* crsm_cols = matrix->mutable_cols();
+ std::fill(crsm_cols, crsm_cols + num_nonzeros, 0);
+
+ CHECK_NOTNULL(program)->clear();
+ program->resize(product.size());
+
+ // Iterate over the sorted product terms. This means each row is
+ // filled one at a time, and we are able to assign a position in the
+ // values array to each term.
+ //
+ // If terms repeat, i.e., they contribute to the same entry in the
+ // result matrix), then they do not affect the sparsity structure of
+ // the result matrix.
+ int nnz = 0;
+ crsm_cols[0] = product[0].col;
+ crsm_rows[product[0].row + 1]++;
+ (*program)[product[0].index] = nnz;
+ for (int i = 1; i < product.size(); ++i) {
+ const ProductTerm& previous = product[i - 1];
+ const ProductTerm& current = product[i];
+
+ // Sparsity structure is updated only if the term is not a repeat.
+ if (previous.row != current.row || previous.col != current.col) {
+ crsm_cols[++nnz] = current.col;
+ crsm_rows[current.row + 1]++;
+ }
+
+ // All terms get assigned the position in the values array where
+ // their value is accumulated.
+ (*program)[current.index] = nnz;
+ }
+
+ for (int i = 1; i < num_rows + 1; ++i) {
+ crsm_rows[i] += crsm_rows[i - 1];
+ }
+
+ return matrix;
+}
+
+} // namespace
+
+CompressedRowSparseMatrix*
+CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
+ const CompressedRowSparseMatrix& m,
+ vector<int>* program) {
+ CHECK_NOTNULL(program)->clear();
+ CHECK_GT(m.num_nonzeros(), 0) << "Congratulations, "
+ << "you found a bug in Ceres. Please report it.";
+
+ vector<ProductTerm> product;
+ const vector<int>& row_blocks = m.row_blocks();
+ int row_block_begin = 0;
+ // Iterate over row blocks
+ for (int row_block = 0; row_block < row_blocks.size(); ++row_block) {
+ const int row_block_end = row_block_begin + row_blocks[row_block];
+ // Compute the outer product terms for just one row per row block.
+ const int r = row_block_begin;
+ // Compute the lower triangular part of the product.
+ for (int idx1 = m.rows()[r]; idx1 < m.rows()[r + 1]; ++idx1) {
+ for (int idx2 = m.rows()[r]; idx2 <= idx1; ++idx2) {
+ product.push_back(ProductTerm(m.cols()[idx1], m.cols()[idx2], product.size()));
+ }
+ }
+ row_block_begin = row_block_end;
+ }
+ CHECK_EQ(row_block_begin, m.num_rows());
+ sort(product.begin(), product.end());
+ return CompressAndFillProgram(m.num_cols(), m.num_cols(), product, program);
+}
+
+void CompressedRowSparseMatrix::ComputeOuterProduct(
+ const CompressedRowSparseMatrix& m,
+ const vector<int>& program,
+ CompressedRowSparseMatrix* result) {
+ result->SetZero();
+ double* values = result->mutable_values();
+ const vector<int>& row_blocks = m.row_blocks();
+
+ int cursor = 0;
+ int row_block_begin = 0;
+ const double* m_values = m.values();
+ const int* m_rows = m.rows();
+ // Iterate over row blocks.
+ for (int row_block = 0; row_block < row_blocks.size(); ++row_block) {
+ const int row_block_end = row_block_begin + row_blocks[row_block];
+ const int saved_cursor = cursor;
+ for (int r = row_block_begin; r < row_block_end; ++r) {
+ // Reuse the program segment for each row in this row block.
+ cursor = saved_cursor;
+ const int row_begin = m_rows[r];
+ const int row_end = m_rows[r + 1];
+ for (int idx1 = row_begin; idx1 < row_end; ++idx1) {
+ const double v1 = m_values[idx1];
+ for (int idx2 = row_begin; idx2 <= idx1; ++idx2, ++cursor) {
+ values[program[cursor]] += v1 * m_values[idx2];
+ }
+ }
+ }
+ row_block_begin = row_block_end;
+ }
+
+ CHECK_EQ(row_block_begin, m.num_rows());
+ CHECK_EQ(cursor, program.size());
+}
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/compressed_row_sparse_matrix.h b/internal/ceres/compressed_row_sparse_matrix.h
index c5721eb..a0ba7ee 100644
--- a/internal/ceres/compressed_row_sparse_matrix.h
+++ b/internal/ceres/compressed_row_sparse_matrix.h
@@ -115,6 +115,9 @@ class CompressedRowSparseMatrix : public SparseMatrix {
const vector<int>& col_blocks() const { return col_blocks_; }
vector<int>* mutable_col_blocks() { return &col_blocks_; }
+ // Destructive array resizing method.
+ void SetMaxNumNonZeros(int num_nonzeros);
+
// Non-destructive array resizing method.
void set_num_rows(const int num_rows) { num_rows_ = num_rows; }
void set_num_cols(const int num_cols) { num_cols_ = num_cols; }
@@ -128,6 +131,32 @@ class CompressedRowSparseMatrix : public SparseMatrix {
const double* diagonal,
const vector<int>& blocks);
+ // Compute the sparsity structure of the product m.transpose() * m
+ // and create a CompressedRowSparseMatrix corresponding to it.
+ //
+ // Also compute a "program" vector, which for every term in the
+ // outer product points to the entry in the values array of the
+ // result matrix where it should be accumulated.
+ //
+ // This program is used by the ComputeOuterProduct function below to
+ // compute the outer product.
+ //
+ // Since the entries of the program are the same for rows with the
+ // same sparsity structure, the program only stores the result for
+ // one row per row block. The ComputeOuterProduct function reuses
+ // this information for each row in the row block.
+ static CompressedRowSparseMatrix* CreateOuterProductMatrixAndProgram(
+ const CompressedRowSparseMatrix& m,
+ vector<int>* program);
+
+ // Compute the values array for the expression m.transpose() * m,
+ // where the matrix used to store the result and a program have been
+ // created using the CreateOuterProductMatrixAndProgram function
+ // above.
+ static void ComputeOuterProduct(const CompressedRowSparseMatrix& m,
+ const vector<int>& program,
+ CompressedRowSparseMatrix* result);
+
private:
int num_rows_;
int num_cols_;
diff --git a/internal/ceres/compressed_row_sparse_matrix_test.cc b/internal/ceres/compressed_row_sparse_matrix_test.cc
index 02109cc..999a661 100644
--- a/internal/ceres/compressed_row_sparse_matrix_test.cc
+++ b/internal/ceres/compressed_row_sparse_matrix_test.cc
@@ -30,11 +30,14 @@
#include "ceres/compressed_row_sparse_matrix.h"
+#include <numeric>
#include "ceres/casts.h"
#include "ceres/crs_matrix.h"
+#include "ceres/cxsparse.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/linear_least_squares_problems.h"
+#include "ceres/random.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"
#include "gtest/gtest.h"
@@ -76,6 +79,14 @@ class CompressedRowSparseMatrixTest : public ::testing::Test {
num_rows = tsm->num_rows();
num_cols = tsm->num_cols();
+
+ vector<int>* row_blocks = crsm->mutable_row_blocks();
+ row_blocks->resize(num_rows);
+ std::fill(row_blocks->begin(), row_blocks->end(), 1);
+
+ vector<int>* col_blocks = crsm->mutable_col_blocks();
+ col_blocks->resize(num_cols);
+ std::fill(col_blocks->begin(), col_blocks->end(), 1);
}
int num_rows;
@@ -126,6 +137,9 @@ TEST_F(CompressedRowSparseMatrixTest, Scale) {
}
TEST_F(CompressedRowSparseMatrixTest, DeleteRows) {
+ // Clear the row and column blocks as these are purely scalar tests.
+ crsm->mutable_row_blocks()->clear();
+ crsm->mutable_col_blocks()->clear();
for (int i = 0; i < num_rows; ++i) {
tsm->Resize(num_rows - i, num_cols);
crsm->DeleteRows(crsm->num_rows() - tsm->num_rows());
@@ -134,6 +148,10 @@ TEST_F(CompressedRowSparseMatrixTest, DeleteRows) {
}
TEST_F(CompressedRowSparseMatrixTest, AppendRows) {
+ // Clear the row and column blocks as these are purely scalar tests.
+ crsm->mutable_row_blocks()->clear();
+ crsm->mutable_col_blocks()->clear();
+
for (int i = 0; i < num_rows; ++i) {
TripletSparseMatrix tsm_appendage(*tsm);
tsm_appendage.Resize(i, num_cols);
@@ -146,6 +164,47 @@ TEST_F(CompressedRowSparseMatrixTest, AppendRows) {
}
}
+TEST_F(CompressedRowSparseMatrixTest, AppendAndDeleteBlockDiagonalMatrix) {
+ int num_diagonal_rows = crsm->num_cols();
+
+ scoped_array<double> diagonal(new double[num_diagonal_rows]);
+ for (int i = 0; i < num_diagonal_rows; ++i) {
+ diagonal[i] =i;
+ }
+
+ vector<int> row_and_column_blocks;
+ row_and_column_blocks.push_back(1);
+ row_and_column_blocks.push_back(2);
+ row_and_column_blocks.push_back(2);
+
+ const vector<int> pre_row_blocks = crsm->row_blocks();
+ const vector<int> pre_col_blocks = crsm->col_blocks();
+
+ scoped_ptr<CompressedRowSparseMatrix> appendage(
+ CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
+ diagonal.get(), row_and_column_blocks));
+ LOG(INFO) << appendage->row_blocks().size();
+
+ crsm->AppendRows(*appendage);
+
+ const vector<int> post_row_blocks = crsm->row_blocks();
+ const vector<int> post_col_blocks = crsm->col_blocks();
+
+ vector<int> expected_row_blocks = pre_row_blocks;
+ expected_row_blocks.insert(expected_row_blocks.end(),
+ row_and_column_blocks.begin(),
+ row_and_column_blocks.end());
+
+ vector<int> expected_col_blocks = pre_col_blocks;
+
+ EXPECT_EQ(expected_row_blocks, crsm->row_blocks());
+ EXPECT_EQ(expected_col_blocks, crsm->col_blocks());
+
+ crsm->DeleteRows(num_diagonal_rows);
+ EXPECT_EQ(crsm->row_blocks(), pre_row_blocks);
+ EXPECT_EQ(crsm->col_blocks(), pre_col_blocks);
+}
+
TEST_F(CompressedRowSparseMatrixTest, ToDenseMatrix) {
Matrix tsm_dense;
Matrix crsm_dense;
@@ -279,10 +338,22 @@ TEST(CompressedRowSparseMatrix, Transpose) {
// 13 0 14 15 9 0
// 0 16 17 0 0 0
+ // Block structure:
+ // A A A A B B
+ // A A A A B B
+ // A A A A B B
+ // C C C C D D
+ // C C C C D D
+ // C C C C D D
+
CompressedRowSparseMatrix matrix(5, 6, 30);
int* rows = matrix.mutable_rows();
int* cols = matrix.mutable_cols();
double* values = matrix.mutable_values();
+ matrix.mutable_row_blocks()->push_back(3);
+ matrix.mutable_row_blocks()->push_back(3);
+ matrix.mutable_col_blocks()->push_back(4);
+ matrix.mutable_col_blocks()->push_back(2);
rows[0] = 0;
cols[0] = 1;
@@ -317,6 +388,16 @@ TEST(CompressedRowSparseMatrix, Transpose) {
scoped_ptr<CompressedRowSparseMatrix> transpose(matrix.Transpose());
+ ASSERT_EQ(transpose->row_blocks().size(), matrix.col_blocks().size());
+ for (int i = 0; i < transpose->row_blocks().size(); ++i) {
+ EXPECT_EQ(transpose->row_blocks()[i], matrix.col_blocks()[i]);
+ }
+
+ ASSERT_EQ(transpose->col_blocks().size(), matrix.row_blocks().size());
+ for (int i = 0; i < transpose->col_blocks().size(); ++i) {
+ EXPECT_EQ(transpose->col_blocks()[i], matrix.row_blocks()[i]);
+ }
+
Matrix dense_matrix;
matrix.ToDenseMatrix(&dense_matrix);
@@ -325,5 +406,170 @@ TEST(CompressedRowSparseMatrix, Transpose) {
EXPECT_NEAR((dense_matrix - dense_transpose.transpose()).norm(), 0.0, 1e-14);
}
+#ifndef CERES_NO_CXSPARSE
+
+struct RandomMatrixOptions {
+ int num_row_blocks;
+ int min_row_block_size;
+ int max_row_block_size;
+ int num_col_blocks;
+ int min_col_block_size;
+ int max_col_block_size;
+ double block_density;
+};
+
+CompressedRowSparseMatrix* CreateRandomCompressedRowSparseMatrix(
+ const RandomMatrixOptions& options) {
+ vector<int> row_blocks;
+ for (int i = 0; i < options.num_row_blocks; ++i) {
+ const int delta_block_size =
+ Uniform(options.max_row_block_size - options.min_row_block_size);
+ row_blocks.push_back(options.min_row_block_size + delta_block_size);
+ }
+
+ vector<int> col_blocks;
+ for (int i = 0; i < options.num_col_blocks; ++i) {
+ const int delta_block_size =
+ Uniform(options.max_col_block_size - options.min_col_block_size);
+ col_blocks.push_back(options.min_col_block_size + delta_block_size);
+ }
+
+ vector<int> rows;
+ vector<int> cols;
+ vector<double> values;
+
+ while (values.size() == 0) {
+ int row_block_begin = 0;
+ for (int r = 0; r < options.num_row_blocks; ++r) {
+ int col_block_begin = 0;
+ for (int c = 0; c < options.num_col_blocks; ++c) {
+ if (RandDouble() <= options.block_density) {
+ for (int i = 0; i < row_blocks[r]; ++i) {
+ for (int j = 0; j < col_blocks[c]; ++j) {
+ rows.push_back(row_block_begin + i);
+ cols.push_back(col_block_begin + j);
+ values.push_back(RandNormal());
+ }
+ }
+ }
+ col_block_begin += col_blocks[c];
+ }
+ row_block_begin += row_blocks[r];
+ }
+ }
+
+ const int num_rows = std::accumulate(row_blocks.begin(), row_blocks.end(), 0);
+ const int num_cols = std::accumulate(col_blocks.begin(), col_blocks.end(), 0);
+ const int num_nonzeros = values.size();
+
+ TripletSparseMatrix tsm(num_rows, num_cols, num_nonzeros);
+ std::copy(rows.begin(), rows.end(), tsm.mutable_rows());
+ std::copy(cols.begin(), cols.end(), tsm.mutable_cols());
+ std::copy(values.begin(), values.end(), tsm.mutable_values());
+ tsm.set_num_nonzeros(num_nonzeros);
+ CompressedRowSparseMatrix* matrix = new CompressedRowSparseMatrix(tsm);
+ (*matrix->mutable_row_blocks()) = row_blocks;
+ (*matrix->mutable_col_blocks()) = col_blocks;
+ return matrix;
+}
+
+void ToDenseMatrix(const cs_di* matrix, Matrix* dense_matrix) {
+ dense_matrix->resize(matrix->m, matrix->n);
+ dense_matrix->setZero();
+
+ for (int c = 0; c < matrix->n; ++c) {
+ for (int idx = matrix->p[c]; idx < matrix->p[c + 1]; ++idx) {
+ const int r = matrix->i[idx];
+ (*dense_matrix)(r, c) = matrix->x[idx];
+ }
+ }
+}
+
+TEST(CompressedRowSparseMatrix, ComputeOuterProduct) {
+ // "Randomly generated seed."
+ SetRandomState(29823);
+ int kMaxNumRowBlocks = 10;
+ int kMaxNumColBlocks = 10;
+ int kNumTrials = 10;
+
+ CXSparse cxsparse;
+ const double kTolerance = 1e-18;
+
+ // Create a random matrix, compute its outer product using CXSParse
+ // and ComputeOuterProduct. Convert both matrices to dense matrices
+ // and compare their upper triangular parts. They should be within
+ // kTolerance of each other.
+ for (int num_row_blocks = 1;
+ num_row_blocks < kMaxNumRowBlocks;
+ ++num_row_blocks) {
+ for (int num_col_blocks = 1;
+ num_col_blocks < kMaxNumColBlocks;
+ ++num_col_blocks) {
+ for (int trial = 0; trial < kNumTrials; ++trial) {
+
+
+ RandomMatrixOptions options;
+ options.num_row_blocks = num_row_blocks;
+ options.num_col_blocks = num_col_blocks;
+ options.min_row_block_size = 1;
+ options.max_row_block_size = 5;
+ options.min_col_block_size = 1;
+ options.max_col_block_size = 10;
+ options.block_density = std::max(0.1, RandDouble());
+
+ VLOG(2) << "num row blocks: " << options.num_row_blocks;
+ VLOG(2) << "num col blocks: " << options.num_col_blocks;
+ VLOG(2) << "min row block size: " << options.min_row_block_size;
+ VLOG(2) << "max row block size: " << options.max_row_block_size;
+ VLOG(2) << "min col block size: " << options.min_col_block_size;
+ VLOG(2) << "max col block size: " << options.max_col_block_size;
+ VLOG(2) << "block density: " << options.block_density;
+
+ scoped_ptr<CompressedRowSparseMatrix> matrix(
+ CreateRandomCompressedRowSparseMatrix(options));
+
+ cs_di cs_matrix_transpose = cxsparse.CreateSparseMatrixTransposeView(matrix.get());
+ cs_di* cs_matrix = cxsparse.TransposeMatrix(&cs_matrix_transpose);
+ cs_di* expected_outer_product =
+ cxsparse.MatrixMatrixMultiply(&cs_matrix_transpose, cs_matrix);
+
+ vector<int> program;
+ scoped_ptr<CompressedRowSparseMatrix> outer_product(
+ CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
+ *matrix, &program));
+ CompressedRowSparseMatrix::ComputeOuterProduct(*matrix,
+ program,
+ outer_product.get());
+
+ cs_di actual_outer_product =
+ cxsparse.CreateSparseMatrixTransposeView(outer_product.get());
+
+ ASSERT_EQ(actual_outer_product.m, actual_outer_product.n);
+ ASSERT_EQ(expected_outer_product->m, expected_outer_product->n);
+ ASSERT_EQ(actual_outer_product.m, expected_outer_product->m);
+
+ Matrix actual_matrix;
+ Matrix expected_matrix;
+
+ ToDenseMatrix(expected_outer_product, &expected_matrix);
+ expected_matrix.triangularView<Eigen::StrictlyLower>().setZero();
+
+ ToDenseMatrix(&actual_outer_product, &actual_matrix);
+ const double diff_norm = (actual_matrix - expected_matrix).norm() / expected_matrix.norm();
+ ASSERT_NEAR(diff_norm, 0.0, kTolerance)
+ << "expected: \n"
+ << expected_matrix
+ << "\nactual: \n"
+ << actual_matrix;
+
+ cxsparse.Free(cs_matrix);
+ cxsparse.Free(expected_outer_product);
+ }
+ }
+ }
+}
+
+#endif // CERES_NO_CXSPARSE
+
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/conjugate_gradients_solver.cc b/internal/ceres/conjugate_gradients_solver.cc
index ae8e877..524cb8a 100644
--- a/internal/ceres/conjugate_gradients_solver.cc
+++ b/internal/ceres/conjugate_gradients_solver.cc
@@ -44,6 +44,7 @@
#include "ceres/fpclassify.h"
#include "ceres/internal/eigen.h"
#include "ceres/linear_operator.h"
+#include "ceres/stringprintf.h"
#include "ceres/types.h"
#include "glog/logging.h"
@@ -55,9 +56,6 @@ bool IsZeroOrInfinity(double x) {
return ((x == 0.0) || (IsInfinite(x)));
}
-// Constant used in the MATLAB implementation ~ 2 * eps.
-const double kEpsilon = 2.2204e-16;
-
} // namespace
ConjugateGradientsSolver::ConjugateGradientsSolver(
@@ -76,17 +74,19 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
CHECK_EQ(A->num_rows(), A->num_cols());
LinearSolver::Summary summary;
- summary.termination_type = MAX_ITERATIONS;
+ summary.termination_type = LINEAR_SOLVER_NO_CONVERGENCE;
+ summary.message = "Maximum number of iterations reached.";
summary.num_iterations = 0;
- int num_cols = A->num_cols();
+ const int num_cols = A->num_cols();
VectorRef xref(x, num_cols);
ConstVectorRef bref(b, num_cols);
- double norm_b = bref.norm();
+ const double norm_b = bref.norm();
if (norm_b == 0.0) {
xref.setZero();
- summary.termination_type = TOLERANCE;
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ summary.message = "Convergence. |b| = 0.";
return summary;
}
@@ -95,15 +95,16 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
Vector z(num_cols);
Vector tmp(num_cols);
- double tol_r = per_solve_options.r_tolerance * norm_b;
+ const double tol_r = per_solve_options.r_tolerance * norm_b;
tmp.setZero();
A->RightMultiply(x, tmp.data());
r = bref - tmp;
double norm_r = r.norm();
-
if (norm_r <= tol_r) {
- summary.termination_type = TOLERANCE;
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ summary.message =
+ StringPrintf("Convergence. |r| = %e <= %e.", norm_r, tol_r);
return summary;
}
@@ -115,8 +116,6 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
for (summary.num_iterations = 1;
summary.num_iterations < options_.max_num_iterations;
++summary.num_iterations) {
- VLOG(3) << "cg iteration " << summary.num_iterations;
-
// Apply preconditioner
if (per_solve_options.preconditioner != NULL) {
z.setZero();
@@ -127,10 +126,9 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
double last_rho = rho;
rho = r.dot(z);
-
if (IsZeroOrInfinity(rho)) {
- LOG(ERROR) << "Numerical failure. rho = " << rho;
- summary.termination_type = FAILURE;
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+ summary.message = StringPrintf("Numerical failure. rho = r'z = %e.", rho);
break;
};
@@ -139,8 +137,9 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
} else {
double beta = rho / last_rho;
if (IsZeroOrInfinity(beta)) {
- LOG(ERROR) << "Numerical failure. beta = " << beta;
- summary.termination_type = FAILURE;
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+ summary.message = StringPrintf(
+ "Numerical failure. beta = rho_n / rho_{n-1} = %e.", beta);
break;
}
p = z + beta * p;
@@ -149,18 +148,18 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
Vector& q = z;
q.setZero();
A->RightMultiply(p.data(), q.data());
- double pq = p.dot(q);
-
+ const double pq = p.dot(q);
if ((pq <= 0) || IsInfinite(pq)) {
- LOG(ERROR) << "Numerical failure. pq = " << pq;
- summary.termination_type = FAILURE;
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+ summary.message = StringPrintf("Numerical failure. p'q = %e.", pq);
break;
}
- double alpha = rho / pq;
+ const double alpha = rho / pq;
if (IsInfinite(alpha)) {
- LOG(ERROR) << "Numerical failure. alpha " << alpha;
- summary.termination_type = FAILURE;
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+ summary.message =
+ StringPrintf("Numerical failure. alpha = rho / pq = %e", alpha);
break;
}
@@ -183,7 +182,7 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
// Quadratic model based termination.
// Q1 = x'Ax - 2 * b' x.
- double Q1 = -1.0 * xref.dot(bref + r);
+ const double Q1 = -1.0 * xref.dot(bref + r);
// For PSD matrices A, let
//
@@ -207,21 +206,23 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
// Journal of Computational and Applied Mathematics,
// 124(1-2), 45-59, 2000.
//
- double zeta = summary.num_iterations * (Q1 - Q0) / Q1;
- VLOG(3) << "Q termination: zeta " << zeta
- << " " << per_solve_options.q_tolerance;
+ const double zeta = summary.num_iterations * (Q1 - Q0) / Q1;
if (zeta < per_solve_options.q_tolerance) {
- summary.termination_type = TOLERANCE;
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ summary.message =
+ StringPrintf("Convergence: zeta = %e < %e",
+ zeta,
+ per_solve_options.q_tolerance);
break;
}
Q0 = Q1;
// Residual based termination.
norm_r = r. norm();
- VLOG(3) << "R termination: norm_r " << norm_r
- << " " << tol_r;
if (norm_r <= tol_r) {
- summary.termination_type = TOLERANCE;
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ summary.message =
+ StringPrintf("Convergence. |r| = %e <= %e.", norm_r, tol_r);
break;
}
}
diff --git a/internal/ceres/coordinate_descent_minimizer.cc b/internal/ceres/coordinate_descent_minimizer.cc
index c4da987..3b0553e 100644
--- a/internal/ceres/coordinate_descent_minimizer.cc
+++ b/internal/ceres/coordinate_descent_minimizer.cc
@@ -40,15 +40,15 @@
#include "ceres/evaluator.h"
#include "ceres/linear_solver.h"
#include "ceres/minimizer.h"
-#include "ceres/ordered_groups.h"
#include "ceres/parameter_block.h"
+#include "ceres/parameter_block_ordering.h"
#include "ceres/problem_impl.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/solver.h"
-#include "ceres/solver_impl.h"
#include "ceres/trust_region_minimizer.h"
#include "ceres/trust_region_strategy.h"
+#include "ceres/parameter_block_ordering.h"
namespace ceres {
namespace internal {
@@ -227,10 +227,44 @@ void CoordinateDescentMinimizer::Solve(Program* program,
minimizer_options.evaluator = evaluator.get();
minimizer_options.jacobian = jacobian.get();
minimizer_options.trust_region_strategy = trust_region_strategy.get();
+ minimizer_options.is_silent = true;
TrustRegionMinimizer minimizer;
minimizer.Minimize(minimizer_options, parameter, summary);
}
+bool CoordinateDescentMinimizer::IsOrderingValid(
+ const Program& program,
+ const ParameterBlockOrdering& ordering,
+ string* message) {
+ const map<int, set<double*> >& group_to_elements =
+ ordering.group_to_elements();
+
+ // Verify that each group is an independent set
+ map<int, set<double*> >::const_iterator it = group_to_elements.begin();
+ for ( ; it != group_to_elements.end(); ++it) {
+ if (!program.IsParameterBlockSetIndependent(it->second)) {
+ *message =
+ StringPrintf("The user-provided "
+ "parameter_blocks_for_inner_iterations does not "
+ "form an independent set. Group Id: %d", it->first);
+ return false;
+ }
+ }
+ return true;
+}
+
+// Find a recursive decomposition of the Hessian matrix as a set
+// of independent sets of decreasing size and reverse it. This
+// seems to work better in practice, i.e., Cameras before
+// points.
+ParameterBlockOrdering* CoordinateDescentMinimizer::CreateOrdering(
+ const Program& program) {
+ scoped_ptr<ParameterBlockOrdering> ordering(new ParameterBlockOrdering);
+ ComputeRecursiveIndependentSetOrdering(program, ordering.get());
+ ordering->Reverse();
+ return ordering.release();
+}
+
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/coordinate_descent_minimizer.h b/internal/ceres/coordinate_descent_minimizer.h
index 424acda..e324b38 100644
--- a/internal/ceres/coordinate_descent_minimizer.h
+++ b/internal/ceres/coordinate_descent_minimizer.h
@@ -37,12 +37,13 @@
#include "ceres/evaluator.h"
#include "ceres/minimizer.h"
#include "ceres/problem_impl.h"
-#include "ceres/program.h"
#include "ceres/solver.h"
namespace ceres {
namespace internal {
+class Program;
+
// Given a Program, and a ParameterBlockOrdering which partitions
// (non-exhaustively) the Hessian matrix into independent sets,
// perform coordinate descent on the parameter blocks in the
@@ -66,6 +67,17 @@ class CoordinateDescentMinimizer : public Minimizer {
double* parameters,
Solver::Summary* summary);
+ // Verify that each group in the ordering forms an independent set.
+ static bool IsOrderingValid(const Program& program,
+ const ParameterBlockOrdering& ordering,
+ string* message);
+
+  // Find a recursive decomposition of the Hessian matrix as a set
+  // of independent sets of decreasing size and reverse it. This
+  // seems to work better in practice, i.e., Cameras before
+  // points.
+ static ParameterBlockOrdering* CreateOrdering(const Program& program);
+
private:
void Solve(Program* program,
LinearSolver* linear_solver,
diff --git a/internal/ceres/corrector.cc b/internal/ceres/corrector.cc
index 60269a6..581fc6d 100644
--- a/internal/ceres/corrector.cc
+++ b/internal/ceres/corrector.cc
@@ -32,14 +32,14 @@
#include <cstddef>
#include <cmath>
+#include "ceres/internal/eigen.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
-Corrector::Corrector(double sq_norm, const double rho[3]) {
+Corrector::Corrector(const double sq_norm, const double rho[3]) {
CHECK_GE(sq_norm, 0.0);
- CHECK_GT(rho[1], 0.0);
sqrt_rho1_ = sqrt(rho[1]);
// If sq_norm = 0.0, the correction becomes trivial, the residual
@@ -84,6 +84,14 @@ Corrector::Corrector(double sq_norm, const double rho[3]) {
return;
}
+ // We now require that the first derivative of the loss function be
+ // positive only if the second derivative is positive. This is
+ // because when the second derivative is non-positive, we do not use
+ // the second order correction suggested by BANS and instead use a
+ // simpler first order strategy which does not use a division by the
+ // gradient of the loss function.
+ CHECK_GT(rho[1], 0.0);
+
// Calculate the smaller of the two solutions to the equation
//
// 0.5 * alpha^2 - alpha - rho'' / rho' * z'z = 0.
@@ -101,20 +109,25 @@ Corrector::Corrector(double sq_norm, const double rho[3]) {
alpha_sq_norm_ = alpha / sq_norm;
}
-void Corrector::CorrectResiduals(int num_rows, double* residuals) {
+void Corrector::CorrectResiduals(const int num_rows, double* residuals) {
DCHECK(residuals != NULL);
// Equation 11 in BANS.
- for (int r = 0; r < num_rows; ++r) {
- residuals[r] *= residual_scaling_;
- }
+ VectorRef(residuals, num_rows) *= residual_scaling_;
}
-void Corrector::CorrectJacobian(int num_rows,
- int num_cols,
+void Corrector::CorrectJacobian(const int num_rows,
+ const int num_cols,
double* residuals,
double* jacobian) {
DCHECK(residuals != NULL);
DCHECK(jacobian != NULL);
+
+ // The common case (rho[2] <= 0).
+ if (alpha_sq_norm_ == 0.0) {
+ VectorRef(jacobian, num_rows * num_cols) *= sqrt_rho1_;
+ return;
+ }
+
// Equation 11 in BANS.
//
// J = sqrt(rho) * (J - alpha^2 r * r' J)
diff --git a/internal/ceres/corrector_test.cc b/internal/ceres/corrector_test.cc
index 55e7d6b..9355616 100644
--- a/internal/ceres/corrector_test.cc
+++ b/internal/ceres/corrector_test.cc
@@ -43,14 +43,14 @@ namespace internal {
// If rho[1] is zero, the Corrector constructor should crash.
TEST(Corrector, ZeroGradientDeathTest) {
- const double kRho[] = {0.0, 0.0, 0.0};
+ const double kRho[] = {0.0, 0.0, 1.0};
EXPECT_DEATH_IF_SUPPORTED({Corrector c(1.0, kRho);},
".*");
}
// If rho[1] is negative, the Corrector constructor should crash.
TEST(Corrector, NegativeGradientDeathTest) {
- const double kRho[] = {0.0, -0.1, 0.0};
+ const double kRho[] = {0.0, -0.1, 1.0};
EXPECT_DEATH_IF_SUPPORTED({Corrector c(1.0, kRho);},
".*");
}
diff --git a/internal/ceres/cost_function_to_functor_test.cc b/internal/ceres/cost_function_to_functor_test.cc
index 90ccc82..fd828ce 100644
--- a/internal/ceres/cost_function_to_functor_test.cc
+++ b/internal/ceres/cost_function_to_functor_test.cc
@@ -42,9 +42,9 @@ void ExpectCostFunctionsAreEqual(const CostFunction& cost_function,
EXPECT_EQ(cost_function.num_residuals(),
actual_cost_function.num_residuals());
const int num_residuals = cost_function.num_residuals();
- const vector<int16>& parameter_block_sizes =
+ const vector<int32>& parameter_block_sizes =
cost_function.parameter_block_sizes();
- const vector<int16>& actual_parameter_block_sizes =
+ const vector<int32>& actual_parameter_block_sizes =
actual_cost_function.parameter_block_sizes();
EXPECT_EQ(parameter_block_sizes.size(),
actual_parameter_block_sizes.size());
diff --git a/internal/ceres/covariance_impl.cc b/internal/ceres/covariance_impl.cc
index 19d545c..821be49 100644
--- a/internal/ceres/covariance_impl.cc
+++ b/internal/ceres/covariance_impl.cc
@@ -35,8 +35,28 @@
#endif
#include <algorithm>
+#include <cstdlib>
#include <utility>
#include <vector>
+#include "Eigen/SparseCore"
+
+// Suppress unused local variable warning from Eigen Ordering.h #included by
+// SparseQR in Eigen 3.2.0. This was fixed in Eigen 3.2.1, but 3.2.0 is still
+// widely used (Ubuntu 14.04), and Ceres won't compile otherwise due to -Werror.
+#if defined(_MSC_VER)
+#pragma warning( push )
+#pragma warning( disable : 4189 )
+#else
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+#endif
+#include "Eigen/SparseQR"
+#if defined(_MSC_VER)
+#pragma warning( pop )
+#else
+#pragma GCC diagnostic pop
+#endif
+
#include "Eigen/SVD"
#include "ceres/compressed_col_sparse_matrix_utils.h"
#include "ceres/compressed_row_sparse_matrix.h"
@@ -52,40 +72,6 @@
namespace ceres {
namespace internal {
-namespace {
-
-// Per thread storage for SuiteSparse.
-#ifndef CERES_NO_SUITESPARSE
-
-struct PerThreadContext {
- explicit PerThreadContext(int num_rows)
- : solution(NULL),
- solution_set(NULL),
- y_workspace(NULL),
- e_workspace(NULL),
- rhs(NULL) {
- rhs = ss.CreateDenseVector(NULL, num_rows, num_rows);
- }
-
- ~PerThreadContext() {
- ss.Free(solution);
- ss.Free(solution_set);
- ss.Free(y_workspace);
- ss.Free(e_workspace);
- ss.Free(rhs);
- }
-
- cholmod_dense* solution;
- cholmod_sparse* solution_set;
- cholmod_dense* y_workspace;
- cholmod_dense* e_workspace;
- cholmod_dense* rhs;
- SuiteSparse ss;
-};
-
-#endif
-
-} // namespace
typedef vector<pair<const double*, const double*> > CovarianceBlocks;
@@ -164,9 +150,9 @@ bool CovarianceImpl::GetCovarianceBlock(const double* original_parameter_block1,
}
if (offset == row_size) {
- LOG(WARNING) << "Unable to find covariance block for "
- << original_parameter_block1 << " "
- << original_parameter_block2;
+ LOG(ERROR) << "Unable to find covariance block for "
+ << original_parameter_block1 << " "
+ << original_parameter_block2;
return false;
}
@@ -347,8 +333,8 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
// values of the parameter blocks. Thus iterating over the keys of
// parameter_block_to_row_index_ corresponds to iterating over the
// rows of the covariance matrix in order.
- int i = 0; // index into covariance_blocks.
- int cursor = 0; // index into the covariance matrix.
+ int i = 0; // index into covariance_blocks.
+ int cursor = 0; // index into the covariance matrix.
for (map<const double*, int>::const_iterator it =
parameter_block_to_row_index_.begin();
it != parameter_block_to_row_index_.end();
@@ -392,14 +378,18 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
bool CovarianceImpl::ComputeCovarianceValues() {
switch (options_.algorithm_type) {
- case (DENSE_SVD):
+ case DENSE_SVD:
return ComputeCovarianceValuesUsingDenseSVD();
#ifndef CERES_NO_SUITESPARSE
- case (SPARSE_CHOLESKY):
- return ComputeCovarianceValuesUsingSparseCholesky();
- case (SPARSE_QR):
- return ComputeCovarianceValuesUsingSparseQR();
+ case SUITE_SPARSE_QR:
+ return ComputeCovarianceValuesUsingSuiteSparseQR();
+#else
+ LOG(ERROR) << "SuiteSparse is required to use the "
+ << "SUITE_SPARSE_QR algorithm.";
+ return false;
#endif
+ case EIGEN_SPARSE_QR:
+ return ComputeCovarianceValuesUsingEigenSparseQR();
default:
LOG(ERROR) << "Unsupported covariance estimation algorithm type: "
<< CovarianceAlgorithmTypeToString(options_.algorithm_type);
@@ -408,186 +398,7 @@ bool CovarianceImpl::ComputeCovarianceValues() {
return false;
}
-bool CovarianceImpl::ComputeCovarianceValuesUsingSparseCholesky() {
- EventLogger event_logger(
- "CovarianceImpl::ComputeCovarianceValuesUsingSparseCholesky");
-#ifndef CERES_NO_SUITESPARSE
- if (covariance_matrix_.get() == NULL) {
- // Nothing to do, all zeros covariance matrix.
- return true;
- }
-
- SuiteSparse ss;
-
- CRSMatrix jacobian;
- problem_->Evaluate(evaluate_options_, NULL, NULL, NULL, &jacobian);
-
- event_logger.AddEvent("Evaluate");
- // m is a transposed view of the Jacobian.
- cholmod_sparse cholmod_jacobian_view;
- cholmod_jacobian_view.nrow = jacobian.num_cols;
- cholmod_jacobian_view.ncol = jacobian.num_rows;
- cholmod_jacobian_view.nzmax = jacobian.values.size();
- cholmod_jacobian_view.nz = NULL;
- cholmod_jacobian_view.p = reinterpret_cast<void*>(&jacobian.rows[0]);
- cholmod_jacobian_view.i = reinterpret_cast<void*>(&jacobian.cols[0]);
- cholmod_jacobian_view.x = reinterpret_cast<void*>(&jacobian.values[0]);
- cholmod_jacobian_view.z = NULL;
- cholmod_jacobian_view.stype = 0; // Matrix is not symmetric.
- cholmod_jacobian_view.itype = CHOLMOD_INT;
- cholmod_jacobian_view.xtype = CHOLMOD_REAL;
- cholmod_jacobian_view.dtype = CHOLMOD_DOUBLE;
- cholmod_jacobian_view.sorted = 1;
- cholmod_jacobian_view.packed = 1;
-
- cholmod_factor* factor = ss.AnalyzeCholesky(&cholmod_jacobian_view);
- event_logger.AddEvent("Symbolic Factorization");
- bool factorization_succeeded = ss.Cholesky(&cholmod_jacobian_view, factor);
- if (factorization_succeeded) {
- const double reciprocal_condition_number =
- cholmod_rcond(factor, ss.mutable_cc());
- if (reciprocal_condition_number <
- options_.min_reciprocal_condition_number) {
- LOG(WARNING) << "Cholesky factorization of J'J is not reliable. "
- << "Reciprocal condition number: "
- << reciprocal_condition_number << " "
- << "min_reciprocal_condition_number : "
- << options_.min_reciprocal_condition_number;
- factorization_succeeded = false;
- }
- }
-
- event_logger.AddEvent("Numeric Factorization");
- if (!factorization_succeeded) {
- ss.Free(factor);
- LOG(WARNING) << "Cholesky factorization failed.";
- return false;
- }
-
- const int num_rows = covariance_matrix_->num_rows();
- const int* rows = covariance_matrix_->rows();
- const int* cols = covariance_matrix_->cols();
- double* values = covariance_matrix_->mutable_values();
-
- // The following loop exploits the fact that the i^th column of A^{-1}
- // is given by the solution to the linear system
- //
- // A x = e_i
- //
- // where e_i is a vector with e(i) = 1 and all other entries zero.
- //
- // Since the covariance matrix is symmetric, the i^th row and column
- // are equal.
- //
- // The ifdef separates two different version of SuiteSparse. Newer
- // versions of SuiteSparse have the cholmod_solve2 function which
- // re-uses memory across calls.
-#if (SUITESPARSE_VERSION < 4002)
- cholmod_dense* rhs = ss.CreateDenseVector(NULL, num_rows, num_rows);
- double* rhs_x = reinterpret_cast<double*>(rhs->x);
-
- for (int r = 0; r < num_rows; ++r) {
- int row_begin = rows[r];
- int row_end = rows[r + 1];
- if (row_end == row_begin) {
- continue;
- }
-
- rhs_x[r] = 1.0;
- cholmod_dense* solution = ss.Solve(factor, rhs);
- double* solution_x = reinterpret_cast<double*>(solution->x);
- for (int idx = row_begin; idx < row_end; ++idx) {
- const int c = cols[idx];
- values[idx] = solution_x[c];
- }
- ss.Free(solution);
- rhs_x[r] = 0.0;
- }
-
- ss.Free(rhs);
-#else // SUITESPARSE_VERSION < 4002
-
- const int num_threads = options_.num_threads;
- vector<PerThreadContext*> contexts(num_threads);
- for (int i = 0; i < num_threads; ++i) {
- contexts[i] = new PerThreadContext(num_rows);
- }
-
- // The first call to cholmod_solve2 is not thread safe, since it
- // changes the factorization from supernodal to simplicial etc.
- {
- PerThreadContext* context = contexts[0];
- double* context_rhs_x = reinterpret_cast<double*>(context->rhs->x);
- context_rhs_x[0] = 1.0;
- cholmod_solve2(CHOLMOD_A,
- factor,
- context->rhs,
- NULL,
- &context->solution,
- &context->solution_set,
- &context->y_workspace,
- &context->e_workspace,
- context->ss.mutable_cc());
- context_rhs_x[0] = 0.0;
- }
-
-#pragma omp parallel for num_threads(num_threads) schedule(dynamic)
- for (int r = 0; r < num_rows; ++r) {
- int row_begin = rows[r];
- int row_end = rows[r + 1];
- if (row_end == row_begin) {
- continue;
- }
-
-# ifdef CERES_USE_OPENMP
- int thread_id = omp_get_thread_num();
-# else
- int thread_id = 0;
-# endif
-
- PerThreadContext* context = contexts[thread_id];
- double* context_rhs_x = reinterpret_cast<double*>(context->rhs->x);
- context_rhs_x[r] = 1.0;
-
- // TODO(sameeragarwal) There should be a more efficient way
- // involving the use of Bset but I am unable to make it work right
- // now.
- cholmod_solve2(CHOLMOD_A,
- factor,
- context->rhs,
- NULL,
- &context->solution,
- &context->solution_set,
- &context->y_workspace,
- &context->e_workspace,
- context->ss.mutable_cc());
-
- double* solution_x = reinterpret_cast<double*>(context->solution->x);
- for (int idx = row_begin; idx < row_end; ++idx) {
- const int c = cols[idx];
- values[idx] = solution_x[c];
- }
- context_rhs_x[r] = 0.0;
- }
-
- for (int i = 0; i < num_threads; ++i) {
- delete contexts[i];
- }
-
-#endif // SUITESPARSE_VERSION < 4002
-
- ss.Free(factor);
- event_logger.AddEvent("Inversion");
- return true;
-
-#else // CERES_NO_SUITESPARSE
-
- return false;
-
-#endif // CERES_NO_SUITESPARSE
-};
-
-bool CovarianceImpl::ComputeCovarianceValuesUsingSparseQR() {
+bool CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR() {
EventLogger event_logger(
"CovarianceImpl::ComputeCovarianceValuesUsingSparseQR");
@@ -681,10 +492,10 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSparseQR() {
CHECK_NOTNULL(R);
if (rank < cholmod_jacobian.ncol) {
- LOG(WARNING) << "Jacobian matrix is rank deficient."
- << "Number of columns: " << cholmod_jacobian.ncol
- << " rank: " << rank;
- delete []permutation;
+ LOG(ERROR) << "Jacobian matrix is rank deficient. "
+ << "Number of columns: " << cholmod_jacobian.ncol
+ << " rank: " << rank;
+ free(permutation);
cholmod_l_free_sparse(&R, &cc);
cholmod_l_finish(&cc);
return false;
@@ -739,7 +550,7 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSparseQR() {
}
}
- delete []permutation;
+ free(permutation);
cholmod_l_free_sparse(&R, &cc);
cholmod_l_finish(&cc);
event_logger.AddEvent("Inversion");
@@ -807,11 +618,11 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingDenseSVD() {
if (automatic_truncation) {
break;
} else {
- LOG(WARNING) << "Cholesky factorization of J'J is not reliable. "
- << "Reciprocal condition number: "
- << singular_value_ratio * singular_value_ratio << " "
- << "min_reciprocal_condition_number : "
- << options_.min_reciprocal_condition_number;
+ LOG(ERROR) << "Cholesky factorization of J'J is not reliable. "
+ << "Reciprocal condition number: "
+ << singular_value_ratio * singular_value_ratio << " "
+ << "min_reciprocal_condition_number: "
+ << options_.min_reciprocal_condition_number;
return false;
}
}
@@ -839,7 +650,102 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingDenseSVD() {
}
event_logger.AddEvent("CopyToCovarianceMatrix");
return true;
-};
+}
+
+bool CovarianceImpl::ComputeCovarianceValuesUsingEigenSparseQR() {
+ EventLogger event_logger(
+ "CovarianceImpl::ComputeCovarianceValuesUsingEigenSparseQR");
+ if (covariance_matrix_.get() == NULL) {
+ // Nothing to do, all zeros covariance matrix.
+ return true;
+ }
+
+ CRSMatrix jacobian;
+ problem_->Evaluate(evaluate_options_, NULL, NULL, NULL, &jacobian);
+ event_logger.AddEvent("Evaluate");
+
+ typedef Eigen::SparseMatrix<double, Eigen::ColMajor> EigenSparseMatrix;
+
+ // Convert the matrix to column major order as required by SparseQR.
+ EigenSparseMatrix sparse_jacobian =
+ Eigen::MappedSparseMatrix<double, Eigen::RowMajor>(
+ jacobian.num_rows, jacobian.num_cols,
+ static_cast<int>(jacobian.values.size()),
+ jacobian.rows.data(), jacobian.cols.data(), jacobian.values.data());
+ event_logger.AddEvent("ConvertToSparseMatrix");
+
+ Eigen::SparseQR<EigenSparseMatrix, Eigen::COLAMDOrdering<int> >
+ qr_solver(sparse_jacobian);
+ event_logger.AddEvent("QRDecomposition");
+
+  if (qr_solver.info() != Eigen::Success) {
+ LOG(ERROR) << "Eigen::SparseQR decomposition failed.";
+ return false;
+ }
+
+ if (qr_solver.rank() < jacobian.num_cols) {
+ LOG(ERROR) << "Jacobian matrix is rank deficient. "
+ << "Number of columns: " << jacobian.num_cols
+ << " rank: " << qr_solver.rank();
+ return false;
+ }
+
+ const int* rows = covariance_matrix_->rows();
+ const int* cols = covariance_matrix_->cols();
+ double* values = covariance_matrix_->mutable_values();
+
+ // Compute the inverse column permutation used by QR factorization.
+ Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic> inverse_permutation =
+ qr_solver.colsPermutation().inverse();
+
+ // The following loop exploits the fact that the i^th column of A^{-1}
+ // is given by the solution to the linear system
+ //
+ // A x = e_i
+ //
+ // where e_i is a vector with e(i) = 1 and all other entries zero.
+ //
+ // Since the covariance matrix is symmetric, the i^th row and column
+ // are equal.
+ const int num_cols = jacobian.num_cols;
+ const int num_threads = options_.num_threads;
+ scoped_array<double> workspace(new double[num_threads * num_cols]);
+
+#pragma omp parallel for num_threads(num_threads) schedule(dynamic)
+ for (int r = 0; r < num_cols; ++r) {
+ const int row_begin = rows[r];
+ const int row_end = rows[r + 1];
+ if (row_end == row_begin) {
+ continue;
+ }
+
+# ifdef CERES_USE_OPENMP
+ int thread_id = omp_get_thread_num();
+# else
+ int thread_id = 0;
+# endif
+
+ double* solution = workspace.get() + thread_id * num_cols;
+ SolveRTRWithSparseRHS<int>(
+ num_cols,
+ qr_solver.matrixR().innerIndexPtr(),
+ qr_solver.matrixR().outerIndexPtr(),
+ &qr_solver.matrixR().data().value(0),
+ inverse_permutation.indices().coeff(r),
+ solution);
+
+ // Assign the values of the computed covariance using the
+ // inverse permutation used in the QR factorization.
+ for (int idx = row_begin; idx < row_end; ++idx) {
+ const int c = cols[idx];
+ values[idx] = solution[inverse_permutation.indices().coeff(c)];
+ }
+ }
+
+ event_logger.AddEvent("Inverse");
+
+ return true;
+}
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/covariance_impl.h b/internal/ceres/covariance_impl.h
index 0e7e217..135f4a1 100644
--- a/internal/ceres/covariance_impl.h
+++ b/internal/ceres/covariance_impl.h
@@ -64,9 +64,9 @@ class CovarianceImpl {
ProblemImpl* problem);
bool ComputeCovarianceValues();
- bool ComputeCovarianceValuesUsingSparseCholesky();
- bool ComputeCovarianceValuesUsingSparseQR();
bool ComputeCovarianceValuesUsingDenseSVD();
+ bool ComputeCovarianceValuesUsingSuiteSparseQR();
+ bool ComputeCovarianceValuesUsingEigenSparseQR();
const CompressedRowSparseMatrix* covariance_matrix() const {
return covariance_matrix_.get();
diff --git a/internal/ceres/covariance_test.cc b/internal/ceres/covariance_test.cc
index f3a5051..6c506b7 100644
--- a/internal/ceres/covariance_test.cc
+++ b/internal/ceres/covariance_test.cc
@@ -125,7 +125,7 @@ TEST(CovarianceImpl, ComputeCovarianceSparsity) {
class UnaryCostFunction: public CostFunction {
public:
UnaryCostFunction(const int num_residuals,
- const int16 parameter_block_size,
+ const int32 parameter_block_size,
const double* jacobian)
: jacobian_(jacobian, jacobian + num_residuals * parameter_block_size) {
set_num_residuals(num_residuals);
@@ -158,8 +158,8 @@ class UnaryCostFunction: public CostFunction {
class BinaryCostFunction: public CostFunction {
public:
BinaryCostFunction(const int num_residuals,
- const int16 parameter_block1_size,
- const int16 parameter_block2_size,
+ const int32 parameter_block1_size,
+ const int32 parameter_block2_size,
const double* jacobian1,
const double* jacobian2)
: jacobian1_(jacobian1,
@@ -400,15 +400,15 @@ TEST_F(CovarianceTest, NormalBehavior) {
Covariance::Options options;
#ifndef CERES_NO_SUITESPARSE
- options.algorithm_type = SPARSE_CHOLESKY;
- ComputeAndCompareCovarianceBlocks(options, expected_covariance);
-
- options.algorithm_type = SPARSE_QR;
+ options.algorithm_type = SUITE_SPARSE_QR;
ComputeAndCompareCovarianceBlocks(options, expected_covariance);
#endif
options.algorithm_type = DENSE_SVD;
ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+
+ options.algorithm_type = EIGEN_SPARSE_QR;
+ ComputeAndCompareCovarianceBlocks(options, expected_covariance);
}
#ifdef CERES_USE_OPENMP
@@ -448,15 +448,15 @@ TEST_F(CovarianceTest, ThreadedNormalBehavior) {
options.num_threads = 4;
#ifndef CERES_NO_SUITESPARSE
- options.algorithm_type = SPARSE_CHOLESKY;
- ComputeAndCompareCovarianceBlocks(options, expected_covariance);
-
- options.algorithm_type = SPARSE_QR;
+ options.algorithm_type = SUITE_SPARSE_QR;
ComputeAndCompareCovarianceBlocks(options, expected_covariance);
#endif
options.algorithm_type = DENSE_SVD;
ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+
+ options.algorithm_type = EIGEN_SPARSE_QR;
+ ComputeAndCompareCovarianceBlocks(options, expected_covariance);
}
#endif // CERES_USE_OPENMP
@@ -497,15 +497,15 @@ TEST_F(CovarianceTest, ConstantParameterBlock) {
Covariance::Options options;
#ifndef CERES_NO_SUITESPARSE
- options.algorithm_type = SPARSE_CHOLESKY;
- ComputeAndCompareCovarianceBlocks(options, expected_covariance);
-
- options.algorithm_type = SPARSE_QR;
+ options.algorithm_type = SUITE_SPARSE_QR;
ComputeAndCompareCovarianceBlocks(options, expected_covariance);
#endif
options.algorithm_type = DENSE_SVD;
ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+
+ options.algorithm_type = EIGEN_SPARSE_QR;
+ ComputeAndCompareCovarianceBlocks(options, expected_covariance);
}
TEST_F(CovarianceTest, LocalParameterization) {
@@ -553,15 +553,15 @@ TEST_F(CovarianceTest, LocalParameterization) {
Covariance::Options options;
#ifndef CERES_NO_SUITESPARSE
- options.algorithm_type = SPARSE_CHOLESKY;
- ComputeAndCompareCovarianceBlocks(options, expected_covariance);
-
- options.algorithm_type = SPARSE_QR;
+ options.algorithm_type = SUITE_SPARSE_QR;
ComputeAndCompareCovarianceBlocks(options, expected_covariance);
#endif
options.algorithm_type = DENSE_SVD;
ComputeAndCompareCovarianceBlocks(options, expected_covariance);
+
+ options.algorithm_type = EIGEN_SPARSE_QR;
+ ComputeAndCompareCovarianceBlocks(options, expected_covariance);
}
@@ -727,7 +727,7 @@ class LargeScaleCovarianceTest : public ::testing::Test {
parameter_block_size_,
jacobian.data()),
NULL,
- block_i );
+ block_i);
for (int j = i; j < num_parameter_blocks_; ++j) {
double* block_j = parameters_.get() + j * parameter_block_size_;
all_covariance_blocks_.push_back(make_pair(block_i, block_j));
@@ -781,8 +781,7 @@ class LargeScaleCovarianceTest : public ::testing::Test {
#if !defined(CERES_NO_SUITESPARSE) && defined(CERES_USE_OPENMP)
TEST_F(LargeScaleCovarianceTest, Parallel) {
- ComputeAndCompare(SPARSE_CHOLESKY, 4);
- ComputeAndCompare(SPARSE_QR, 4);
+ ComputeAndCompare(SUITE_SPARSE_QR, 4);
}
#endif // !defined(CERES_NO_SUITESPARSE) && defined(CERES_USE_OPENMP)
diff --git a/internal/ceres/cxsparse.cc b/internal/ceres/cxsparse.cc
index c6d7743..87503d0 100644
--- a/internal/ceres/cxsparse.cc
+++ b/internal/ceres/cxsparse.cc
@@ -28,6 +28,9 @@
//
// Author: strandmark@google.com (Petter Strandmark)
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_NO_CXSPARSE
#include "ceres/cxsparse.h"
@@ -175,8 +178,8 @@ cs_di CXSparse::CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A) {
cs_di* CXSparse::CreateSparseMatrix(TripletSparseMatrix* tsm) {
cs_di_sparse tsm_wrapper;
- tsm_wrapper.nzmax = tsm->num_nonzeros();;
- tsm_wrapper.nz = tsm->num_nonzeros();;
+ tsm_wrapper.nzmax = tsm->num_nonzeros();
+ tsm_wrapper.nz = tsm->num_nonzeros();
tsm_wrapper.m = tsm->num_rows();
tsm_wrapper.n = tsm->num_cols();
tsm_wrapper.p = tsm->mutable_cols();
diff --git a/internal/ceres/cxsparse.h b/internal/ceres/cxsparse.h
index cd87908..5868401 100644
--- a/internal/ceres/cxsparse.h
+++ b/internal/ceres/cxsparse.h
@@ -31,11 +31,13 @@
#ifndef CERES_INTERNAL_CXSPARSE_H_
#define CERES_INTERNAL_CXSPARSE_H_
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_NO_CXSPARSE
#include <vector>
#include "cs.h"
-#include "ceres/internal/port.h"
namespace ceres {
namespace internal {
@@ -127,9 +129,13 @@ class CXSparse {
#else // CERES_NO_CXSPARSE
-class CXSparse {};
typedef void cs_dis;
+class CXSparse {
+ public:
+  void Free(void*) {}
+
+};
#endif // CERES_NO_CXSPARSE
#endif // CERES_INTERNAL_CXSPARSE_H_
diff --git a/internal/ceres/dense_normal_cholesky_solver.cc b/internal/ceres/dense_normal_cholesky_solver.cc
index fbf3cbe..f44d6da 100644
--- a/internal/ceres/dense_normal_cholesky_solver.cc
+++ b/internal/ceres/dense_normal_cholesky_solver.cc
@@ -95,9 +95,19 @@ LinearSolver::Summary DenseNormalCholeskySolver::SolveUsingEigen(
LinearSolver::Summary summary;
summary.num_iterations = 1;
- summary.termination_type = TOLERANCE;
- VectorRef(x, num_cols) =
- lhs.selfadjointView<Eigen::Upper>().llt().solve(rhs);
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ Eigen::LLT<Matrix, Eigen::Upper> llt =
+ lhs.selfadjointView<Eigen::Upper>().llt();
+
+ if (llt.info() != Eigen::Success) {
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+ summary.message = "Eigen LLT decomposition failed.";
+ } else {
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ summary.message = "Success.";
+ }
+
+ VectorRef(x, num_cols) = llt.solve(rhs);
event_logger.AddEvent("Solve");
return summary;
}
@@ -142,14 +152,14 @@ LinearSolver::Summary DenseNormalCholeskySolver::SolveUsingLAPACK(
A->matrix().transpose() * ConstVectorRef(b, A->num_rows());
event_logger.AddEvent("Product");
- const int info = LAPACK::SolveInPlaceUsingCholesky(num_cols, lhs.data(), x);
- event_logger.AddEvent("Solve");
-
LinearSolver::Summary summary;
summary.num_iterations = 1;
- summary.termination_type = info == 0 ? TOLERANCE : FAILURE;
-
- event_logger.AddEvent("TearDown");
+ summary.termination_type =
+ LAPACK::SolveInPlaceUsingCholesky(num_cols,
+ lhs.data(),
+ x,
+ &summary.message);
+ event_logger.AddEvent("Solve");
return summary;
}
} // namespace internal
diff --git a/internal/ceres/dense_qr_solver.cc b/internal/ceres/dense_qr_solver.cc
index d76d58b..4388357 100644
--- a/internal/ceres/dense_qr_solver.cc
+++ b/internal/ceres/dense_qr_solver.cc
@@ -60,6 +60,7 @@ LinearSolver::Summary DenseQRSolver::SolveImpl(
return SolveUsingLAPACK(A, b, per_solve_options, x);
}
}
+
LinearSolver::Summary DenseQRSolver::SolveUsingLAPACK(
DenseSparseMatrix* A,
const double* b,
@@ -100,21 +101,18 @@ LinearSolver::Summary DenseQRSolver::SolveUsingLAPACK(
work_.resize(work_size);
}
- const int info = LAPACK::SolveUsingQR(lhs_.rows(),
- lhs_.cols(),
- lhs_.data(),
- work_.rows(),
- work_.data(),
- rhs_.data());
- event_logger.AddEvent("Solve");
-
LinearSolver::Summary summary;
summary.num_iterations = 1;
- if (info == 0) {
+ summary.termination_type = LAPACK::SolveInPlaceUsingQR(lhs_.rows(),
+ lhs_.cols(),
+ lhs_.data(),
+ work_.rows(),
+ work_.data(),
+ rhs_.data(),
+ &summary.message);
+ event_logger.AddEvent("Solve");
+ if (summary.termination_type == LINEAR_SOLVER_SUCCESS) {
VectorRef(x, num_cols) = rhs_.head(num_cols);
- summary.termination_type = TOLERANCE;
- } else {
- summary.termination_type = FAILURE;
}
event_logger.AddEvent("TearDown");
@@ -161,7 +159,8 @@ LinearSolver::Summary DenseQRSolver::SolveUsingEigen(
// is good enough or not.
LinearSolver::Summary summary;
summary.num_iterations = 1;
- summary.termination_type = TOLERANCE;
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ summary.message = "Success.";
event_logger.AddEvent("TearDown");
return summary;
diff --git a/internal/ceres/dogleg_strategy.cc b/internal/ceres/dogleg_strategy.cc
index c85c8e5..f29376d 100644
--- a/internal/ceres/dogleg_strategy.cc
+++ b/internal/ceres/dogleg_strategy.cc
@@ -99,7 +99,7 @@ TrustRegionStrategy::Summary DoglegStrategy::ComputeStep(
}
TrustRegionStrategy::Summary summary;
summary.num_iterations = 0;
- summary.termination_type = TOLERANCE;
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
return summary;
}
@@ -135,7 +135,11 @@ TrustRegionStrategy::Summary DoglegStrategy::ComputeStep(
summary.num_iterations = linear_solver_summary.num_iterations;
summary.termination_type = linear_solver_summary.termination_type;
- if (linear_solver_summary.termination_type != FAILURE) {
+ if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
+ return summary;
+ }
+
+ if (linear_solver_summary.termination_type != LINEAR_SOLVER_FAILURE) {
switch (dogleg_type_) {
// Interpolate the Cauchy point and the Gauss-Newton step.
case TRADITIONAL_DOGLEG:
@@ -146,7 +150,7 @@ TrustRegionStrategy::Summary DoglegStrategy::ComputeStep(
// Cauchy point and the (Gauss-)Newton step.
case SUBSPACE_DOGLEG:
if (!ComputeSubspaceModel(jacobian)) {
- summary.termination_type = FAILURE;
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
break;
}
ComputeSubspaceDoglegStep(step);
@@ -513,7 +517,7 @@ LinearSolver::Summary DoglegStrategy::ComputeGaussNewtonStep(
const double* residuals) {
const int n = jacobian->num_cols();
LinearSolver::Summary linear_solver_summary;
- linear_solver_summary.termination_type = FAILURE;
+ linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE;
// The Jacobian matrix is often quite poorly conditioned. Thus it is
// necessary to add a diagonal matrix at the bottom to prevent the
@@ -526,7 +530,7 @@ LinearSolver::Summary DoglegStrategy::ComputeGaussNewtonStep(
// If the solve fails, the multiplier to the diagonal is increased
// up to max_mu_ by a factor of mu_increase_factor_ every time. If
// the linear solver is still not successful, the strategy returns
- // with FAILURE.
+ // with LINEAR_SOLVER_FAILURE.
//
// Next time when a new Gauss-Newton step is requested, the
// multiplier starts out from the last successful solve.
@@ -579,17 +583,21 @@ LinearSolver::Summary DoglegStrategy::ComputeGaussNewtonStep(
}
}
- if (linear_solver_summary.termination_type == FAILURE ||
+ if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
+ return linear_solver_summary;
+ }
+
+ if (linear_solver_summary.termination_type == LINEAR_SOLVER_FAILURE ||
!IsArrayValid(n, gauss_newton_step_.data())) {
mu_ *= mu_increase_factor_;
VLOG(2) << "Increasing mu " << mu_;
- linear_solver_summary.termination_type = FAILURE;
+ linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE;
continue;
}
break;
}
- if (linear_solver_summary.termination_type != FAILURE) {
+ if (linear_solver_summary.termination_type != LINEAR_SOLVER_FAILURE) {
// The scaled Gauss-Newton step is D * GN:
//
// - (D^-1 J^T J D^-1)^-1 (D^-1 g)
diff --git a/internal/ceres/dogleg_strategy_test.cc b/internal/ceres/dogleg_strategy_test.cc
index ace635f..795719d 100644
--- a/internal/ceres/dogleg_strategy_test.cc
+++ b/internal/ceres/dogleg_strategy_test.cc
@@ -144,7 +144,7 @@ TEST_F(DoglegStrategyFixtureEllipse, TrustRegionObeyedTraditional) {
residual_.data(),
x_.data());
- EXPECT_NE(summary.termination_type, FAILURE);
+ EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
EXPECT_LE(x_.norm(), options_.initial_radius * (1.0 + 4.0 * kEpsilon));
}
@@ -164,7 +164,7 @@ TEST_F(DoglegStrategyFixtureEllipse, TrustRegionObeyedSubspace) {
residual_.data(),
x_.data());
- EXPECT_NE(summary.termination_type, FAILURE);
+ EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
EXPECT_LE(x_.norm(), options_.initial_radius * (1.0 + 4.0 * kEpsilon));
}
@@ -184,7 +184,7 @@ TEST_F(DoglegStrategyFixtureEllipse, CorrectGaussNewtonStep) {
residual_.data(),
x_.data());
- EXPECT_NE(summary.termination_type, FAILURE);
+ EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
EXPECT_NEAR(x_(0), 1.0, kToleranceLoose);
EXPECT_NEAR(x_(1), 1.0, kToleranceLoose);
EXPECT_NEAR(x_(2), 1.0, kToleranceLoose);
@@ -246,7 +246,7 @@ TEST_F(DoglegStrategyFixtureValley, CorrectStepLocalOptimumAlongGradient) {
residual_.data(),
x_.data());
- EXPECT_NE(summary.termination_type, FAILURE);
+ EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
EXPECT_NEAR(x_(0), 0.0, kToleranceLoose);
EXPECT_NEAR(x_(1), 0.0, kToleranceLoose);
EXPECT_NEAR(x_(2), options_.initial_radius, kToleranceLoose);
@@ -274,7 +274,7 @@ TEST_F(DoglegStrategyFixtureValley, CorrectStepGlobalOptimumAlongGradient) {
residual_.data(),
x_.data());
- EXPECT_NE(summary.termination_type, FAILURE);
+ EXPECT_NE(summary.termination_type, LINEAR_SOLVER_FAILURE);
EXPECT_NEAR(x_(0), 0.0, kToleranceLoose);
EXPECT_NEAR(x_(1), 0.0, kToleranceLoose);
EXPECT_NEAR(x_(2), 1.0, kToleranceLoose);
diff --git a/internal/ceres/dynamic_compressed_row_finalizer.h b/internal/ceres/dynamic_compressed_row_finalizer.h
new file mode 100644
index 0000000..5e6b0d8
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_finalizer.h
@@ -0,0 +1,51 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+
+#ifndef CERES_INTERNAL_DYNAMIC_COMPRESED_ROW_FINALIZER_H_
+#define CERES_INTERNAL_DYNAMIC_COMPRESED_ROW_FINALIZER_H_
+
+#include "ceres/casts.h"
+#include "ceres/dynamic_compressed_row_sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+struct DynamicCompressedRowJacobianFinalizer {
+ void operator()(SparseMatrix* base_jacobian, int num_parameters) {
+ DynamicCompressedRowSparseMatrix* jacobian =
+ down_cast<DynamicCompressedRowSparseMatrix*>(base_jacobian);
+ jacobian->Finalize(num_parameters);
+ }
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif  // CERES_INTERNAL_DYNAMIC_COMPRESED_ROW_FINALIZER_H_
diff --git a/internal/ceres/dynamic_compressed_row_jacobian_writer.cc b/internal/ceres/dynamic_compressed_row_jacobian_writer.cc
new file mode 100644
index 0000000..2f01617
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_jacobian_writer.cc
@@ -0,0 +1,107 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+
+#include "ceres/compressed_row_jacobian_writer.h"
+#include "ceres/dynamic_compressed_row_jacobian_writer.h"
+#include "ceres/casts.h"
+#include "ceres/dynamic_compressed_row_sparse_matrix.h"
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+
+namespace ceres {
+namespace internal {
+
+ScratchEvaluatePreparer*
+DynamicCompressedRowJacobianWriter::CreateEvaluatePreparers(int num_threads) {
+ return ScratchEvaluatePreparer::Create(*program_, num_threads);
+}
+
+SparseMatrix* DynamicCompressedRowJacobianWriter::CreateJacobian() const {
+ // Initialize `jacobian` with zero number of `max_num_nonzeros`.
+ const int num_residuals = program_->NumResiduals();
+ const int num_effective_parameters = program_->NumEffectiveParameters();
+
+ DynamicCompressedRowSparseMatrix* jacobian =
+ new DynamicCompressedRowSparseMatrix(num_residuals,
+ num_effective_parameters,
+ 0);
+
+ CompressedRowJacobianWriter::PopulateJacobianRowAndColumnBlockVectors(
+ program_, jacobian);
+
+ return jacobian;
+}
+
+void DynamicCompressedRowJacobianWriter::Write(int residual_id,
+ int residual_offset,
+ double **jacobians,
+ SparseMatrix* base_jacobian) {
+ DynamicCompressedRowSparseMatrix* jacobian =
+ down_cast<DynamicCompressedRowSparseMatrix*>(base_jacobian);
+
+ // Get the `residual_block` of interest.
+ const ResidualBlock* residual_block =
+ program_->residual_blocks()[residual_id];
+ const int num_residuals = residual_block->NumResiduals();
+
+ vector<pair<int, int> > evaluated_jacobian_blocks;
+ CompressedRowJacobianWriter::GetOrderedParameterBlocks(
+ program_, residual_id, &evaluated_jacobian_blocks);
+
+ // `residual_offset` is the residual row in the global jacobian.
+ // Empty the jacobian rows.
+ jacobian->ClearRows(residual_offset, num_residuals);
+
+ // Iterate over each parameter block.
+ for (int i = 0; i < evaluated_jacobian_blocks.size(); ++i) {
+ const ParameterBlock* parameter_block =
+ program_->parameter_blocks()[evaluated_jacobian_blocks[i].first];
+ const int parameter_block_jacobian_index =
+ evaluated_jacobian_blocks[i].second;
+ const int parameter_block_size = parameter_block->LocalSize();
+
+ // For each parameter block only insert its non-zero entries.
+ for (int r = 0; r < num_residuals; ++r) {
+ for (int c = 0; c < parameter_block_size; ++c) {
+ const double& v = jacobians[parameter_block_jacobian_index][
+ r * parameter_block_size + c];
+ // Only insert non-zero entries.
+ if (v != 0.0) {
+ jacobian->InsertEntry(
+ residual_offset + r, parameter_block->delta_offset() + c, v);
+ }
+ }
+ }
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/dynamic_compressed_row_jacobian_writer.h b/internal/ceres/dynamic_compressed_row_jacobian_writer.h
new file mode 100644
index 0000000..df9581b
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_jacobian_writer.h
@@ -0,0 +1,83 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+//
+// A jacobian writer that directly writes to dynamic compressed row sparse
+// matrices.
+
+#ifndef CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_JACOBIAN_WRITER_H_
+#define CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_JACOBIAN_WRITER_H_
+
+#include "ceres/evaluator.h"
+#include "ceres/scratch_evaluate_preparer.h"
+
+namespace ceres {
+namespace internal {
+
+class Program;
+class SparseMatrix;
+
+class DynamicCompressedRowJacobianWriter {
+ public:
+ DynamicCompressedRowJacobianWriter(Evaluator::Options /* ignored */,
+ Program* program)
+ : program_(program) {
+ }
+
+ // JacobianWriter interface.
+
+ // The compressed row matrix has different layout than that assumed by
+ // the cost functions. The scratch space is therefore used to store
+ // the jacobians (including zeros) temporarily before only the non-zero
+ // entries are copied over to the larger jacobian in `Write`.
+ ScratchEvaluatePreparer* CreateEvaluatePreparers(int num_threads);
+
+ // Return a `DynamicCompressedRowSparseMatrix` which is filled by
+ // `Write`. Note that `Finalize` must be called to make the
+ // `CompressedRowSparseMatrix` interface valid.
+ SparseMatrix* CreateJacobian() const;
+
+ // Write only the non-zero jacobian entries for a residual block
+ // (specified by `residual_id`) into `base_jacobian`, starting at the row
+  // specified by `residual_offset`.
+ //
+ // This method is thread-safe over residual blocks (each `residual_id`).
+ void Write(int residual_id,
+ int residual_offset,
+ double **jacobians,
+ SparseMatrix* base_jacobian);
+
+ private:
+ Program* program_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_JACOBIAN_WRITER_H_
diff --git a/internal/ceres/dynamic_compressed_row_sparse_matrix.cc b/internal/ceres/dynamic_compressed_row_sparse_matrix.cc
new file mode 100644
index 0000000..f285d52
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_sparse_matrix.cc
@@ -0,0 +1,107 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+
+#include <cstring>
+#include "ceres/dynamic_compressed_row_sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+DynamicCompressedRowSparseMatrix::DynamicCompressedRowSparseMatrix(
+ int num_rows,
+ int num_cols,
+ int initial_max_num_nonzeros)
+ : CompressedRowSparseMatrix(num_rows,
+ num_cols,
+ initial_max_num_nonzeros) {
+ dynamic_cols_.resize(num_rows);
+ dynamic_values_.resize(num_rows);
+ }
+
+void DynamicCompressedRowSparseMatrix::InsertEntry(int row,
+ int col,
+ const double& value) {
+ CHECK_GE(row, 0);
+ CHECK_LT(row, num_rows());
+ CHECK_GE(col, 0);
+ CHECK_LT(col, num_cols());
+ dynamic_cols_[row].push_back(col);
+ dynamic_values_[row].push_back(value);
+}
+
+void DynamicCompressedRowSparseMatrix::ClearRows(int row_start,
+ int num_rows) {
+ for (int r = 0; r < num_rows; ++r) {
+ const int i = row_start + r;
+ CHECK_GE(i, 0);
+ CHECK_LT(i, this->num_rows());
+ dynamic_cols_[i].resize(0);
+ dynamic_values_[i].resize(0);
+ }
+}
+
+void DynamicCompressedRowSparseMatrix::Finalize(int num_additional_elements) {
+ // `num_additional_elements` is provided as an argument so that additional
+ // storage can be reserved when it is known by the finalizer.
+ CHECK_GE(num_additional_elements, 0);
+
+ // Count the number of non-zeros and resize `cols_` and `values_`.
+ int num_jacobian_nonzeros = 0;
+ for (int i = 0; i < dynamic_cols_.size(); ++i) {
+ num_jacobian_nonzeros += dynamic_cols_[i].size();
+ }
+
+ SetMaxNumNonZeros(num_jacobian_nonzeros + num_additional_elements);
+
+ // Flatten `dynamic_cols_` into `cols_` and `dynamic_values_`
+ // into `values_`.
+ int index_into_values_and_cols = 0;
+ for (int i = 0; i < num_rows(); ++i) {
+ mutable_rows()[i] = index_into_values_and_cols;
+ const int num_nonzero_columns = dynamic_cols_[i].size();
+ if (num_nonzero_columns > 0) {
+ memcpy(mutable_cols() + index_into_values_and_cols,
+ &dynamic_cols_[i][0],
+ dynamic_cols_[i].size() * sizeof(dynamic_cols_[0][0]));
+ memcpy(mutable_values() + index_into_values_and_cols,
+ &dynamic_values_[i][0],
+ dynamic_values_[i].size() * sizeof(dynamic_values_[0][0]));
+ index_into_values_and_cols += dynamic_cols_[i].size();
+ }
+ }
+ mutable_rows()[num_rows()] = index_into_values_and_cols;
+
+ CHECK_EQ(index_into_values_and_cols, num_jacobian_nonzeros)
+ << "Ceres bug: final index into values_ and cols_ should be equal to "
+ << "the number of jacobian nonzeros. Please contact the developers!";
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/dynamic_compressed_row_sparse_matrix.h b/internal/ceres/dynamic_compressed_row_sparse_matrix.h
new file mode 100644
index 0000000..7a89a70
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_sparse_matrix.h
@@ -0,0 +1,99 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+//
+// A compressed row sparse matrix that provides an extended interface to
+// allow dynamic insertion of entries. This is provided for the use case
+// where the sparsity structure and number of non-zero entries is dynamic.
+// This flexibility is achieved by using an (internal) scratch space that
+// allows independent insertion of entries into each row (thread-safe).
+// Once insertion is complete, the `Finalize` method must be called to ensure
+// that the underlying `CompressedRowSparseMatrix` is consistent.
+//
+// This should only be used if you really do need a dynamic sparsity pattern.
+
+#ifndef CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_SPARSE_MATRIX_H_
+#define CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_SPARSE_MATRIX_H_
+
+#include "ceres/compressed_row_sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+class DynamicCompressedRowSparseMatrix : public CompressedRowSparseMatrix {
+ public:
+  // Set the number of rows and columns for the underlying
+ // `CompressedRowSparseMatrix` and set the initial number of maximum non-zero
+ // entries. Note that following the insertion of entries, when `Finalize`
+ // is called the number of non-zeros is determined and all internal
+ // structures are adjusted as required. If you know the upper limit on the
+ // number of non-zeros, then passing this value here can prevent future
+ // memory reallocations which may improve performance. Otherwise, if no
+ // upper limit is available a value of 0 is sufficient.
+ //
+ // Typical usage of this class is to define a new instance with a given
+ // number of rows, columns and maximum number of non-zero elements
+ // (if available). Next, entries are inserted at row and column positions
+ // using `InsertEntry`. Finally, once all elements have been inserted,
+ // `Finalize` must be called to make the underlying
+ // `CompressedRowSparseMatrix` consistent.
+ DynamicCompressedRowSparseMatrix(int num_rows,
+ int num_cols,
+ int initial_max_num_nonzeros);
+
+ // Insert an entry at a given row and column position. This method is
+ // thread-safe across rows i.e. different threads can insert values
+ // simultaneously into different rows. It should be emphasised that this
+ // method always inserts a new entry and does not check for existing
+ // entries at the specified row and column position. Duplicate entries
+ // for a given row and column position will result in undefined
+ // behavior.
+ void InsertEntry(int row, int col, const double& value);
+
+ // Clear all entries for rows, starting from row index `row_start`
+ // and proceeding for `num_rows`.
+ void ClearRows(int row_start, int num_rows);
+
+ // Make the underlying internal `CompressedRowSparseMatrix` data structures
+ // consistent. Additional space for non-zero entries in the
+ // `CompressedRowSparseMatrix` can be reserved by specifying
+ // `num_additional_elements`. This is useful when it is known that rows will
+ // be appended to the `CompressedRowSparseMatrix` (e.g. appending a diagonal
+ // matrix to the jacobian) as it prevents need for future reallocation.
+ void Finalize(int num_additional_elements);
+
+ private:
+ vector<vector<int> > dynamic_cols_;
+ vector<vector<double> > dynamic_values_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_DYNAMIC_COMPRESSED_ROW_SPARSE_MATRIX_H_
diff --git a/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc b/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc
new file mode 100644
index 0000000..03bfcb6
--- /dev/null
+++ b/internal/ceres/dynamic_compressed_row_sparse_matrix_test.cc
@@ -0,0 +1,217 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: richie.stebbing@gmail.com (Richard Stebbing)
+
+#include "ceres/dynamic_compressed_row_sparse_matrix.h"
+
+#include "ceres/casts.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/casts.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+class DynamicCompressedRowSparseMatrixTest : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ num_rows = 7;
+ num_cols = 4;
+
+ // The number of additional elements reserved when `Finalize` is called
+ // should have no effect on the number of rows, columns or nonzeros.
+ // Set this to some nonzero value to be sure.
+ num_additional_elements = 13;
+
+ expected_num_nonzeros = num_rows * num_cols - min(num_rows, num_cols);
+
+ InitialiseDenseReference();
+ InitialiseSparseMatrixReferences();
+
+ dcrsm.reset(new DynamicCompressedRowSparseMatrix(num_rows,
+ num_cols,
+ 0));
+ }
+
+ void Finalize() {
+ dcrsm->Finalize(num_additional_elements);
+ }
+
+ void InitialiseDenseReference() {
+ dense.resize(num_rows, num_cols);
+ dense.setZero();
+ int num_nonzeros = 0;
+ for (int i = 0; i < (num_rows * num_cols); ++i) {
+ const int r = i / num_cols, c = i % num_cols;
+ if (r != c) {
+ dense(r, c) = i + 1;
+ ++num_nonzeros;
+ }
+ }
+ ASSERT_EQ(num_nonzeros, expected_num_nonzeros);
+ }
+
+ void InitialiseSparseMatrixReferences() {
+ std::vector<int> rows, cols;
+ std::vector<double> values;
+ for (int i = 0; i < (num_rows * num_cols); ++i) {
+ const int r = i / num_cols, c = i % num_cols;
+ if (r != c) {
+ rows.push_back(r);
+ cols.push_back(c);
+ values.push_back(i + 1);
+ }
+ }
+ ASSERT_EQ(values.size(), expected_num_nonzeros);
+
+ tsm.reset(new TripletSparseMatrix(num_rows,
+ num_cols,
+ expected_num_nonzeros));
+ std::copy(rows.begin(), rows.end(), tsm->mutable_rows());
+ std::copy(cols.begin(), cols.end(), tsm->mutable_cols());
+ std::copy(values.begin(), values.end(), tsm->mutable_values());
+ tsm->set_num_nonzeros(values.size());
+
+ Matrix dense_from_tsm;
+ tsm->ToDenseMatrix(&dense_from_tsm);
+ ASSERT_TRUE((dense.array() == dense_from_tsm.array()).all());
+
+ crsm.reset(new CompressedRowSparseMatrix(*tsm));
+ Matrix dense_from_crsm;
+ crsm->ToDenseMatrix(&dense_from_crsm);
+ ASSERT_TRUE((dense.array() == dense_from_crsm.array()).all());
+ }
+
+ void InsertNonZeroEntriesFromDenseReference() {
+ for (int r = 0; r < num_rows; ++r) {
+ for (int c = 0; c < num_cols; ++c) {
+ const double& v = dense(r, c);
+ if (v != 0.0) {
+ dcrsm->InsertEntry(r, c, v);
+ }
+ }
+ }
+ }
+
+ void ExpectEmpty() {
+ EXPECT_EQ(dcrsm->num_rows(), num_rows);
+ EXPECT_EQ(dcrsm->num_cols(), num_cols);
+ EXPECT_EQ(dcrsm->num_nonzeros(), 0);
+
+ Matrix dense_from_dcrsm;
+ dcrsm->ToDenseMatrix(&dense_from_dcrsm);
+ EXPECT_EQ(dense_from_dcrsm.rows(), num_rows);
+ EXPECT_EQ(dense_from_dcrsm.cols(), num_cols);
+ EXPECT_TRUE((dense_from_dcrsm.array() == 0.0).all());
+ }
+
+ void ExpectEqualToDenseReference() {
+ Matrix dense_from_dcrsm;
+ dcrsm->ToDenseMatrix(&dense_from_dcrsm);
+ EXPECT_TRUE((dense.array() == dense_from_dcrsm.array()).all());
+ }
+
+ void ExpectEqualToCompressedRowSparseMatrixReference() {
+ typedef Eigen::Map<const Eigen::VectorXi> ConstIntVectorRef;
+
+ ConstIntVectorRef crsm_rows(crsm->rows(), crsm->num_rows() + 1);
+ ConstIntVectorRef dcrsm_rows(dcrsm->rows(), dcrsm->num_rows() + 1);
+ EXPECT_TRUE((crsm_rows.array() == dcrsm_rows.array()).all());
+
+ ConstIntVectorRef crsm_cols(crsm->cols(), crsm->num_nonzeros());
+ ConstIntVectorRef dcrsm_cols(dcrsm->cols(), dcrsm->num_nonzeros());
+ EXPECT_TRUE((crsm_cols.array() == dcrsm_cols.array()).all());
+
+ ConstVectorRef crsm_values(crsm->values(), crsm->num_nonzeros());
+ ConstVectorRef dcrsm_values(dcrsm->values(), dcrsm->num_nonzeros());
+ EXPECT_TRUE((crsm_values.array() == dcrsm_values.array()).all());
+ }
+
+ int num_rows;
+ int num_cols;
+
+ int num_additional_elements;
+
+ int expected_num_nonzeros;
+
+ Matrix dense;
+ scoped_ptr<TripletSparseMatrix> tsm;
+ scoped_ptr<CompressedRowSparseMatrix> crsm;
+
+ scoped_ptr<DynamicCompressedRowSparseMatrix> dcrsm;
+};
+
+TEST_F(DynamicCompressedRowSparseMatrixTest, Initialization) {
+ ExpectEmpty();
+
+ Finalize();
+ ExpectEmpty();
+}
+
+TEST_F(DynamicCompressedRowSparseMatrixTest, InsertEntryAndFinalize) {
+ InsertNonZeroEntriesFromDenseReference();
+ ExpectEmpty();
+
+ Finalize();
+ ExpectEqualToDenseReference();
+ ExpectEqualToCompressedRowSparseMatrixReference();
+}
+
+TEST_F(DynamicCompressedRowSparseMatrixTest, ClearRows) {
+ InsertNonZeroEntriesFromDenseReference();
+ Finalize();
+ ExpectEqualToDenseReference();
+ ExpectEqualToCompressedRowSparseMatrixReference();
+
+ dcrsm->ClearRows(0, 0);
+ Finalize();
+ ExpectEqualToDenseReference();
+ ExpectEqualToCompressedRowSparseMatrixReference();
+
+ dcrsm->ClearRows(0, num_rows);
+ ExpectEqualToCompressedRowSparseMatrixReference();
+
+ Finalize();
+ ExpectEmpty();
+
+ InsertNonZeroEntriesFromDenseReference();
+ dcrsm->ClearRows(1, 2);
+ Finalize();
+ dense.block(1, 0, 2, num_cols).setZero();
+ ExpectEqualToDenseReference();
+
+ InitialiseDenseReference();
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/dynamic_numeric_diff_cost_function_test.cc b/internal/ceres/dynamic_numeric_diff_cost_function_test.cc
new file mode 100644
index 0000000..19f4d88
--- /dev/null
+++ b/internal/ceres/dynamic_numeric_diff_cost_function_test.cc
@@ -0,0 +1,519 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+// mierle@gmail.com (Keir Mierle)
+
+#include <cstddef>
+
+#include "ceres/dynamic_numeric_diff_cost_function.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+const double kTolerance = 1e-6;
+
+// Takes 2 parameter blocks:
+// parameters[0] is size 10.
+// parameters[1] is size 5.
+// Emits 21 residuals:
+// A: i - parameters[0][i], for i in [0,10) -- this is 10 residuals
+// B: parameters[0][i] - i, for i in [0,10) -- this is another 10.
+// C: sum(parameters[0][i]^2 - 8*parameters[0][i]) + sum(parameters[1][i])
+class MyCostFunctor {
+ public:
+ bool operator()(double const* const* parameters, double* residuals) const {
+ const double* params0 = parameters[0];
+ int r = 0;
+ for (int i = 0; i < 10; ++i) {
+ residuals[r++] = i - params0[i];
+ residuals[r++] = params0[i] - i;
+ }
+
+ double c_residual = 0.0;
+ for (int i = 0; i < 10; ++i) {
+ c_residual += pow(params0[i], 2) - 8.0 * params0[i];
+ }
+
+ const double* params1 = parameters[1];
+ for (int i = 0; i < 5; ++i) {
+ c_residual += params1[i];
+ }
+ residuals[r++] = c_residual;
+ return true;
+ }
+};
+
+TEST(DynamicNumericdiffCostFunctionTest, TestResiduals) {
+ vector<double> param_block_0(10, 0.0);
+ vector<double> param_block_1(5, 0.0);
+ DynamicNumericDiffCostFunction<MyCostFunctor> cost_function(
+ new MyCostFunctor());
+ cost_function.AddParameterBlock(param_block_0.size());
+ cost_function.AddParameterBlock(param_block_1.size());
+ cost_function.SetNumResiduals(21);
+
+ // Test residual computation.
+ vector<double> residuals(21, -100000);
+ vector<double*> parameter_blocks(2);
+ parameter_blocks[0] = &param_block_0[0];
+ parameter_blocks[1] = &param_block_1[0];
+ EXPECT_TRUE(cost_function.Evaluate(&parameter_blocks[0],
+ residuals.data(),
+ NULL));
+ for (int r = 0; r < 10; ++r) {
+ EXPECT_EQ(1.0 * r, residuals.at(r * 2));
+ EXPECT_EQ(-1.0 * r, residuals.at(r * 2 + 1));
+ }
+ EXPECT_EQ(0, residuals.at(20));
+}
+
+
+TEST(DynamicNumericdiffCostFunctionTest, TestJacobian) {
+ // Test the residual counting.
+ vector<double> param_block_0(10, 0.0);
+ for (int i = 0; i < 10; ++i) {
+ param_block_0[i] = 2 * i;
+ }
+ vector<double> param_block_1(5, 0.0);
+ DynamicNumericDiffCostFunction<MyCostFunctor> cost_function(
+ new MyCostFunctor());
+ cost_function.AddParameterBlock(param_block_0.size());
+ cost_function.AddParameterBlock(param_block_1.size());
+ cost_function.SetNumResiduals(21);
+
+ // Prepare the residuals.
+ vector<double> residuals(21, -100000);
+
+ // Prepare the parameters.
+ vector<double*> parameter_blocks(2);
+ parameter_blocks[0] = &param_block_0[0];
+ parameter_blocks[1] = &param_block_1[0];
+
+ // Prepare the jacobian.
+ vector<vector<double> > jacobian_vect(2);
+ jacobian_vect[0].resize(21 * 10, -100000);
+ jacobian_vect[1].resize(21 * 5, -100000);
+ vector<double*> jacobian;
+ jacobian.push_back(jacobian_vect[0].data());
+ jacobian.push_back(jacobian_vect[1].data());
+
+ // Test jacobian computation.
+ EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
+ residuals.data(),
+ jacobian.data()));
+
+ for (int r = 0; r < 10; ++r) {
+ EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
+ EXPECT_EQ(+1.0 * r, residuals.at(r * 2 + 1));
+ }
+ EXPECT_EQ(420, residuals.at(20));
+ for (int p = 0; p < 10; ++p) {
+ // Check "A" Jacobian.
+ EXPECT_NEAR(-1.0, jacobian_vect[0][2*p * 10 + p], kTolerance);
+ // Check "B" Jacobian.
+ EXPECT_NEAR(+1.0, jacobian_vect[0][(2*p+1) * 10 + p], kTolerance);
+ jacobian_vect[0][2*p * 10 + p] = 0.0;
+ jacobian_vect[0][(2*p+1) * 10 + p] = 0.0;
+ }
+
+ // Check "C" Jacobian for first parameter block.
+ for (int p = 0; p < 10; ++p) {
+ EXPECT_NEAR(4 * p - 8, jacobian_vect[0][20 * 10 + p], kTolerance);
+ jacobian_vect[0][20 * 10 + p] = 0.0;
+ }
+ for (int i = 0; i < jacobian_vect[0].size(); ++i) {
+ EXPECT_NEAR(0.0, jacobian_vect[0][i], kTolerance);
+ }
+
+ // Check "C" Jacobian for second parameter block.
+ for (int p = 0; p < 5; ++p) {
+ EXPECT_NEAR(1.0, jacobian_vect[1][20 * 5 + p], kTolerance);
+ jacobian_vect[1][20 * 5 + p] = 0.0;
+ }
+ for (int i = 0; i < jacobian_vect[1].size(); ++i) {
+ EXPECT_NEAR(0.0, jacobian_vect[1][i], kTolerance);
+ }
+}
+
+TEST(DynamicNumericdiffCostFunctionTest, JacobianWithFirstParameterBlockConstant) { // NOLINT
+ // Test the residual counting.
+ vector<double> param_block_0(10, 0.0);
+ for (int i = 0; i < 10; ++i) {
+ param_block_0[i] = 2 * i;
+ }
+ vector<double> param_block_1(5, 0.0);
+ DynamicNumericDiffCostFunction<MyCostFunctor> cost_function(
+ new MyCostFunctor());
+ cost_function.AddParameterBlock(param_block_0.size());
+ cost_function.AddParameterBlock(param_block_1.size());
+ cost_function.SetNumResiduals(21);
+
+ // Prepare the residuals.
+ vector<double> residuals(21, -100000);
+
+ // Prepare the parameters.
+ vector<double*> parameter_blocks(2);
+ parameter_blocks[0] = &param_block_0[0];
+ parameter_blocks[1] = &param_block_1[0];
+
+ // Prepare the jacobian.
+ vector<vector<double> > jacobian_vect(2);
+ jacobian_vect[0].resize(21 * 10, -100000);
+ jacobian_vect[1].resize(21 * 5, -100000);
+ vector<double*> jacobian;
+ jacobian.push_back(NULL);
+ jacobian.push_back(jacobian_vect[1].data());
+
+ // Test jacobian computation.
+ EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
+ residuals.data(),
+ jacobian.data()));
+
+ for (int r = 0; r < 10; ++r) {
+ EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
+ EXPECT_EQ(+1.0 * r, residuals.at(r * 2 + 1));
+ }
+ EXPECT_EQ(420, residuals.at(20));
+
+ // Check "C" Jacobian for second parameter block.
+ for (int p = 0; p < 5; ++p) {
+ EXPECT_NEAR(1.0, jacobian_vect[1][20 * 5 + p], kTolerance);
+ jacobian_vect[1][20 * 5 + p] = 0.0;
+ }
+ for (int i = 0; i < jacobian_vect[1].size(); ++i) {
+ EXPECT_EQ(0.0, jacobian_vect[1][i]);
+ }
+}
+
+TEST(DynamicNumericdiffCostFunctionTest, JacobianWithSecondParameterBlockConstant) { // NOLINT
+ // Test the residual counting.
+ vector<double> param_block_0(10, 0.0);
+ for (int i = 0; i < 10; ++i) {
+ param_block_0[i] = 2 * i;
+ }
+ vector<double> param_block_1(5, 0.0);
+ DynamicNumericDiffCostFunction<MyCostFunctor> cost_function(
+ new MyCostFunctor());
+ cost_function.AddParameterBlock(param_block_0.size());
+ cost_function.AddParameterBlock(param_block_1.size());
+ cost_function.SetNumResiduals(21);
+
+ // Prepare the residuals.
+ vector<double> residuals(21, -100000);
+
+ // Prepare the parameters.
+ vector<double*> parameter_blocks(2);
+ parameter_blocks[0] = &param_block_0[0];
+ parameter_blocks[1] = &param_block_1[0];
+
+ // Prepare the jacobian.
+ vector<vector<double> > jacobian_vect(2);
+ jacobian_vect[0].resize(21 * 10, -100000);
+ jacobian_vect[1].resize(21 * 5, -100000);
+ vector<double*> jacobian;
+ jacobian.push_back(jacobian_vect[0].data());
+ jacobian.push_back(NULL);
+
+ // Test jacobian computation.
+ EXPECT_TRUE(cost_function.Evaluate(parameter_blocks.data(),
+ residuals.data(),
+ jacobian.data()));
+
+ for (int r = 0; r < 10; ++r) {
+ EXPECT_EQ(-1.0 * r, residuals.at(r * 2));
+ EXPECT_EQ(+1.0 * r, residuals.at(r * 2 + 1));
+ }
+ EXPECT_EQ(420, residuals.at(20));
+ for (int p = 0; p < 10; ++p) {
+ // Check "A" Jacobian.
+ EXPECT_NEAR(-1.0, jacobian_vect[0][2*p * 10 + p], kTolerance);
+ // Check "B" Jacobian.
+ EXPECT_NEAR(+1.0, jacobian_vect[0][(2*p+1) * 10 + p], kTolerance);
+ jacobian_vect[0][2*p * 10 + p] = 0.0;
+ jacobian_vect[0][(2*p+1) * 10 + p] = 0.0;
+ }
+
+ // Check "C" Jacobian for first parameter block.
+ for (int p = 0; p < 10; ++p) {
+ EXPECT_NEAR(4 * p - 8, jacobian_vect[0][20 * 10 + p], kTolerance);
+ jacobian_vect[0][20 * 10 + p] = 0.0;
+ }
+ for (int i = 0; i < jacobian_vect[0].size(); ++i) {
+ EXPECT_EQ(0.0, jacobian_vect[0][i]);
+ }
+}
+
+// Takes 3 parameter blocks:
+// parameters[0] (x) is size 1.
+// parameters[1] (y) is size 2.
+// parameters[2] (z) is size 3.
+// Emits 7 residuals:
+// A: x[0] (= sum_x)
+// B: y[0] + 2.0 * y[1] (= sum_y)
+// C: z[0] + 3.0 * z[1] + 6.0 * z[2] (= sum_z)
+// D: sum_x * sum_y
+// E: sum_y * sum_z
+// F: sum_x * sum_z
+// G: sum_x * sum_y * sum_z
+class MyThreeParameterCostFunctor {
+ public:
+ template <typename T>
+ bool operator()(T const* const* parameters, T* residuals) const {
+ const T* x = parameters[0];
+ const T* y = parameters[1];
+ const T* z = parameters[2];
+
+ T sum_x = x[0];
+ T sum_y = y[0] + 2.0 * y[1];
+ T sum_z = z[0] + 3.0 * z[1] + 6.0 * z[2];
+
+ residuals[0] = sum_x;
+ residuals[1] = sum_y;
+ residuals[2] = sum_z;
+ residuals[3] = sum_x * sum_y;
+ residuals[4] = sum_y * sum_z;
+ residuals[5] = sum_x * sum_z;
+ residuals[6] = sum_x * sum_y * sum_z;
+ return true;
+ }
+};
+
+class ThreeParameterCostFunctorTest : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ // Prepare the parameters.
+ x_.resize(1);
+ x_[0] = 0.0;
+
+ y_.resize(2);
+ y_[0] = 1.0;
+ y_[1] = 3.0;
+
+ z_.resize(3);
+ z_[0] = 2.0;
+ z_[1] = 4.0;
+ z_[2] = 6.0;
+
+ parameter_blocks_.resize(3);
+ parameter_blocks_[0] = &x_[0];
+ parameter_blocks_[1] = &y_[0];
+ parameter_blocks_[2] = &z_[0];
+
+ // Prepare the cost function.
+ typedef DynamicNumericDiffCostFunction<MyThreeParameterCostFunctor>
+ DynamicMyThreeParameterCostFunction;
+ DynamicMyThreeParameterCostFunction * cost_function =
+ new DynamicMyThreeParameterCostFunction(
+ new MyThreeParameterCostFunctor());
+ cost_function->AddParameterBlock(1);
+ cost_function->AddParameterBlock(2);
+ cost_function->AddParameterBlock(3);
+ cost_function->SetNumResiduals(7);
+
+ cost_function_.reset(cost_function);
+
+ // Setup jacobian data.
+ jacobian_vect_.resize(3);
+ jacobian_vect_[0].resize(7 * x_.size(), -100000);
+ jacobian_vect_[1].resize(7 * y_.size(), -100000);
+ jacobian_vect_[2].resize(7 * z_.size(), -100000);
+
+ // Prepare the expected residuals.
+ const double sum_x = x_[0];
+ const double sum_y = y_[0] + 2.0 * y_[1];
+ const double sum_z = z_[0] + 3.0 * z_[1] + 6.0 * z_[2];
+
+ expected_residuals_.resize(7);
+ expected_residuals_[0] = sum_x;
+ expected_residuals_[1] = sum_y;
+ expected_residuals_[2] = sum_z;
+ expected_residuals_[3] = sum_x * sum_y;
+ expected_residuals_[4] = sum_y * sum_z;
+ expected_residuals_[5] = sum_x * sum_z;
+ expected_residuals_[6] = sum_x * sum_y * sum_z;
+
+ // Prepare the expected jacobian entries.
+ expected_jacobian_x_.resize(7);
+ expected_jacobian_x_[0] = 1.0;
+ expected_jacobian_x_[1] = 0.0;
+ expected_jacobian_x_[2] = 0.0;
+ expected_jacobian_x_[3] = sum_y;
+ expected_jacobian_x_[4] = 0.0;
+ expected_jacobian_x_[5] = sum_z;
+ expected_jacobian_x_[6] = sum_y * sum_z;
+
+ expected_jacobian_y_.resize(14);
+ expected_jacobian_y_[0] = 0.0;
+ expected_jacobian_y_[1] = 0.0;
+ expected_jacobian_y_[2] = 1.0;
+ expected_jacobian_y_[3] = 2.0;
+ expected_jacobian_y_[4] = 0.0;
+ expected_jacobian_y_[5] = 0.0;
+ expected_jacobian_y_[6] = sum_x;
+ expected_jacobian_y_[7] = 2.0 * sum_x;
+ expected_jacobian_y_[8] = sum_z;
+ expected_jacobian_y_[9] = 2.0 * sum_z;
+ expected_jacobian_y_[10] = 0.0;
+ expected_jacobian_y_[11] = 0.0;
+ expected_jacobian_y_[12] = sum_x * sum_z;
+ expected_jacobian_y_[13] = 2.0 * sum_x * sum_z;
+
+ expected_jacobian_z_.resize(21);
+ expected_jacobian_z_[0] = 0.0;
+ expected_jacobian_z_[1] = 0.0;
+ expected_jacobian_z_[2] = 0.0;
+ expected_jacobian_z_[3] = 0.0;
+ expected_jacobian_z_[4] = 0.0;
+ expected_jacobian_z_[5] = 0.0;
+ expected_jacobian_z_[6] = 1.0;
+ expected_jacobian_z_[7] = 3.0;
+ expected_jacobian_z_[8] = 6.0;
+ expected_jacobian_z_[9] = 0.0;
+ expected_jacobian_z_[10] = 0.0;
+ expected_jacobian_z_[11] = 0.0;
+ expected_jacobian_z_[12] = sum_y;
+ expected_jacobian_z_[13] = 3.0 * sum_y;
+ expected_jacobian_z_[14] = 6.0 * sum_y;
+ expected_jacobian_z_[15] = sum_x;
+ expected_jacobian_z_[16] = 3.0 * sum_x;
+ expected_jacobian_z_[17] = 6.0 * sum_x;
+ expected_jacobian_z_[18] = sum_x * sum_y;
+ expected_jacobian_z_[19] = 3.0 * sum_x * sum_y;
+ expected_jacobian_z_[20] = 6.0 * sum_x * sum_y;
+ }
+
+ protected:
+ vector<double> x_;
+ vector<double> y_;
+ vector<double> z_;
+
+ vector<double*> parameter_blocks_;
+
+ scoped_ptr<CostFunction> cost_function_;
+
+ vector<vector<double> > jacobian_vect_;
+
+ vector<double> expected_residuals_;
+
+ vector<double> expected_jacobian_x_;
+ vector<double> expected_jacobian_y_;
+ vector<double> expected_jacobian_z_;
+};
+
+TEST_F(ThreeParameterCostFunctorTest, TestThreeParameterResiduals) {
+ vector<double> residuals(7, -100000);
+ EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+ residuals.data(),
+ NULL));
+ for (int i = 0; i < 7; ++i) {
+ EXPECT_EQ(expected_residuals_[i], residuals[i]);
+ }
+}
+
+TEST_F(ThreeParameterCostFunctorTest, TestThreeParameterJacobian) {
+ vector<double> residuals(7, -100000);
+
+ vector<double*> jacobian;
+ jacobian.push_back(jacobian_vect_[0].data());
+ jacobian.push_back(jacobian_vect_[1].data());
+ jacobian.push_back(jacobian_vect_[2].data());
+
+ EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+ residuals.data(),
+ jacobian.data()));
+
+ for (int i = 0; i < 7; ++i) {
+ EXPECT_EQ(expected_residuals_[i], residuals[i]);
+ }
+
+ for (int i = 0; i < 7; ++i) {
+ EXPECT_NEAR(expected_jacobian_x_[i], jacobian[0][i], kTolerance);
+ }
+
+ for (int i = 0; i < 14; ++i) {
+ EXPECT_NEAR(expected_jacobian_y_[i], jacobian[1][i], kTolerance);
+ }
+
+ for (int i = 0; i < 21; ++i) {
+ EXPECT_NEAR(expected_jacobian_z_[i], jacobian[2][i], kTolerance);
+ }
+}
+
+TEST_F(ThreeParameterCostFunctorTest,
+ ThreeParameterJacobianWithFirstAndLastParameterBlockConstant) {
+ vector<double> residuals(7, -100000);
+
+ vector<double*> jacobian;
+ jacobian.push_back(NULL);
+ jacobian.push_back(jacobian_vect_[1].data());
+ jacobian.push_back(NULL);
+
+ EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+ residuals.data(),
+ jacobian.data()));
+
+ for (int i = 0; i < 7; ++i) {
+ EXPECT_EQ(expected_residuals_[i], residuals[i]);
+ }
+
+ for (int i = 0; i < 14; ++i) {
+ EXPECT_NEAR(expected_jacobian_y_[i], jacobian[1][i], kTolerance);
+ }
+}
+
+TEST_F(ThreeParameterCostFunctorTest,
+ ThreeParameterJacobianWithSecondParameterBlockConstant) {
+ vector<double> residuals(7, -100000);
+
+ vector<double*> jacobian;
+ jacobian.push_back(jacobian_vect_[0].data());
+ jacobian.push_back(NULL);
+ jacobian.push_back(jacobian_vect_[2].data());
+
+ EXPECT_TRUE(cost_function_->Evaluate(parameter_blocks_.data(),
+ residuals.data(),
+ jacobian.data()));
+
+ for (int i = 0; i < 7; ++i) {
+ EXPECT_EQ(expected_residuals_[i], residuals[i]);
+ }
+
+ for (int i = 0; i < 7; ++i) {
+ EXPECT_NEAR(expected_jacobian_x_[i], jacobian[0][i], kTolerance);
+ }
+
+ for (int i = 0; i < 21; ++i) {
+ EXPECT_NEAR(expected_jacobian_z_[i], jacobian[2][i], kTolerance);
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/evaluator.cc b/internal/ceres/evaluator.cc
index 31a4176..c94c62c 100644
--- a/internal/ceres/evaluator.cc
+++ b/internal/ceres/evaluator.cc
@@ -35,6 +35,8 @@
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/crs_matrix.h"
#include "ceres/dense_jacobian_writer.h"
+#include "ceres/dynamic_compressed_row_finalizer.h"
+#include "ceres/dynamic_compressed_row_jacobian_writer.h"
#include "ceres/evaluator.h"
#include "ceres/internal/port.h"
#include "ceres/program_evaluator.h"
@@ -63,9 +65,17 @@ Evaluator* Evaluator::Create(const Evaluator::Options& options,
BlockJacobianWriter>(options,
program);
case SPARSE_NORMAL_CHOLESKY:
- return new ProgramEvaluator<ScratchEvaluatePreparer,
- CompressedRowJacobianWriter>(options,
- program);
+ if (options.dynamic_sparsity) {
+ return new ProgramEvaluator<ScratchEvaluatePreparer,
+ DynamicCompressedRowJacobianWriter,
+ DynamicCompressedRowJacobianFinalizer>(
+ options, program);
+ } else {
+ return new ProgramEvaluator<ScratchEvaluatePreparer,
+ CompressedRowJacobianWriter>(options,
+ program);
+ }
+
default:
*error = "Invalid Linear Solver Type. Unable to create evaluator.";
return NULL;
diff --git a/internal/ceres/evaluator.h b/internal/ceres/evaluator.h
index 3d25462..8fc60b8 100644
--- a/internal/ceres/evaluator.h
+++ b/internal/ceres/evaluator.h
@@ -61,11 +61,13 @@ class Evaluator {
Options()
: num_threads(1),
num_eliminate_blocks(-1),
- linear_solver_type(DENSE_QR) {}
+ linear_solver_type(DENSE_QR),
+ dynamic_sparsity(false) {}
int num_threads;
int num_eliminate_blocks;
LinearSolverType linear_solver_type;
+ bool dynamic_sparsity;
};
static Evaluator* Create(const Options& options,
diff --git a/internal/ceres/evaluator_test.cc b/internal/ceres/evaluator_test.cc
index ea24504..c0de3fc 100644
--- a/internal/ceres/evaluator_test.cc
+++ b/internal/ceres/evaluator_test.cc
@@ -44,6 +44,7 @@
#include "ceres/program.h"
#include "ceres/sized_cost_function.h"
#include "ceres/sparse_matrix.h"
+#include "ceres/stringprintf.h"
#include "ceres/types.h"
#include "gtest/gtest.h"
@@ -91,18 +92,42 @@ class ParameterIgnoringCostFunction
}
};
+struct EvaluatorTestOptions {
+ EvaluatorTestOptions(LinearSolverType linear_solver_type,
+ int num_eliminate_blocks,
+ bool dynamic_sparsity = false)
+ : linear_solver_type(linear_solver_type),
+ num_eliminate_blocks(num_eliminate_blocks),
+ dynamic_sparsity(dynamic_sparsity) {}
+
+ LinearSolverType linear_solver_type;
+ int num_eliminate_blocks;
+ bool dynamic_sparsity;
+};
+
struct EvaluatorTest
- : public ::testing::TestWithParam<pair<LinearSolverType, int> > {
+ : public ::testing::TestWithParam<EvaluatorTestOptions> {
Evaluator* CreateEvaluator(Program* program) {
// This program is straight from the ProblemImpl, and so has no index/offset
    // yet; compute it here as required by the evaluator implementations.
program->SetParameterOffsetsAndIndex();
- VLOG(1) << "Creating evaluator with type: " << GetParam().first
- << " and num_eliminate_blocks: " << GetParam().second;
+ if (VLOG_IS_ON(1)) {
+ string report;
+ StringAppendF(&report, "Creating evaluator with type: %d",
+ GetParam().linear_solver_type);
+ if (GetParam().linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
+ StringAppendF(&report, ", dynamic_sparsity: %d",
+ GetParam().dynamic_sparsity);
+ }
+ StringAppendF(&report, " and num_eliminate_blocks: %d",
+ GetParam().num_eliminate_blocks);
+ VLOG(1) << report;
+ }
Evaluator::Options options;
- options.linear_solver_type = GetParam().first;
- options.num_eliminate_blocks = GetParam().second;
+ options.linear_solver_type = GetParam().linear_solver_type;
+ options.num_eliminate_blocks = GetParam().num_eliminate_blocks;
+ options.dynamic_sparsity = GetParam().dynamic_sparsity;
string error;
return Evaluator::Create(options, program, &error);
}
@@ -517,23 +542,25 @@ TEST_P(EvaluatorTest, EvaluatorAbortsForResidualsThatFailToEvaluate) {
INSTANTIATE_TEST_CASE_P(
LinearSolvers,
EvaluatorTest,
- ::testing::Values(make_pair(DENSE_QR, 0),
- make_pair(DENSE_SCHUR, 0),
- make_pair(DENSE_SCHUR, 1),
- make_pair(DENSE_SCHUR, 2),
- make_pair(DENSE_SCHUR, 3),
- make_pair(DENSE_SCHUR, 4),
- make_pair(SPARSE_SCHUR, 0),
- make_pair(SPARSE_SCHUR, 1),
- make_pair(SPARSE_SCHUR, 2),
- make_pair(SPARSE_SCHUR, 3),
- make_pair(SPARSE_SCHUR, 4),
- make_pair(ITERATIVE_SCHUR, 0),
- make_pair(ITERATIVE_SCHUR, 1),
- make_pair(ITERATIVE_SCHUR, 2),
- make_pair(ITERATIVE_SCHUR, 3),
- make_pair(ITERATIVE_SCHUR, 4),
- make_pair(SPARSE_NORMAL_CHOLESKY, 0)));
+ ::testing::Values(
+ EvaluatorTestOptions(DENSE_QR, 0),
+ EvaluatorTestOptions(DENSE_SCHUR, 0),
+ EvaluatorTestOptions(DENSE_SCHUR, 1),
+ EvaluatorTestOptions(DENSE_SCHUR, 2),
+ EvaluatorTestOptions(DENSE_SCHUR, 3),
+ EvaluatorTestOptions(DENSE_SCHUR, 4),
+ EvaluatorTestOptions(SPARSE_SCHUR, 0),
+ EvaluatorTestOptions(SPARSE_SCHUR, 1),
+ EvaluatorTestOptions(SPARSE_SCHUR, 2),
+ EvaluatorTestOptions(SPARSE_SCHUR, 3),
+ EvaluatorTestOptions(SPARSE_SCHUR, 4),
+ EvaluatorTestOptions(ITERATIVE_SCHUR, 0),
+ EvaluatorTestOptions(ITERATIVE_SCHUR, 1),
+ EvaluatorTestOptions(ITERATIVE_SCHUR, 2),
+ EvaluatorTestOptions(ITERATIVE_SCHUR, 3),
+ EvaluatorTestOptions(ITERATIVE_SCHUR, 4),
+ EvaluatorTestOptions(SPARSE_NORMAL_CHOLESKY, 0, false),
+ EvaluatorTestOptions(SPARSE_NORMAL_CHOLESKY, 0, true)));
// Simple cost function used to check if the evaluator is sensitive to
// state changes.
diff --git a/internal/ceres/generate_eliminator_specialization.py b/internal/ceres/generate_eliminator_specialization.py
index caeca69..2ec3c5b 100644
--- a/internal/ceres/generate_eliminator_specialization.py
+++ b/internal/ceres/generate_eliminator_specialization.py
@@ -59,7 +59,10 @@ SPECIALIZATIONS = [(2, 2, 2),
(2, 3, "Eigen::Dynamic"),
(2, 4, 3),
(2, 4, 4),
+ (2, 4, 8),
+ (2, 4, 9),
(2, 4, "Eigen::Dynamic"),
+ (2, "Eigen::Dynamic", "Eigen::Dynamic"),
(4, 4, 2),
(4, 4, 3),
(4, 4, 4),
@@ -123,6 +126,9 @@ template class SchurEliminator<%s, %s, %s>;
"""
SPECIALIZATION_FILE = """
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generate_partitioned_matrix_view_specializations.py b/internal/ceres/generate_partitioned_matrix_view_specializations.py
new file mode 100644
index 0000000..c9bdf23
--- /dev/null
+++ b/internal/ceres/generate_partitioned_matrix_view_specializations.py
@@ -0,0 +1,231 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2013 Google Inc. All rights reserved.
+# http://code.google.com/p/ceres-solver/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+# used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: sameeragarwal@google.com (Sameer Agarwal)
+#
+# Script for explicitly generating template specialization of the
+# PartitionedMatrixView class. Explicitly generating these
+# instantiations in separate .cc files breaks the compilation into
+# separate compilation units rather than one large .cc file.
+#
+# This script creates two sets of files.
+#
+# 1. partitioned_matrix_view_x_x_x.cc
+# where the x indicates the template parameters and
+#
+# 2. partitioned_matrix_view.cc
+#
+# that contains a factory function for instantiating these classes
+# based on runtime parameters.
+#
+# The list of tuples, SPECIALIZATIONS, declares the set of
+# specializations that is generated.
+
+# Set of template specializations to generate
+SPECIALIZATIONS = [(2, 2, 2),
+ (2, 2, 3),
+ (2, 2, 4),
+ (2, 2, "Eigen::Dynamic"),
+ (2, 3, 3),
+ (2, 3, 4),
+ (2, 3, 9),
+ (2, 3, "Eigen::Dynamic"),
+ (2, 4, 3),
+ (2, 4, 4),
+ (2, 4, 8),
+ (2, 4, 9),
+ (2, 4, "Eigen::Dynamic"),
+ (2, "Eigen::Dynamic", "Eigen::Dynamic"),
+ (4, 4, 2),
+ (4, 4, 3),
+ (4, 4, 4),
+ (4, 4, "Eigen::Dynamic"),
+ ("Eigen::Dynamic", "Eigen::Dynamic", "Eigen::Dynamic")]
+HEADER = """// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+"""
+
+DYNAMIC_FILE = """
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<%s, %s, %s>;
+
+} // namespace internal
+} // namespace ceres
+"""
+
+SPECIALIZATION_FILE = """
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<%s, %s, %s>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
+"""
+
+FACTORY_FILE_HEADER = """
+#include "ceres/linear_solver.h"
+#include "ceres/partitioned_matrix_view.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+PartitionedMatrixViewBase*
+PartitionedMatrixViewBase::Create(const LinearSolver::Options& options,
+ const BlockSparseMatrix& matrix) {
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+"""
+
+FACTORY_CONDITIONAL = """ if ((options.row_block_size == %s) &&
+ (options.e_block_size == %s) &&
+ (options.f_block_size == %s)) {
+ return new PartitionedMatrixView<%s, %s, %s>(
+ matrix, options.elimination_groups[0]);
+ }
+"""
+
+FACTORY_FOOTER = """
+#endif
+ VLOG(1) << "Template specializations not found for <"
+ << options.row_block_size << ","
+ << options.e_block_size << ","
+ << options.f_block_size << ">";
+ return new PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(
+ matrix, options.elimination_groups[0]);
+};
+
+} // namespace internal
+} // namespace ceres
+"""
+
+
+def SuffixForSize(size):
+ if size == "Eigen::Dynamic":
+ return "d"
+ return str(size)
+
+
+def SpecializationFilename(prefix, row_block_size, e_block_size, f_block_size):
+ return "_".join([prefix] + map(SuffixForSize, (row_block_size,
+ e_block_size,
+ f_block_size)))
+
+
+def Specialize():
+ """
+ Generate specialization code and the conditionals to instantiate it.
+ """
+ f = open("partitioned_matrix_view.cc", "w")
+ f.write(HEADER)
+ f.write(FACTORY_FILE_HEADER)
+
+ for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS:
+ output = SpecializationFilename("generated/partitioned_matrix_view",
+ row_block_size,
+ e_block_size,
+ f_block_size) + ".cc"
+ fptr = open(output, "w")
+ fptr.write(HEADER)
+
+ template = SPECIALIZATION_FILE
+ if (row_block_size == "Eigen::Dynamic" and
+ e_block_size == "Eigen::Dynamic" and
+ f_block_size == "Eigen::Dynamic"):
+ template = DYNAMIC_FILE
+
+ fptr.write(template % (row_block_size, e_block_size, f_block_size))
+ fptr.close()
+
+ f.write(FACTORY_CONDITIONAL % (row_block_size,
+ e_block_size,
+ f_block_size,
+ row_block_size,
+ e_block_size,
+ f_block_size))
+ f.write(FACTORY_FOOTER)
+ f.close()
+
+
+if __name__ == "__main__":
+ Specialize()
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc b/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
new file mode 100644
index 0000000..a7d802a
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 2, 2>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc b/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
new file mode 100644
index 0000000..89e6f77
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 2, 3>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc b/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
new file mode 100644
index 0000000..3a3e8b6
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 2, 4>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc b/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
new file mode 100644
index 0000000..661f135
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 2, Eigen::Dynamic>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc b/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
new file mode 100644
index 0000000..e79e001
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 3, 3>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc b/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
new file mode 100644
index 0000000..2f1ae68
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 3, 4>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc b/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
new file mode 100644
index 0000000..ab40550
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 3, 9>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc b/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
new file mode 100644
index 0000000..89ecff7
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 3, Eigen::Dynamic>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc b/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
new file mode 100644
index 0000000..182707d
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 4, 3>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc b/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
new file mode 100644
index 0000000..a2cf8f4
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 4, 4>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc b/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
new file mode 100644
index 0000000..a263769
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 4, 8>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc b/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
new file mode 100644
index 0000000..d853860
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 4, 9>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc b/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
new file mode 100644
index 0000000..7d622fc
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 4, Eigen::Dynamic>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc b/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
new file mode 100644
index 0000000..31981ca
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, Eigen::Dynamic, Eigen::Dynamic>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc b/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
new file mode 100644
index 0000000..d51ab5f
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<4, 4, 2>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc b/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
new file mode 100644
index 0000000..4b17fbd
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<4, 4, 3>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc b/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
new file mode 100644
index 0000000..7b5fe0f
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<4, 4, 4>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc b/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
new file mode 100644
index 0000000..c31fed3
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<4, 4, Eigen::Dynamic>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc b/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
new file mode 100644
index 0000000..a3308ed
--- /dev/null
+++ b/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
+
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>;
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/generated/schur_eliminator_2_2_2.cc b/internal/ceres/generated/schur_eliminator_2_2_2.cc
index 7f9ce14..db2a4dc 100644
--- a/internal/ceres/generated/schur_eliminator_2_2_2.cc
+++ b/internal/ceres/generated/schur_eliminator_2_2_2.cc
@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_2_3.cc b/internal/ceres/generated/schur_eliminator_2_2_3.cc
index d9ab1dd..f53c12a 100644
--- a/internal/ceres/generated/schur_eliminator_2_2_3.cc
+++ b/internal/ceres/generated/schur_eliminator_2_2_3.cc
@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_2_4.cc b/internal/ceres/generated/schur_eliminator_2_2_4.cc
index a268810..9e29383 100644
--- a/internal/ceres/generated/schur_eliminator_2_2_4.cc
+++ b/internal/ceres/generated/schur_eliminator_2_2_4.cc
@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_2_d.cc b/internal/ceres/generated/schur_eliminator_2_2_d.cc
index 46f9492..541def6 100644
--- a/internal/ceres/generated/schur_eliminator_2_2_d.cc
+++ b/internal/ceres/generated/schur_eliminator_2_2_d.cc
@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_3_3.cc b/internal/ceres/generated/schur_eliminator_2_3_3.cc
index ce53c6c..e450263 100644
--- a/internal/ceres/generated/schur_eliminator_2_3_3.cc
+++ b/internal/ceres/generated/schur_eliminator_2_3_3.cc
@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_3_4.cc b/internal/ceres/generated/schur_eliminator_2_3_4.cc
index 7f6d41d..0618c68 100644
--- a/internal/ceres/generated/schur_eliminator_2_3_4.cc
+++ b/internal/ceres/generated/schur_eliminator_2_3_4.cc
@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_3_9.cc b/internal/ceres/generated/schur_eliminator_2_3_9.cc
index 10f84af..c1ca665 100644
--- a/internal/ceres/generated/schur_eliminator_2_3_9.cc
+++ b/internal/ceres/generated/schur_eliminator_2_3_9.cc
@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_3_d.cc b/internal/ceres/generated/schur_eliminator_2_3_d.cc
index 047d473..1b6092c 100644
--- a/internal/ceres/generated/schur_eliminator_2_3_d.cc
+++ b/internal/ceres/generated/schur_eliminator_2_3_d.cc
@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_4_3.cc b/internal/ceres/generated/schur_eliminator_2_4_3.cc
index 12fdb86..edce8ef 100644
--- a/internal/ceres/generated/schur_eliminator_2_4_3.cc
+++ b/internal/ceres/generated/schur_eliminator_2_4_3.cc
@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_4_4.cc b/internal/ceres/generated/schur_eliminator_2_4_4.cc
index 0e29dc1..a6f3c52 100644
--- a/internal/ceres/generated/schur_eliminator_2_4_4.cc
+++ b/internal/ceres/generated/schur_eliminator_2_4_4.cc
@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_4_8.cc b/internal/ceres/generated/schur_eliminator_2_4_8.cc
new file mode 100644
index 0000000..bf2f0ab
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_4_8.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012, 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specialization.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 4, 8>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_4_9.cc b/internal/ceres/generated/schur_eliminator_2_4_9.cc
new file mode 100644
index 0000000..a63d0bb
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_4_9.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012, 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specialization.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 4, 9>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_2_4_d.cc b/internal/ceres/generated/schur_eliminator_2_4_d.cc
index 4d4ac56..b3a7fff 100644
--- a/internal/ceres/generated/schur_eliminator_2_4_d.cc
+++ b/internal/ceres/generated/schur_eliminator_2_4_d.cc
@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_2_d_d.cc b/internal/ceres/generated/schur_eliminator_2_d_d.cc
new file mode 100644
index 0000000..f4d28cd
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_d_d.cc
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012, 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specialization.py.
+// Editing it manually is not recommended.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, Eigen::Dynamic, Eigen::Dynamic>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/internal/ceres/generated/schur_eliminator_4_4_2.cc b/internal/ceres/generated/schur_eliminator_4_4_2.cc
index 4ad7d41..d1eadc1 100644
--- a/internal/ceres/generated/schur_eliminator_4_4_2.cc
+++ b/internal/ceres/generated/schur_eliminator_4_4_2.cc
@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_4_4_3.cc b/internal/ceres/generated/schur_eliminator_4_4_3.cc
index 87f2fc5..c340dbf 100644
--- a/internal/ceres/generated/schur_eliminator_4_4_3.cc
+++ b/internal/ceres/generated/schur_eliminator_4_4_3.cc
@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_4_4_4.cc b/internal/ceres/generated/schur_eliminator_4_4_4.cc
index 8b3f570..b7d58ad 100644
--- a/internal/ceres/generated/schur_eliminator_4_4_4.cc
+++ b/internal/ceres/generated/schur_eliminator_4_4_4.cc
@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_4_4_d.cc b/internal/ceres/generated/schur_eliminator_4_4_d.cc
index b21feb2..47e0059 100644
--- a/internal/ceres/generated/schur_eliminator_4_4_d.cc
+++ b/internal/ceres/generated/schur_eliminator_4_4_d.cc
@@ -37,9 +37,12 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
diff --git a/internal/ceres/generated/schur_eliminator_d_d_d.cc b/internal/ceres/generated/schur_eliminator_d_d_d.cc
index d483db7..d54a03c 100644
--- a/internal/ceres/generated/schur_eliminator_d_d_d.cc
+++ b/internal/ceres/generated/schur_eliminator_d_d_d.cc
@@ -37,7 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
diff --git a/internal/ceres/gradient_checking_cost_function.cc b/internal/ceres/gradient_checking_cost_function.cc
index 3edf95d..bca22e6 100644
--- a/internal/ceres/gradient_checking_cost_function.cc
+++ b/internal/ceres/gradient_checking_cost_function.cc
@@ -44,7 +44,7 @@
#include "ceres/problem_impl.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
-#include "ceres/runtime_numeric_diff_cost_function.h"
+#include "ceres/dynamic_numeric_diff_cost_function.h"
#include "ceres/stringprintf.h"
#include "ceres/types.h"
#include "glog/logging.h"
@@ -84,14 +84,24 @@ class GradientCheckingCostFunction : public CostFunction {
double relative_precision,
const string& extra_info)
: function_(function),
- finite_diff_cost_function_(
- CreateRuntimeNumericDiffCostFunction(function,
- CENTRAL,
- relative_step_size)),
relative_precision_(relative_precision),
extra_info_(extra_info) {
- *mutable_parameter_block_sizes() = function->parameter_block_sizes();
+ DynamicNumericDiffCostFunction<CostFunction, CENTRAL>*
+ finite_diff_cost_function =
+ new DynamicNumericDiffCostFunction<CostFunction, CENTRAL>(
+ function,
+ DO_NOT_TAKE_OWNERSHIP,
+ relative_step_size);
+
+ const vector<int32>& parameter_block_sizes =
+ function->parameter_block_sizes();
+ for (int i = 0; i < parameter_block_sizes.size(); ++i) {
+ finite_diff_cost_function->AddParameterBlock(parameter_block_sizes[i]);
+ }
+ *mutable_parameter_block_sizes() = parameter_block_sizes;
set_num_residuals(function->num_residuals());
+ finite_diff_cost_function->SetNumResiduals(num_residuals());
+ finite_diff_cost_function_.reset(finite_diff_cost_function);
}
virtual ~GradientCheckingCostFunction() { }
@@ -107,7 +117,7 @@ class GradientCheckingCostFunction : public CostFunction {
int num_residuals = function_->num_residuals();
// Make space for the jacobians of the two methods.
- const vector<int16>& block_sizes = function_->parameter_block_sizes();
+ const vector<int32>& block_sizes = function_->parameter_block_sizes();
vector<Matrix> term_jacobians(block_sizes.size());
vector<Matrix> finite_difference_jacobians(block_sizes.size());
vector<double*> term_jacobian_pointers(block_sizes.size());
diff --git a/internal/ceres/gradient_checking_cost_function_test.cc b/internal/ceres/gradient_checking_cost_function_test.cc
index ac06503..caba2f6 100644
--- a/internal/ceres/gradient_checking_cost_function_test.cc
+++ b/internal/ceres/gradient_checking_cost_function_test.cc
@@ -264,7 +264,7 @@ TEST(GradientCheckingCostFunction, SmokeTest) {
// Trivial cost function that accepts a single argument.
class UnaryCostFunction : public CostFunction {
public:
- UnaryCostFunction(int num_residuals, int16 parameter_block_size) {
+ UnaryCostFunction(int num_residuals, int32 parameter_block_size) {
set_num_residuals(num_residuals);
mutable_parameter_block_sizes()->push_back(parameter_block_size);
}
@@ -284,8 +284,8 @@ class UnaryCostFunction : public CostFunction {
class BinaryCostFunction: public CostFunction {
public:
BinaryCostFunction(int num_residuals,
- int16 parameter_block1_size,
- int16 parameter_block2_size) {
+ int32 parameter_block1_size,
+ int32 parameter_block2_size) {
set_num_residuals(num_residuals);
mutable_parameter_block_sizes()->push_back(parameter_block1_size);
mutable_parameter_block_sizes()->push_back(parameter_block2_size);
@@ -305,9 +305,9 @@ class BinaryCostFunction: public CostFunction {
class TernaryCostFunction: public CostFunction {
public:
TernaryCostFunction(int num_residuals,
- int16 parameter_block1_size,
- int16 parameter_block2_size,
- int16 parameter_block3_size) {
+ int32 parameter_block1_size,
+ int32 parameter_block2_size,
+ int32 parameter_block3_size) {
set_num_residuals(num_residuals);
mutable_parameter_block_sizes()->push_back(parameter_block1_size);
mutable_parameter_block_sizes()->push_back(parameter_block2_size);
diff --git a/internal/ceres/implicit_schur_complement.cc b/internal/ceres/implicit_schur_complement.cc
index 32722bb..2da6235 100644
--- a/internal/ceres/implicit_schur_complement.cc
+++ b/internal/ceres/implicit_schur_complement.cc
@@ -35,21 +35,18 @@
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_solver.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
-ImplicitSchurComplement::ImplicitSchurComplement(int num_eliminate_blocks,
- bool preconditioner)
- : num_eliminate_blocks_(num_eliminate_blocks),
- preconditioner_(preconditioner),
- A_(NULL),
+ImplicitSchurComplement::ImplicitSchurComplement(
+ const LinearSolver::Options& options)
+ : options_(options),
D_(NULL),
- b_(NULL),
- block_diagonal_EtE_inverse_(NULL),
- block_diagonal_FtF_inverse_(NULL) {
+ b_(NULL) {
}
ImplicitSchurComplement::~ImplicitSchurComplement() {
@@ -61,7 +58,7 @@ void ImplicitSchurComplement::Init(const BlockSparseMatrix& A,
// Since initialization is reasonably heavy, perhaps we can save on
// constructing a new object everytime.
if (A_ == NULL) {
- A_.reset(new PartitionedMatrixView(A, num_eliminate_blocks_));
+ A_.reset(PartitionedMatrixViewBase::Create(options_, A));
}
D_ = D;
@@ -71,7 +68,7 @@ void ImplicitSchurComplement::Init(const BlockSparseMatrix& A,
// E'E and F'E.
if (block_diagonal_EtE_inverse_ == NULL) {
block_diagonal_EtE_inverse_.reset(A_->CreateBlockDiagonalEtE());
- if (preconditioner_) {
+ if (options_.preconditioner_type == JACOBI) {
block_diagonal_FtF_inverse_.reset(A_->CreateBlockDiagonalFtF());
}
rhs_.resize(A_->num_cols_f());
@@ -82,7 +79,7 @@ void ImplicitSchurComplement::Init(const BlockSparseMatrix& A,
tmp_f_cols_.resize(A_->num_cols_f());
} else {
A_->UpdateBlockDiagonalEtE(block_diagonal_EtE_inverse_.get());
- if (preconditioner_) {
+ if (options_.preconditioner_type == JACOBI) {
A_->UpdateBlockDiagonalFtF(block_diagonal_FtF_inverse_.get());
}
}
@@ -91,7 +88,7 @@ void ImplicitSchurComplement::Init(const BlockSparseMatrix& A,
// contributions from the diagonal D if it is non-null. Add that to
// the block diagonals and invert them.
AddDiagonalAndInvert(D_, block_diagonal_EtE_inverse_.get());
- if (preconditioner_) {
+ if (options_.preconditioner_type == JACOBI) {
AddDiagonalAndInvert((D_ == NULL) ? NULL : D_ + A_->num_cols_e(),
block_diagonal_FtF_inverse_.get());
}
diff --git a/internal/ceres/implicit_schur_complement.h b/internal/ceres/implicit_schur_complement.h
index c1bb6e1..c992bdc 100644
--- a/internal/ceres/implicit_schur_complement.h
+++ b/internal/ceres/implicit_schur_complement.h
@@ -35,6 +35,7 @@
#define CERES_INTERNAL_IMPLICIT_SCHUR_COMPLEMENT_H_
#include "ceres/linear_operator.h"
+#include "ceres/linear_solver.h"
#include "ceres/partitioned_matrix_view.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
@@ -96,7 +97,7 @@ class ImplicitSchurComplement : public LinearOperator {
//
// TODO(sameeragarwal): Get rid of the two bools below and replace
// them with enums.
- ImplicitSchurComplement(int num_eliminate_blocks, bool preconditioner);
+ ImplicitSchurComplement(const LinearSolver::Options& options);
virtual ~ImplicitSchurComplement();
// Initialize the Schur complement for a linear least squares
@@ -142,10 +143,9 @@ class ImplicitSchurComplement : public LinearOperator {
void AddDiagonalAndInvert(const double* D, BlockSparseMatrix* matrix);
void UpdateRhs();
- int num_eliminate_blocks_;
- bool preconditioner_;
+ const LinearSolver::Options& options_;
- scoped_ptr<PartitionedMatrixView> A_;
+ scoped_ptr<PartitionedMatrixViewBase> A_;
const double* D_;
const double* b_;
diff --git a/internal/ceres/implicit_schur_complement_test.cc b/internal/ceres/implicit_schur_complement_test.cc
index 1694273..3369ecb 100644
--- a/internal/ceres/implicit_schur_complement_test.cc
+++ b/internal/ceres/implicit_schur_complement_test.cc
@@ -120,7 +120,10 @@ class ImplicitSchurComplementTest : public ::testing::Test {
Vector reference_solution;
ReducedLinearSystemAndSolution(D, &lhs, &rhs, &reference_solution);
- ImplicitSchurComplement isc(num_eliminate_blocks_, true);
+ LinearSolver::Options options;
+ options.elimination_groups.push_back(num_eliminate_blocks_);
+ options.preconditioner_type = JACOBI;
+ ImplicitSchurComplement isc(options);
isc.Init(*A_, D, b_.get());
int num_sc_cols = lhs.cols();
diff --git a/internal/ceres/integral_types.h b/internal/ceres/integral_types.h
index 01e0493..d4913f5 100644
--- a/internal/ceres/integral_types.h
+++ b/internal/ceres/integral_types.h
@@ -77,7 +77,6 @@ struct UnsignedInteger {
#undef CERES_INTSIZE
typedef Integer< 8>::type int8;
-typedef Integer<16>::type int16;
typedef Integer<32>::type int32;
typedef Integer<64>::type int64;
diff --git a/internal/ceres/iterative_schur_complement_solver.cc b/internal/ceres/iterative_schur_complement_solver.cc
index 1aac565..6de410b 100644
--- a/internal/ceres/iterative_schur_complement_solver.cc
+++ b/internal/ceres/iterative_schur_complement_solver.cc
@@ -38,6 +38,7 @@
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/conjugate_gradients_solver.h"
+#include "ceres/detect_structure.h"
#include "ceres/implicit_schur_complement.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
@@ -69,35 +70,36 @@ LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl(
EventLogger event_logger("IterativeSchurComplementSolver::Solve");
CHECK_NOTNULL(A->block_structure());
-
+ const int num_eliminate_blocks = options_.elimination_groups[0];
// Initialize a ImplicitSchurComplement object.
if (schur_complement_ == NULL) {
- schur_complement_.reset(
- new ImplicitSchurComplement(options_.elimination_groups[0],
- options_.preconditioner_type == JACOBI));
+ DetectStructure(*(A->block_structure()),
+ num_eliminate_blocks,
+ &options_.row_block_size,
+ &options_.e_block_size,
+ &options_.f_block_size);
+ schur_complement_.reset(new ImplicitSchurComplement(options_));
}
schur_complement_->Init(*A, per_solve_options.D, b);
const int num_schur_complement_blocks =
- A->block_structure()->cols.size() - options_.elimination_groups[0];
+ A->block_structure()->cols.size() - num_eliminate_blocks;
if (num_schur_complement_blocks == 0) {
VLOG(2) << "No parameter blocks left in the schur complement.";
LinearSolver::Summary cg_summary;
cg_summary.num_iterations = 0;
- cg_summary.termination_type = TOLERANCE;
+ cg_summary.termination_type = LINEAR_SOLVER_SUCCESS;
schur_complement_->BackSubstitute(NULL, x);
return cg_summary;
}
// Initialize the solution to the Schur complement system to zero.
- //
- // TODO(sameeragarwal): There maybe a better initialization than an
- // all zeros solution. Explore other cheap starting points.
reduced_linear_system_solution_.resize(schur_complement_->num_rows());
reduced_linear_system_solution_.setZero();
- // Instantiate a conjugate gradient solver that runs on the Schur complement
- // matrix with the block diagonal of the matrix F'F as the preconditioner.
+ // Instantiate a conjugate gradient solver that runs on the Schur
+ // complement matrix with the block diagonal of the matrix F'F as
+ // the preconditioner.
LinearSolver::Options cg_options;
cg_options.max_num_iterations = options_.max_num_iterations;
ConjugateGradientsSolver cg_solver(cg_options);
@@ -108,6 +110,8 @@ LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl(
Preconditioner::Options preconditioner_options;
preconditioner_options.type = options_.preconditioner_type;
+ preconditioner_options.visibility_clustering_type =
+ options_.visibility_clustering_type;
preconditioner_options.sparse_linear_algebra_library_type =
options_.sparse_linear_algebra_library_type;
preconditioner_options.num_threads = options_.num_threads;
@@ -149,26 +153,26 @@ LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl(
preconditioner_->Update(*A, per_solve_options.D);
cg_per_solve_options.preconditioner = preconditioner_.get();
}
-
event_logger.AddEvent("Setup");
LinearSolver::Summary cg_summary;
cg_summary.num_iterations = 0;
- cg_summary.termination_type = FAILURE;
+ cg_summary.termination_type = LINEAR_SOLVER_FAILURE;
+ // TODO(sameeragarwal): Refactor preconditioners to return a more
+ // sane message.
+ cg_summary.message = "Preconditioner update failed.";
if (preconditioner_update_was_successful) {
cg_summary = cg_solver.Solve(schur_complement_.get(),
schur_complement_->rhs().data(),
cg_per_solve_options,
reduced_linear_system_solution_.data());
- if (cg_summary.termination_type != FAILURE) {
+ if (cg_summary.termination_type != LINEAR_SOLVER_FAILURE &&
+ cg_summary.termination_type != LINEAR_SOLVER_FATAL_ERROR) {
schur_complement_->BackSubstitute(
reduced_linear_system_solution_.data(), x);
}
}
-
- VLOG(2) << "CG Iterations : " << cg_summary.num_iterations;
-
event_logger.AddEvent("Solve");
return cg_summary;
}
diff --git a/internal/ceres/jet_quaternion_integration_test.cc b/internal/ceres/jet_quaternion_integration_test.cc
deleted file mode 100644
index 63101fb..0000000
--- a/internal/ceres/jet_quaternion_integration_test.cc
+++ /dev/null
@@ -1,201 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: keir@google.com (Keir Mierle)
-//
-// Tests the use of Cere's Jet type with the quaternions found in util/math/. In
-// theory, the unittests for the quaternion class should be type parameterized
-// to make for easier testing of instantiations of the quaternion class, but it
-// is not so, and not obviously worth the work to make the switch at this time.
-
-#include "base/stringprintf.h"
-#include "gtest/gtest.h"
-#include "util/math/mathlimits.h"
-#include "util/math/matrix3x3.h"
-#include "util/math/quaternion.h"
-#include "util/math/vector3.h"
-#include "ceres/test_util.h"
-#include "ceres/jet.h"
-#include "ceres/jet_traits.h"
-
-namespace ceres {
-namespace internal {
-
-// Use a 4-element derivative to simulate the case where each of the
-// quaternion elements are derivative parameters.
-typedef Jet<double, 4> J;
-
-struct JetTraitsTest : public ::testing::Test {
- protected:
- JetTraitsTest()
- : a(J(1.1, 0), J(2.1, 1), J(3.1, 2), J(4.1, 3)),
- b(J(0.1, 0), J(1.1, 1), J(2.1, 2), J(5.0, 3)),
- double_a(a[0].a, a[1].a, a[2].a, a[3].a),
- double_b(b[0].a, b[1].a, b[2].a, b[3].a) {
- // The quaternions should be valid rotations, so normalize them.
- a.Normalize();
- b.Normalize();
- double_a.Normalize();
- double_b.Normalize();
- }
-
- virtual ~JetTraitsTest() {}
-
- // A couple of arbitrary normalized quaternions.
- Quaternion<J> a, b;
-
- // The equivalent of a, b but in scalar form.
- Quaternion<double> double_a, double_b;
-};
-
-// Compare scalar multiplication to jet multiplication. Ignores derivatives.
-TEST_F(JetTraitsTest, QuaternionScalarMultiplicationWorks) {
- Quaternion<J> c = a * b;
- Quaternion<double> double_c = double_a * double_b;
-
- for (int i = 0; i < 4; ++i) {
- EXPECT_EQ(double_c[i], c[i].a);
- }
-}
-
-// Compare scalar slerp to jet slerp. Ignores derivatives.
-TEST_F(JetTraitsTest, QuaternionScalarSlerpWorks) {
- const J fraction(0.1);
- Quaternion<J> c = Quaternion<J>::Slerp(a, b, fraction);
- Quaternion<double> double_c =
- Quaternion<double>::Slerp(double_a, double_b, fraction.a);
-
- for (int i = 0; i < 4; ++i) {
- EXPECT_EQ(double_c[i], c[i].a);
- }
-}
-
-// On a 32-bit optimized build, the mismatch is about 1.4e-14.
-double const kTolerance = 1e-13;
-
-void ExpectJetsClose(const J &x, const J &y) {
- ExpectClose(x.a, y.a, kTolerance);
- ExpectClose(x.v[0], y.v[0], kTolerance);
- ExpectClose(x.v[1], y.v[1], kTolerance);
- ExpectClose(x.v[2], y.v[2], kTolerance);
- ExpectClose(x.v[3], y.v[3], kTolerance);
-}
-
-void ExpectQuaternionsClose(const Quaternion<J>& x, const Quaternion<J>& y) {
- for (int i = 0; i < 4; ++i) {
- ExpectJetsClose(x[i], y[i]);
- }
-}
-
-// Compare jet slurp to jet slerp using identies, checking derivatives.
-TEST_F(JetTraitsTest, CheckSlerpIdentitiesWithNontrivialDerivatives) {
- // Do a slerp to 0.75 directly.
- Quaternion<J> direct = Quaternion<J>::Slerp(a, b, J(0.75));
-
- // Now go part way twice, in theory ending at the same place.
- Quaternion<J> intermediate = Quaternion<J>::Slerp(a, b, J(0.5));
- Quaternion<J> indirect = Quaternion<J>::Slerp(intermediate, b, J(0.5));
-
- // Check that the destination is the same, including derivatives.
- ExpectQuaternionsClose(direct, indirect);
-}
-
-TEST_F(JetTraitsTest, CheckAxisAngleIsInvertibleWithNontrivialDerivatives) {
- Vector3<J> axis;
- J angle;
- a.GetAxisAngle(&axis, &angle);
- b.SetFromAxisAngle(axis, angle);
-
- ExpectQuaternionsClose(a, b);
-}
-
-TEST_F(JetTraitsTest,
- CheckRotationMatrixIsInvertibleWithNontrivialDerivatives) {
- Vector3<J> axis;
- J angle;
- Matrix3x3<J> R;
- a.ToRotationMatrix(&R);
- b.SetFromRotationMatrix(R);
-
- ExpectQuaternionsClose(a, b);
-}
-
-// This doesn't check correctnenss, only that the instantiation compiles.
-TEST_F(JetTraitsTest, CheckRotationBetweenIsCompilable) {
- // Get two arbitrary vectors x and y.
- Vector3<J> x, y;
- J ignored_angle;
- a.GetAxisAngle(&x, &ignored_angle);
- b.GetAxisAngle(&y, &ignored_angle);
-
- Quaternion<J> between_x_and_y = Quaternion<J>::RotationBetween(x, y);
-
- // Prevent optimizing this away.
- EXPECT_NE(between_x_and_y[0].a, 0.0);
-}
-
-TEST_F(JetTraitsTest, CheckRotatedWorksAsExpected) {
- // Get two arbitrary vectors x and y.
- Vector3<J> x;
- J ignored_angle;
- a.GetAxisAngle(&x, &ignored_angle);
-
- // Rotate via a quaternion.
- Vector3<J> y = b.Rotated(x);
-
- // Rotate via a rotation matrix.
- Matrix3x3<J> R;
- b.ToRotationMatrix(&R);
- Vector3<J> yp = R * x;
-
- ExpectJetsClose(yp[0], y[0]);
- ExpectJetsClose(yp[1], y[1]);
- ExpectJetsClose(yp[2], y[2]);
-}
-
-TEST_F(JetTraitsTest, CheckRotatedWorksAsExpectedWithDoubles) {
- // Get two arbitrary vectors x and y.
- Vector3<double> x;
- double ignored_angle;
- double_a.GetAxisAngle(&x, &ignored_angle);
-
- // Rotate via a quaternion.
- Vector3<double> y = double_b.Rotated(x);
-
- // Rotate via a rotation matrix.
- Matrix3x3<double> R;
- double_b.ToRotationMatrix(&R);
- Vector3<double> yp = R * x;
-
- ExpectClose(yp[0], y[0], kTolerance);
- ExpectClose(yp[1], y[1], kTolerance);
- ExpectClose(yp[2], y[2], kTolerance);
-}
-
-} // namespace internal
-} // namespace ceres
diff --git a/internal/ceres/lapack.cc b/internal/ceres/lapack.cc
index 73bfa69..e124d75 100644
--- a/internal/ceres/lapack.cc
+++ b/internal/ceres/lapack.cc
@@ -29,6 +29,9 @@
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include "ceres/lapack.h"
+
+#include "ceres/internal/port.h"
+#include "ceres/linear_solver.h"
#include "glog/logging.h"
// C interface to the LAPACK Cholesky factorization and triangular solve.
@@ -63,12 +66,14 @@ extern "C" void dgels_(char* uplo,
namespace ceres {
namespace internal {
-int LAPACK::SolveInPlaceUsingCholesky(int num_rows,
- const double* in_lhs,
- double* rhs_and_solution) {
+LinearSolverTerminationType LAPACK::SolveInPlaceUsingCholesky(
+ int num_rows,
+ const double* in_lhs,
+ double* rhs_and_solution,
+ string* message) {
#ifdef CERES_NO_LAPACK
LOG(FATAL) << "Ceres was built without a BLAS library.";
- return -1;
+ return LINEAR_SOLVER_FATAL_ERROR;
#else
char uplo = 'L';
int n = num_rows;
@@ -77,17 +82,33 @@ int LAPACK::SolveInPlaceUsingCholesky(int num_rows,
double* lhs = const_cast<double*>(in_lhs);
dpotrf_(&uplo, &n, lhs, &n, &info);
- if (info != 0) {
- LOG(INFO) << "Cholesky factorization (dpotrf) failed: " << info;
- return info;
+ if (info < 0) {
+ LOG(FATAL) << "Congratulations, you found a bug in Ceres."
+ << "Please report it."
+ << "LAPACK::dpotrf fatal error."
+ << "Argument: " << -info << " is invalid.";
+ return LINEAR_SOLVER_FATAL_ERROR;
+ }
+
+ if (info > 0) {
+ *message =
+ StringPrintf(
+ "LAPACK::dpotrf numerical failure. "
+ "The leading minor of order %d is not positive definite.", info);
+ return LINEAR_SOLVER_FAILURE;
}
dpotrs_(&uplo, &n, &nrhs, lhs, &n, rhs_and_solution, &n, &info);
- if (info != 0) {
- LOG(INFO) << "Triangular solve (dpotrs) failed: " << info;
+ if (info < 0) {
+ LOG(FATAL) << "Congratulations, you found a bug in Ceres."
+ << "Please report it."
+ << "LAPACK::dpotrs fatal error."
+ << "Argument: " << -info << " is invalid.";
+ return LINEAR_SOLVER_FATAL_ERROR;
}
- return info;
+ *message = "Success";
+ return LINEAR_SOLVER_SUCCESS;
#endif
};
@@ -113,20 +134,27 @@ int LAPACK::EstimateWorkSizeForQR(int num_rows, int num_cols) {
&lwork,
&info);
- CHECK_EQ(info, 0);
- return work;
+ if (info < 0) {
+ LOG(FATAL) << "Congratulations, you found a bug in Ceres."
+ << "Please report it."
+ << "LAPACK::dgels fatal error."
+ << "Argument: " << -info << " is invalid.";
+ }
+ return static_cast<int>(work);
#endif
}
-int LAPACK::SolveUsingQR(int num_rows,
- int num_cols,
- const double* in_lhs,
- int work_size,
- double* work,
- double* rhs_and_solution) {
+LinearSolverTerminationType LAPACK::SolveInPlaceUsingQR(
+ int num_rows,
+ int num_cols,
+ const double* in_lhs,
+ int work_size,
+ double* work,
+ double* rhs_and_solution,
+ string* message) {
#ifdef CERES_NO_LAPACK
LOG(FATAL) << "Ceres was built without a LAPACK library.";
- return -1;
+ return LINEAR_SOLVER_FATAL_ERROR;
#else
char trans = 'N';
int m = num_rows;
@@ -149,7 +177,15 @@ int LAPACK::SolveUsingQR(int num_rows,
&work_size,
&info);
- return info;
+ if (info < 0) {
+ LOG(FATAL) << "Congratulations, you found a bug in Ceres."
+ << "Please report it."
+ << "LAPACK::dgels fatal error."
+ << "Argument: " << -info << " is invalid.";
+ }
+
+ *message = "Success.";
+ return LINEAR_SOLVER_SUCCESS;
#endif
}
diff --git a/internal/ceres/lapack.h b/internal/ceres/lapack.h
index 4f3a88c..8933c2c 100644
--- a/internal/ceres/lapack.h
+++ b/internal/ceres/lapack.h
@@ -31,6 +31,10 @@
#ifndef CERES_INTERNAL_LAPACK_H_
#define CERES_INTERNAL_LAPACK_H_
+#include <string>
+#include "ceres/internal/port.h"
+#include "ceres/linear_solver.h"
+
namespace ceres {
namespace internal {
@@ -47,10 +51,14 @@ class LAPACK {
//
// This function uses the LAPACK dpotrf and dpotrs routines.
//
- // The return value is zero if the solve is successful.
- static int SolveInPlaceUsingCholesky(int num_rows,
- const double* lhs,
- double* rhs_and_solution);
+ // The return value and the message string together describe whether
+ // the solver terminated successfully or not and if so, what was the
+ // reason for failure.
+ static LinearSolverTerminationType SolveInPlaceUsingCholesky(
+ int num_rows,
+ const double* lhs,
+ double* rhs_and_solution,
+ string* message);
// The SolveUsingQR function requires a buffer for its temporary
// computation. This function given the size of the lhs matrix will
@@ -73,13 +81,17 @@ class LAPACK {
//
// This function uses the LAPACK dgels routine.
//
- // The return value is zero if the solve is successful.
- static int SolveUsingQR(int num_rows,
- int num_cols,
- const double* lhs,
- int work_size,
- double* work,
- double* rhs_and_solution);
+ // The return value and the message string together describe whether
+ // the solver terminated successfully or not and if so, what was the
+ // reason for failure.
+ static LinearSolverTerminationType SolveInPlaceUsingQR(
+ int num_rows,
+ int num_cols,
+ const double* lhs,
+ int work_size,
+ double* work,
+ double* rhs_and_solution,
+ string* message);
};
} // namespace internal
diff --git a/internal/ceres/levenberg_marquardt_strategy.cc b/internal/ceres/levenberg_marquardt_strategy.cc
index fad7c1f..ce3b69a 100644
--- a/internal/ceres/levenberg_marquardt_strategy.cc
+++ b/internal/ceres/levenberg_marquardt_strategy.cc
@@ -105,10 +105,13 @@ TrustRegionStrategy::Summary LevenbergMarquardtStrategy::ComputeStep(
// do not need to be modified.
LinearSolver::Summary linear_solver_summary =
linear_solver_->Solve(jacobian, residuals, solve_options, step);
- if (linear_solver_summary.termination_type == FAILURE ||
- !IsArrayValid(num_parameters, step)) {
+
+ if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
+ LOG(WARNING) << "Linear solver fatal error.";
+ } else if (linear_solver_summary.termination_type == LINEAR_SOLVER_FAILURE ||
+ !IsArrayValid(num_parameters, step)) {
LOG(WARNING) << "Linear solver failure. Failed to compute a finite step.";
- linear_solver_summary.termination_type = FAILURE;
+ linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE;
} else {
VectorRef(step, num_parameters) *= -1.0;
}
diff --git a/internal/ceres/levenberg_marquardt_strategy_test.cc b/internal/ceres/levenberg_marquardt_strategy_test.cc
index 86302b7..ac7ddbc 100644
--- a/internal/ceres/levenberg_marquardt_strategy_test.cc
+++ b/internal/ceres/levenberg_marquardt_strategy_test.cc
@@ -150,7 +150,7 @@ TEST(LevenbergMarquardtStrategy, CorrectDiagonalToLinearSolver) {
TrustRegionStrategy::Summary summary =
lms.ComputeStep(pso, &dsm, &residual, x);
- EXPECT_EQ(summary.termination_type, FAILURE);
+ EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_FAILURE);
}
}
diff --git a/internal/ceres/line_search.cc b/internal/ceres/line_search.cc
index 8323896..7ff1164 100644
--- a/internal/ceres/line_search.cc
+++ b/internal/ceres/line_search.cc
@@ -28,7 +28,9 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
+#include <iomanip>
+#include <iostream> // NOLINT
+
#include "ceres/line_search.h"
#include "ceres/fpclassify.h"
@@ -41,6 +43,8 @@
namespace ceres {
namespace internal {
namespace {
+// Precision used for floating point values in error message output.
+const int kErrorMessageNumericPrecision = 8;
FunctionSample ValueSample(const double x, const double value) {
FunctionSample sample;
@@ -64,13 +68,12 @@ FunctionSample ValueAndGradientSample(const double x,
} // namespace
+
+std::ostream& operator<<(std::ostream &os, const FunctionSample& sample);
+
// Convenience stream operator for pushing FunctionSamples into log messages.
-std::ostream& operator<<(std::ostream &os,
- const FunctionSample& sample) {
- os << "[x: " << sample.x << ", value: " << sample.value
- << ", gradient: " << sample.gradient << ", value_is_valid: "
- << std::boolalpha << sample.value_is_valid << ", gradient_is_valid: "
- << std::boolalpha << sample.gradient_is_valid << "]";
+std::ostream& operator<<(std::ostream &os, const FunctionSample& sample) {
+ os << sample.ToDebugString();
return os;
}
@@ -170,6 +173,7 @@ double LineSearch::InterpolatingPolynomialMinimizingStepSize(
// to avoid replicating current.value_is_valid == false
// behaviour in WolfeLineSearch.
CHECK(lowerbound.value_is_valid)
+ << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
<< "Ceres bug: lower-bound sample for interpolation is invalid, "
<< "please contact the developers!, interpolation_type: "
<< LineSearchInterpolationTypeToString(interpolation_type)
@@ -237,20 +241,26 @@ void ArmijoLineSearch::Search(const double step_size_estimate,
FunctionSample current = ValueAndGradientSample(step_size_estimate, 0.0, 0.0);
current.value_is_valid = false;
- const bool interpolation_uses_gradients =
+ // As the Armijo line search algorithm always uses the initial point, for
+ // which both the function value and derivative are known, when fitting a
+ // minimizing polynomial, we can fit up to a quadratic without requiring the
+ // gradient at the current query point.
+ const bool interpolation_uses_gradient_at_current_sample =
options().interpolation_type == CUBIC;
const double descent_direction_max_norm =
static_cast<const LineSearchFunction*>(function)->DirectionInfinityNorm();
++summary->num_function_evaluations;
- if (interpolation_uses_gradients) { ++summary->num_gradient_evaluations; }
+ if (interpolation_uses_gradient_at_current_sample) {
+ ++summary->num_gradient_evaluations;
+ }
current.value_is_valid =
function->Evaluate(current.x,
&current.value,
- interpolation_uses_gradients
+ interpolation_uses_gradient_at_current_sample
? &current.gradient : NULL);
current.gradient_is_valid =
- interpolation_uses_gradients && current.value_is_valid;
+ interpolation_uses_gradient_at_current_sample && current.value_is_valid;
while (!current.value_is_valid ||
current.value > (initial_cost
+ options().sufficient_decrease
@@ -265,7 +275,7 @@ void ArmijoLineSearch::Search(const double step_size_estimate,
"satisfying the sufficient decrease condition within "
"specified max_num_iterations: %d.",
options().max_num_iterations);
- LOG(WARNING) << summary->error;
+ LOG_IF(WARNING, !options().is_silent) << summary->error;
return;
}
@@ -283,7 +293,7 @@ void ArmijoLineSearch::Search(const double step_size_estimate,
StringPrintf("Line search failed: step_size too small: %.5e "
"with descent_direction_max_norm: %.5e.", step_size,
descent_direction_max_norm);
- LOG(WARNING) << summary->error;
+ LOG_IF(WARNING, !options().is_silent) << summary->error;
return;
}
@@ -291,14 +301,16 @@ void ArmijoLineSearch::Search(const double step_size_estimate,
current.x = step_size;
++summary->num_function_evaluations;
- if (interpolation_uses_gradients) { ++summary->num_gradient_evaluations; }
+ if (interpolation_uses_gradient_at_current_sample) {
+ ++summary->num_gradient_evaluations;
+ }
current.value_is_valid =
function->Evaluate(current.x,
&current.value,
- interpolation_uses_gradients
+ interpolation_uses_gradient_at_current_sample
? &current.gradient : NULL);
current.gradient_is_valid =
- interpolation_uses_gradients && current.value_is_valid;
+ interpolation_uses_gradient_at_current_sample && current.value_is_valid;
}
summary->optimal_step_size = current.x;
@@ -350,33 +362,36 @@ void WolfeLineSearch::Search(const double step_size_estimate,
&bracket_low,
&bracket_high,
&do_zoom_search,
- summary) &&
- summary->num_iterations < options().max_num_iterations) {
- // Failed to find either a valid point or a valid bracket, but we did not
- // run out of iterations.
+ summary)) {
+ // Failed to find either a valid point, a valid bracket satisfying the Wolfe
+ // conditions, or even a step size > minimum tolerance satisfying the Armijo
+ // condition.
return;
}
+
if (!do_zoom_search) {
// Either: Bracketing phase already found a point satisfying the strong
// Wolfe conditions, thus no Zoom required.
//
// Or: Bracketing failed to find a valid bracket or a point satisfying the
- // strong Wolfe conditions within max_num_iterations. As this is an
- // 'artificial' constraint, and we would otherwise fail to produce a valid
- // point when ArmijoLineSearch would succeed, we return the lowest point
- // found thus far which satsifies the Armijo condition (but not the Wolfe
- // conditions).
- CHECK(bracket_low.value_is_valid)
- << "Ceres bug: Bracketing produced an invalid bracket_low, please "
- << "contact the developers!, bracket_low: " << bracket_low
- << ", bracket_high: " << bracket_high << ", num_iterations: "
- << summary->num_iterations << ", max_num_iterations: "
- << options().max_num_iterations;
+ // strong Wolfe conditions within max_num_iterations, or whilst searching
+ // shrank the bracket width until it was below our minimum tolerance.
+ // As these are 'artificial' constraints, and we would otherwise fail to
+ // produce a valid point when ArmijoLineSearch would succeed, we return the
+ // point with the lowest cost found thus far which satisfies the Armijo
+ // condition (but not the Wolfe conditions).
summary->optimal_step_size = bracket_low.x;
summary->success = true;
return;
}
+ VLOG(3) << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
+ << "Starting line search zoom phase with bracket_low: "
+ << bracket_low << ", bracket_high: " << bracket_high
+ << ", bracket width: " << fabs(bracket_low.x - bracket_high.x)
+ << ", bracket abs delta cost: "
+ << fabs(bracket_low.value - bracket_high.value);
+
// Wolfe Zoom phase: Called when the Bracketing phase finds an interval of
// non-zero, finite width that should bracket step sizes which satisfy the
// (strong) Wolfe conditions (before finding a step size that satisfies the
@@ -419,11 +434,22 @@ void WolfeLineSearch::Search(const double step_size_estimate,
summary->success = true;
}
-// Returns true iff bracket_low & bracket_high bound a bracket that contains
-// points which satisfy the strong Wolfe conditions. Otherwise, on return false,
-// if we stopped searching due to the 'artificial' condition of reaching
-// max_num_iterations, bracket_low is the step size amongst all those
-// tested, which satisfied the Armijo decrease condition and minimized f().
+// Returns true if either:
+//
+// A termination condition satisfying the (strong) Wolfe bracketing conditions
+// is found:
+//
+// - A valid point, defined as a bracket of zero width [zoom not required].
+// - A valid bracket (of width > tolerance), [zoom required].
+//
+// Or, searching was stopped due to an 'artificial' constraint, i.e. not
+// a condition imposed / required by the underlying algorithm, but instead an
+// engineering / implementation consideration. But a step which exceeds the
+// minimum step size, and satisfies the Armijo condition was still found,
+// and should thus be used [zoom not required].
+//
+// Returns false if no step size > minimum step size was found which
+// satisfies at least the Armijo condition.
bool WolfeLineSearch::BracketingPhase(
const FunctionSample& initial_position,
const double step_size_estimate,
@@ -437,23 +463,28 @@ bool WolfeLineSearch::BracketingPhase(
FunctionSample current = ValueAndGradientSample(step_size_estimate, 0.0, 0.0);
current.value_is_valid = false;
- const bool interpolation_uses_gradients =
- options().interpolation_type == CUBIC;
const double descent_direction_max_norm =
static_cast<const LineSearchFunction*>(function)->DirectionInfinityNorm();
*do_zoom_search = false;
*bracket_low = initial_position;
+ // As we require the gradient to evaluate the Wolfe condition, we always
+ // calculate it together with the value, irrespective of the interpolation
+ // type. As opposed to only calculating the gradient after the Armijo
+ // condition is satisfied, as the computational saving from this approach
+ // would be slight (perhaps even negative due to the extra call). Also,
+ // always calculating the value & gradient together protects against us
+ // reporting invalid solutions if the cost function returns slightly different
+ // function values when evaluated with / without gradients (due to numerical
+ // issues).
++summary->num_function_evaluations;
- if (interpolation_uses_gradients) { ++summary->num_gradient_evaluations; }
+ ++summary->num_gradient_evaluations;
current.value_is_valid =
function->Evaluate(current.x,
&current.value,
- interpolation_uses_gradients
- ? &current.gradient : NULL);
- current.gradient_is_valid =
- interpolation_uses_gradients && current.value_is_valid;
+ &current.gradient);
+ current.gradient_is_valid = current.value_is_valid;
while (true) {
++summary->num_iterations;
@@ -470,22 +501,14 @@ bool WolfeLineSearch::BracketingPhase(
*do_zoom_search = true;
*bracket_low = previous;
*bracket_high = current;
+ VLOG(3) << std::scientific
+ << std::setprecision(kErrorMessageNumericPrecision)
+ << "Bracket found: current step (" << current.x
+ << ") violates Armijo sufficient condition, or has passed an "
+ << "inflection point of f() based on value.";
break;
}
- // Irrespective of the interpolation type we are using, we now need the
- // gradient at the current point (which satisfies the Armijo condition)
- // in order to check the strong Wolfe conditions.
- if (!interpolation_uses_gradients) {
- ++summary->num_function_evaluations;
- ++summary->num_gradient_evaluations;
- current.value_is_valid =
- function->Evaluate(current.x,
- &current.value,
- &current.gradient);
- current.gradient_is_valid = current.value_is_valid;
- }
-
if (current.value_is_valid &&
fabs(current.gradient) <=
-options().sufficient_curvature_decrease * initial_position.gradient) {
@@ -493,6 +516,11 @@ bool WolfeLineSearch::BracketingPhase(
// valid termination point, therefore a Zoom not required.
*bracket_low = current;
*bracket_high = current;
+ VLOG(3) << std::scientific
+ << std::setprecision(kErrorMessageNumericPrecision)
+ << "Bracketing phase found step size: " << current.x
+ << ", satisfying strong Wolfe conditions, initial_position: "
+ << initial_position << ", current: " << current;
break;
} else if (current.value_is_valid && current.gradient >= 0) {
@@ -505,6 +533,30 @@ bool WolfeLineSearch::BracketingPhase(
// Note inverse ordering from first bracket case.
*bracket_low = current;
*bracket_high = previous;
+ VLOG(3) << "Bracket found: current step (" << current.x
+ << ") satisfies Armijo, but has gradient >= 0, thus have passed "
+ << "an inflection point of f().";
+ break;
+
+ } else if (current.value_is_valid &&
+ fabs(current.x - previous.x) * descent_direction_max_norm
+ < options().min_step_size) {
+ // We have shrunk the search bracket to a width less than our tolerance,
+ // and still not found either a point satisfying the strong Wolfe
+ // conditions, or a valid bracket containing such a point. Stop searching
+ // and set bracket_low to the step size amongst all those tested which
+ // minimizes f() and satisfies the Armijo condition.
+ LOG_IF(WARNING, !options().is_silent)
+ << "Line search failed: Wolfe bracketing phase shrank "
+ << "bracket width: " << fabs(current.x - previous.x)
+ << ", to < tolerance: " << options().min_step_size
+ << ", with descent_direction_max_norm: "
+ << descent_direction_max_norm << ", and failed to find "
+ << "a point satisfying the strong Wolfe conditions or a "
+ << "bracketing containing such a point. Accepting "
+ << "point found satisfying Armijo condition only, to "
+ << "allow continuation.";
+ *bracket_low = current;
break;
} else if (summary->num_iterations >= options().max_num_iterations) {
@@ -516,14 +568,14 @@ bool WolfeLineSearch::BracketingPhase(
"find a point satisfying strong Wolfe conditions, or a "
"bracket containing such a point within specified "
"max_num_iterations: %d", options().max_num_iterations);
- LOG(WARNING) << summary->error;
+ LOG_IF(WARNING, !options().is_silent) << summary->error;
// Ensure that bracket_low is always set to the step size amongst all
// those tested which minimizes f() and satisfies the Armijo condition
// when we terminate due to the 'artificial' max_num_iterations condition.
*bracket_low =
current.value_is_valid && current.value < bracket_low->value
? current : *bracket_low;
- return false;
+ break;
}
// Either: f(current) is invalid; or, f(current) is valid, but does not
// satisfy the strong Wolfe conditions itself, or the conditions for
@@ -555,7 +607,7 @@ bool WolfeLineSearch::BracketingPhase(
StringPrintf("Line search failed: step_size too small: %.5e "
"with descent_direction_max_norm: %.5e", step_size,
descent_direction_max_norm);
- LOG(WARNING) << summary->error;
+ LOG_IF(WARNING, !options().is_silent) << summary->error;
return false;
}
@@ -563,17 +615,22 @@ bool WolfeLineSearch::BracketingPhase(
current.x = step_size;
++summary->num_function_evaluations;
- if (interpolation_uses_gradients) { ++summary->num_gradient_evaluations; }
+ ++summary->num_gradient_evaluations;
current.value_is_valid =
function->Evaluate(current.x,
&current.value,
- interpolation_uses_gradients
- ? &current.gradient : NULL);
- current.gradient_is_valid =
- interpolation_uses_gradients && current.value_is_valid;
+ &current.gradient);
+ current.gradient_is_valid = current.value_is_valid;
+ }
+
+ // Ensure that even if a valid bracket was found, we will only mark a zoom
+ // as required if the bracket's width is greater than our minimum tolerance.
+ if (*do_zoom_search &&
+ fabs(bracket_high->x - bracket_low->x) * descent_direction_max_norm
+ < options().min_step_size) {
+ *do_zoom_search = false;
}
- // Either we have a valid point, defined as a bracket of zero width, in which
- // case no zoom is required, or a valid bracket in which to zoom.
+
return true;
}
@@ -589,6 +646,7 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
Function* function = options().function;
CHECK(bracket_low.value_is_valid && bracket_low.gradient_is_valid)
+ << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
<< "Ceres bug: f_low input to Wolfe Zoom invalid, please contact "
<< "the developers!, initial_position: " << initial_position
<< ", bracket_low: " << bracket_low
@@ -599,22 +657,46 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
// not have been calculated (if bracket_high.value does not satisfy the
// Armijo sufficient decrease condition and interpolation method does not
// require it).
+ //
+ // We also do not require that: bracket_low.value < bracket_high.value,
+ // although this is typical. This is to deal with the case when
+ // bracket_low = initial_position, bracket_high is the first sample,
+ // and bracket_high does not satisfy the Armijo condition, but still has
+ // bracket_high.value < initial_position.value.
CHECK(bracket_high.value_is_valid)
+ << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
<< "Ceres bug: f_high input to Wolfe Zoom invalid, please "
<< "contact the developers!, initial_position: " << initial_position
<< ", bracket_low: " << bracket_low
<< ", bracket_high: "<< bracket_high;
- CHECK_LT(bracket_low.gradient *
- (bracket_high.x - bracket_low.x), 0.0)
- << "Ceres bug: f_high input to Wolfe Zoom does not satisfy gradient "
- << "condition combined with f_low, please contact the developers!"
- << ", initial_position: " << initial_position
- << ", bracket_low: " << bracket_low
- << ", bracket_high: "<< bracket_high;
+
+ if (bracket_low.gradient * (bracket_high.x - bracket_low.x) >= 0) {
+ // The third condition for a valid initial bracket:
+ //
+ // 3. bracket_high is chosen after bracket_low, s.t.
+ // bracket_low.gradient * (bracket_high.x - bracket_low.x) < 0.
+ //
+ // is not satisfied. As this can happen when the users' cost function
+ // returns inconsistent gradient values relative to the function values,
+ // we do not CHECK_LT(), but we do stop processing and return an invalid
+ // value.
+ summary->error =
+ StringPrintf("Line search failed: Wolfe zoom phase passed a bracket "
+ "which does not satisfy: bracket_low.gradient * "
+ "(bracket_high.x - bracket_low.x) < 0 [%.8e !< 0] "
+ "with initial_position: %s, bracket_low: %s, bracket_high:"
+ " %s, the most likely cause of which is the cost function "
+ "returning inconsistent gradient & function values.",
+ bracket_low.gradient * (bracket_high.x - bracket_low.x),
+ initial_position.ToDebugString().c_str(),
+ bracket_low.ToDebugString().c_str(),
+ bracket_high.ToDebugString().c_str());
+ LOG_IF(WARNING, !options().is_silent) << summary->error;
+ solution->value_is_valid = false;
+ return false;
+ }
const int num_bracketing_iterations = summary->num_iterations;
- const bool interpolation_uses_gradients =
- options().interpolation_type == CUBIC;
const double descent_direction_max_norm =
static_cast<const LineSearchFunction*>(function)->DirectionInfinityNorm();
@@ -630,7 +712,7 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
"within specified max_num_iterations: %d, "
"(num iterations taken for bracketing: %d).",
options().max_num_iterations, num_bracketing_iterations);
- LOG(WARNING) << summary->error;
+ LOG_IF(WARNING, !options().is_silent) << summary->error;
return false;
}
if (fabs(bracket_high.x - bracket_low.x) * descent_direction_max_norm
@@ -642,7 +724,7 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
"too small with descent_direction_max_norm: %.5e.",
fabs(bracket_high.x - bracket_low.x),
descent_direction_max_norm);
- LOG(WARNING) << summary->error;
+ LOG_IF(WARNING, !options().is_silent) << summary->error;
return false;
}
@@ -669,15 +751,23 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
upper_bound_step.x);
// No check on magnitude of step size being too small here as it is
// lower-bounded by the initial bracket start point, which was valid.
+ //
+ // As we require the gradient to evaluate the Wolfe condition, we always
+ // calculate it together with the value, irrespective of the interpolation
+ // type. As opposed to only calculating the gradient after the Armijo
+    // condition is satisfied, as the computational saving from this approach
+ // would be slight (perhaps even negative due to the extra call). Also,
+ // always calculating the value & gradient together protects against us
+ // reporting invalid solutions if the cost function returns slightly
+ // different function values when evaluated with / without gradients (due
+ // to numerical issues).
++summary->num_function_evaluations;
- if (interpolation_uses_gradients) { ++summary->num_gradient_evaluations; }
+ ++summary->num_gradient_evaluations;
solution->value_is_valid =
function->Evaluate(solution->x,
&solution->value,
- interpolation_uses_gradients
- ? &solution->gradient : NULL);
- solution->gradient_is_valid =
- interpolation_uses_gradients && solution->value_is_valid;
+ &solution->gradient);
+ solution->gradient_is_valid = solution->value_is_valid;
if (!solution->value_is_valid) {
summary->error =
StringPrintf("Line search failed: Wolfe Zoom phase found "
@@ -685,10 +775,16 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
"between low_step: %.5e and high_step: %.5e "
"at which function is valid.",
solution->x, bracket_low.x, bracket_high.x);
- LOG(WARNING) << summary->error;
+ LOG_IF(WARNING, !options().is_silent) << summary->error;
return false;
}
+ VLOG(3) << "Zoom iteration: "
+ << summary->num_iterations - num_bracketing_iterations
+ << ", bracket_low: " << bracket_low
+ << ", bracket_high: " << bracket_high
+ << ", minimizing solution: " << *solution;
+
if ((solution->value > (initial_position.value
+ options().sufficient_decrease
* initial_position.gradient
@@ -701,31 +797,13 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
}
// Armijo sufficient decrease satisfied, check strong Wolfe condition.
- if (!interpolation_uses_gradients) {
- // Irrespective of the interpolation type we are using, we now need the
- // gradient at the current point (which satisfies the Armijo condition)
- // in order to check the strong Wolfe conditions.
- ++summary->num_function_evaluations;
- ++summary->num_gradient_evaluations;
- solution->value_is_valid =
- function->Evaluate(solution->x,
- &solution->value,
- &solution->gradient);
- solution->gradient_is_valid = solution->value_is_valid;
- if (!solution->value_is_valid) {
- summary->error =
- StringPrintf("Line search failed: Wolfe Zoom phase found "
- "step_size: %.5e, for which function is invalid, "
- "between low_step: %.5e and high_step: %.5e "
- "at which function is valid.",
- solution->x, bracket_low.x, bracket_high.x);
- LOG(WARNING) << summary->error;
- return false;
- }
- }
if (fabs(solution->gradient) <=
-options().sufficient_curvature_decrease * initial_position.gradient) {
// Found a valid termination point satisfying strong Wolfe conditions.
+ VLOG(3) << std::scientific
+ << std::setprecision(kErrorMessageNumericPrecision)
+ << "Zoom phase found step size: " << solution->x
+ << ", satisfying strong Wolfe conditions.";
break;
} else if (solution->gradient * (bracket_high.x - bracket_low.x) >= 0) {
@@ -741,5 +819,3 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
} // namespace internal
} // namespace ceres
-
-#endif // CERES_NO_LINE_SEARCH_MINIMIZER
diff --git a/internal/ceres/line_search.h b/internal/ceres/line_search.h
index 5f24e9f..97b9bc6 100644
--- a/internal/ceres/line_search.h
+++ b/internal/ceres/line_search.h
@@ -33,8 +33,6 @@
#ifndef CERES_INTERNAL_LINE_SEARCH_H_
#define CERES_INTERNAL_LINE_SEARCH_H_
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
-
#include <string>
#include <vector>
#include "ceres/internal/eigen.h"
@@ -71,6 +69,7 @@ class LineSearch {
max_num_iterations(20),
sufficient_curvature_decrease(0.9),
max_step_expansion(10.0),
+ is_silent(false),
function(NULL) {}
// Degree of the polynomial used to approximate the objective
@@ -144,6 +143,8 @@ class LineSearch {
// By definition for expansion, max_step_expansion > 1.0.
double max_step_expansion;
+ bool is_silent;
+
// The one dimensional function that the line search algorithm
// minimizes.
Function* function;
@@ -295,5 +296,4 @@ class WolfeLineSearch : public LineSearch {
} // namespace internal
} // namespace ceres
-#endif // CERES_NO_LINE_SEARCH_MINIMIZER
#endif // CERES_INTERNAL_LINE_SEARCH_H_
diff --git a/internal/ceres/line_search_direction.cc b/internal/ceres/line_search_direction.cc
index 8ded823..dddcecd 100644
--- a/internal/ceres/line_search_direction.cc
+++ b/internal/ceres/line_search_direction.cc
@@ -28,8 +28,6 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
-
#include "ceres/line_search_direction.h"
#include "ceres/line_search_minimizer.h"
#include "ceres/low_rank_inverse_hessian.h"
@@ -67,7 +65,7 @@ class NonlinearConjugateGradient : public LineSearchDirection {
case FLETCHER_REEVES:
beta = current.gradient_squared_norm / previous.gradient_squared_norm;
break;
- case POLAK_RIBIRERE:
+ case POLAK_RIBIERE:
gradient_change = current.gradient - previous.gradient;
beta = (current.gradient.dot(gradient_change) /
previous.gradient_squared_norm);
@@ -121,6 +119,7 @@ class LBFGS : public LineSearchDirection {
low_rank_inverse_hessian_.Update(
previous.search_direction * previous.step_size,
current.gradient - previous.gradient);
+
search_direction->setZero();
low_rank_inverse_hessian_.RightMultiply(current.gradient.data(),
search_direction->data());
@@ -176,9 +175,46 @@ class BFGS : public LineSearchDirection {
const Vector delta_gradient = current.gradient - previous.gradient;
const double delta_x_dot_delta_gradient = delta_x.dot(delta_gradient);
- if (delta_x_dot_delta_gradient <= 1e-10) {
+ // The (L)BFGS algorithm explicitly requires that the secant equation:
+ //
+ // B_{k+1} * s_k = y_k
+ //
+ // Is satisfied at each iteration, where B_{k+1} is the approximated
+ // Hessian at the k+1-th iteration, s_k = (x_{k+1} - x_{k}) and
+ // y_k = (grad_{k+1} - grad_{k}). As the approximated Hessian must be
+ // positive definite, this is equivalent to the condition:
+ //
+ // s_k^T * y_k > 0 [s_k^T * B_{k+1} * s_k = s_k^T * y_k > 0]
+ //
+ // This condition would always be satisfied if the function was strictly
+ // convex, alternatively, it is always satisfied provided that a Wolfe line
+ // search is used (even if the function is not strictly convex). See [1]
+ // (p138) for a proof.
+ //
+ // Although Ceres will always use a Wolfe line search when using (L)BFGS,
+ // practical implementation considerations mean that the line search
+ // may return a point that satisfies only the Armijo condition, and thus
+ // could violate the Secant equation. As such, we will only use a step
+ // to update the Hessian approximation if:
+ //
+ // s_k^T * y_k > tolerance
+ //
+ // It is important that tolerance is very small (and >=0), as otherwise we
+ // might skip the update too often and fail to capture important curvature
+ // information in the Hessian. For example going from 1e-10 -> 1e-14
+ // improves the NIST benchmark score from 43/54 to 53/54.
+ //
+    // [1] Nocedal J., Wright S., Numerical Optimization, 2nd Ed. Springer, 1999.
+ //
+ // TODO(alexs.mac): Consider using Damped BFGS update instead of
+ // skipping update.
+ const double kBFGSSecantConditionHessianUpdateTolerance = 1e-14;
+ if (delta_x_dot_delta_gradient <=
+ kBFGSSecantConditionHessianUpdateTolerance) {
VLOG(2) << "Skipping BFGS Update, delta_x_dot_delta_gradient too "
- << "small: " << delta_x_dot_delta_gradient;
+ << "small: " << delta_x_dot_delta_gradient << ", tolerance: "
+ << kBFGSSecantConditionHessianUpdateTolerance
+ << " (Secant condition).";
} else {
// Update dense inverse Hessian approximation.
@@ -214,8 +250,13 @@ class BFGS : public LineSearchDirection {
// Part II: Implementation and experiments, Management Science,
// 20(5), 863-874, 1974.
// [2] Nocedal J., Wright S., Numerical Optimization, Springer, 1999.
- inverse_hessian_ *=
+ const double approximate_eigenvalue_scale =
delta_x_dot_delta_gradient / delta_gradient.dot(delta_gradient);
+ inverse_hessian_ *= approximate_eigenvalue_scale;
+
+ VLOG(4) << "Applying approximate_eigenvalue_scale: "
+ << approximate_eigenvalue_scale << " to initial inverse "
+ << "Hessian approximation.";
}
initialized_ = true;
@@ -329,5 +370,3 @@ LineSearchDirection::Create(const LineSearchDirection::Options& options) {
} // namespace internal
} // namespace ceres
-
-#endif // CERES_NO_LINE_SEARCH_MINIMIZER
diff --git a/internal/ceres/line_search_direction.h b/internal/ceres/line_search_direction.h
index 0857cb0..c77fdc8 100644
--- a/internal/ceres/line_search_direction.h
+++ b/internal/ceres/line_search_direction.h
@@ -31,8 +31,6 @@
#ifndef CERES_INTERNAL_LINE_SEARCH_DIRECTION_H_
#define CERES_INTERNAL_LINE_SEARCH_DIRECTION_H_
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
-
#include "ceres/internal/eigen.h"
#include "ceres/line_search_minimizer.h"
#include "ceres/types.h"
@@ -71,5 +69,4 @@ class LineSearchDirection {
} // namespace internal
} // namespace ceres
-#endif // CERES_NO_LINE_SEARCH_MINIMIZER
#endif // CERES_INTERNAL_LINE_SEARCH_DIRECTION_H_
diff --git a/internal/ceres/line_search_minimizer.cc b/internal/ceres/line_search_minimizer.cc
index 2cc89fa..ae77a73 100644
--- a/internal/ceres/line_search_minimizer.cc
+++ b/internal/ceres/line_search_minimizer.cc
@@ -38,8 +38,6 @@
// For details on the theory and implementation see "Numerical
// Optimization" by Nocedal & Wright.
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
-
#include "ceres/line_search_minimizer.h"
#include <algorithm>
@@ -64,25 +62,36 @@
namespace ceres {
namespace internal {
namespace {
-// Small constant for various floating point issues.
-// TODO(sameeragarwal): Change to a better name if this has only one
-// use.
-const double kEpsilon = 1e-12;
+// TODO(sameeragarwal): I think there is a small bug here, in that if
+// the evaluation fails, then the state can contain garbage. Look at
+// this more carefully.
bool Evaluate(Evaluator* evaluator,
const Vector& x,
- LineSearchMinimizer::State* state) {
- const bool status = evaluator->Evaluate(x.data(),
- &(state->cost),
- NULL,
- state->gradient.data(),
- NULL);
- if (status) {
- state->gradient_squared_norm = state->gradient.squaredNorm();
- state->gradient_max_norm = state->gradient.lpNorm<Eigen::Infinity>();
+ LineSearchMinimizer::State* state,
+ string* message) {
+ if (!evaluator->Evaluate(x.data(),
+ &(state->cost),
+ NULL,
+ state->gradient.data(),
+ NULL)) {
+ *message = "Gradient evaluation failed.";
+ return false;
+ }
+
+ Vector negative_gradient = -state->gradient;
+ Vector projected_gradient_step(x.size());
+ if (!evaluator->Plus(x.data(),
+ negative_gradient.data(),
+ projected_gradient_step.data())) {
+ *message = "projected_gradient_step = Plus(x, -gradient) failed.";
+ return false;
}
- return status;
+ state->gradient_squared_norm = (x - projected_gradient_step).squaredNorm();
+ state->gradient_max_norm =
+ (x - projected_gradient_step).lpNorm<Eigen::Infinity>();
+ return true;
}
} // namespace
@@ -90,6 +99,7 @@ bool Evaluate(Evaluator* evaluator,
void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
double* parameters,
Solver::Summary* summary) {
+ const bool is_not_silent = !options.is_silent;
double start_time = WallTimeInSeconds();
double iteration_start_time = start_time;
@@ -115,14 +125,17 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
iteration_summary.step_is_successful = false;
iteration_summary.cost_change = 0.0;
iteration_summary.gradient_max_norm = 0.0;
+ iteration_summary.gradient_norm = 0.0;
iteration_summary.step_norm = 0.0;
iteration_summary.linear_solver_iterations = 0;
iteration_summary.step_solver_time_in_seconds = 0;
// Do initial cost and Jacobian evaluation.
- if (!Evaluate(evaluator, x, &current_state)) {
- LOG(WARNING) << "Terminating: Cost and gradient evaluation failed.";
- summary->termination_type = NUMERICAL_FAILURE;
+ if (!Evaluate(evaluator, x, &current_state, &summary->message)) {
+ summary->termination_type = FAILURE;
+ summary->message = "Initial cost and jacobian evaluation failed. "
+ "More details: " + summary->message;
+ LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
return;
}
@@ -130,20 +143,15 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
iteration_summary.cost = current_state.cost + summary->fixed_cost;
iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
-
- // The initial gradient max_norm is bounded from below so that we do
- // not divide by zero.
- const double initial_gradient_max_norm =
- max(iteration_summary.gradient_max_norm, kEpsilon);
- const double absolute_gradient_tolerance =
- options.gradient_tolerance * initial_gradient_max_norm;
-
- if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
- summary->termination_type = GRADIENT_TOLERANCE;
- VLOG(1) << "Terminating: Gradient tolerance reached."
- << "Relative gradient max norm: "
- << iteration_summary.gradient_max_norm / initial_gradient_max_norm
- << " <= " << options.gradient_tolerance;
+ iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm);
+
+ if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
+ summary->message = StringPrintf("Gradient tolerance reached. "
+ "Gradient max norm: %e <= %e",
+ iteration_summary.gradient_max_norm,
+ options.gradient_tolerance);
+ summary->termination_type = CONVERGENCE;
+ VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
return;
}
@@ -188,11 +196,10 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
scoped_ptr<LineSearch>
line_search(LineSearch::Create(options.line_search_type,
line_search_options,
- &summary->error));
+ &summary->message));
if (line_search.get() == NULL) {
- LOG(ERROR) << "Ceres bug: Unable to create a LineSearch object, please "
- << "contact the developers!, error: " << summary->error;
- summary->termination_type = DID_NOT_RUN;
+ summary->termination_type = FAILURE;
+ LOG_IF(ERROR, is_not_silent) << "Terminating: " << summary->message;
return;
}
@@ -200,22 +207,24 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
int num_line_search_direction_restarts = 0;
while (true) {
- if (!RunCallbacks(options.callbacks, iteration_summary, summary)) {
- return;
+ if (!RunCallbacks(options, iteration_summary, summary)) {
+ break;
}
iteration_start_time = WallTimeInSeconds();
if (iteration_summary.iteration >= options.max_num_iterations) {
+ summary->message = "Maximum number of iterations reached.";
summary->termination_type = NO_CONVERGENCE;
- VLOG(1) << "Terminating: Maximum number of iterations reached.";
+ VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
break;
}
const double total_solver_time = iteration_start_time - start_time +
summary->preprocessor_time_in_seconds;
if (total_solver_time >= options.max_solver_time_in_seconds) {
+ summary->message = "Maximum solver time reached.";
summary->termination_type = NO_CONVERGENCE;
- VLOG(1) << "Terminating: Maximum solver time reached.";
+ VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
break;
}
@@ -240,14 +249,13 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
// Line search direction failed to generate a new direction, and we
// have already reached our specified maximum number of restarts,
// terminate optimization.
- summary->error =
+ summary->message =
StringPrintf("Line search direction failure: specified "
"max_num_line_search_direction_restarts: %d reached.",
options.max_num_line_search_direction_restarts);
- LOG(WARNING) << summary->error << " terminating optimization.";
- summary->termination_type = NUMERICAL_FAILURE;
+ summary->termination_type = FAILURE;
+ LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
break;
-
} else if (!line_search_status) {
// Restart line search direction with gradient descent on first iteration
// as we have not yet reached our maximum number of restarts.
@@ -255,13 +263,16 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
options.max_num_line_search_direction_restarts);
++num_line_search_direction_restarts;
- LOG(WARNING)
+ LOG_IF(WARNING, is_not_silent)
<< "Line search direction algorithm: "
- << LineSearchDirectionTypeToString(options.line_search_direction_type)
- << ", failed to produce a valid new direction at iteration: "
- << iteration_summary.iteration << ". Restarting, number of "
- << "restarts: " << num_line_search_direction_restarts << " / "
- << options.max_num_line_search_direction_restarts << " [max].";
+ << LineSearchDirectionTypeToString(
+ options.line_search_direction_type)
+ << ", failed to produce a valid new direction at "
+ << "iteration: " << iteration_summary.iteration
+ << ". Restarting, number of restarts: "
+ << num_line_search_direction_restarts << " / "
+ << options.max_num_line_search_direction_restarts
+ << " [max].";
line_search_direction.reset(
LineSearchDirection::Create(line_search_direction_options));
current_state.search_direction = -current_state.gradient;
@@ -286,14 +297,14 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
// direction in a line search, most likely cause for this being violated
// would be a numerical failure in the line search direction calculation.
if (initial_step_size < 0.0) {
- summary->error =
+ summary->message =
StringPrintf("Numerical failure in line search, initial_step_size is "
"negative: %.5e, directional_derivative: %.5e, "
"(current_cost - previous_cost): %.5e",
initial_step_size, current_state.directional_derivative,
(current_state.cost - previous_state.cost));
- LOG(WARNING) << summary->error;
- summary->termination_type = NUMERICAL_FAILURE;
+ summary->termination_type = FAILURE;
+ LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
break;
}
@@ -301,6 +312,18 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
current_state.cost,
current_state.directional_derivative,
&line_search_summary);
+ if (!line_search_summary.success) {
+ summary->message =
+ StringPrintf("Numerical failure in line search, failed to find "
+ "a valid step size, (did not run out of iterations) "
+ "using initial_step_size: %.5e, initial_cost: %.5e, "
+ "initial_gradient: %.5e.",
+ initial_step_size, current_state.cost,
+ current_state.directional_derivative);
+ LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+ summary->termination_type = FAILURE;
+ break;
+ }
current_state.step_size = line_search_summary.optimal_step_size;
delta = current_state.step_size * current_state.search_direction;
@@ -309,36 +332,31 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
iteration_summary.step_solver_time_in_seconds =
WallTimeInSeconds() - iteration_start_time;
- // TODO(sameeragarwal): Collect stats.
- if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data()) ||
- !Evaluate(evaluator, x_plus_delta, &current_state)) {
- LOG(WARNING) << "Evaluation failed.";
+ if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
+ summary->termination_type = FAILURE;
+ summary->message =
+ "x_plus_delta = Plus(x, delta) failed. This should not happen "
+ "as the step was valid when it was selected by the line search.";
+ LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+ break;
+ } else if (!Evaluate(evaluator,
+ x_plus_delta,
+ &current_state,
+ &summary->message)) {
+ summary->termination_type = FAILURE;
+ summary->message =
+ "Step failed to evaluate. This should not happen as the step was "
+ "valid when it was selected by the line search. More details: " +
+ summary->message;
+ LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+ break;
} else {
x = x_plus_delta;
}
iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
- if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
- summary->termination_type = GRADIENT_TOLERANCE;
- VLOG(1) << "Terminating: Gradient tolerance reached."
- << "Relative gradient max norm: "
- << iteration_summary.gradient_max_norm / initial_gradient_max_norm
- << " <= " << options.gradient_tolerance;
- break;
- }
-
+ iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm);
iteration_summary.cost_change = previous_state.cost - current_state.cost;
- const double absolute_function_tolerance =
- options.function_tolerance * previous_state.cost;
- if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) {
- VLOG(1) << "Terminating. Function tolerance reached. "
- << "|cost_change|/cost: "
- << fabs(iteration_summary.cost_change) / previous_state.cost
- << " <= " << options.function_tolerance;
- summary->termination_type = FUNCTION_TOLERANCE;
- return;
- }
-
iteration_summary.cost = current_state.cost + summary->fixed_cost;
iteration_summary.step_norm = delta.norm();
iteration_summary.step_is_valid = true;
@@ -359,10 +377,32 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
summary->iterations.push_back(iteration_summary);
++summary->num_successful_steps;
+
+ if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
+ summary->message = StringPrintf("Gradient tolerance reached. "
+ "Gradient max norm: %e <= %e",
+ iteration_summary.gradient_max_norm,
+ options.gradient_tolerance);
+ summary->termination_type = CONVERGENCE;
+ VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+ break;
+ }
+
+ const double absolute_function_tolerance =
+ options.function_tolerance * previous_state.cost;
+ if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) {
+ summary->message =
+ StringPrintf("Function tolerance reached. "
+ "|cost_change|/cost: %e <= %e",
+ fabs(iteration_summary.cost_change) /
+ previous_state.cost,
+ options.function_tolerance);
+ summary->termination_type = CONVERGENCE;
+ VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+ break;
+ }
}
}
} // namespace internal
} // namespace ceres
-
-#endif // CERES_NO_LINE_SEARCH_MINIMIZER
diff --git a/internal/ceres/line_search_minimizer.h b/internal/ceres/line_search_minimizer.h
index 59f5c3f..f82f139 100644
--- a/internal/ceres/line_search_minimizer.h
+++ b/internal/ceres/line_search_minimizer.h
@@ -31,8 +31,6 @@
#ifndef CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_
#define CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
-
#include "ceres/minimizer.h"
#include "ceres/solver.h"
#include "ceres/types.h"
@@ -76,5 +74,4 @@ class LineSearchMinimizer : public Minimizer {
} // namespace internal
} // namespace ceres
-#endif // CERES_NO_LINE_SEARCH_MINIMIZER
#endif // CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_
diff --git a/internal/ceres/linear_solver.cc b/internal/ceres/linear_solver.cc
index 08c3ba1..e983e2c 100644
--- a/internal/ceres/linear_solver.cc
+++ b/internal/ceres/linear_solver.cc
@@ -45,6 +45,30 @@ namespace internal {
LinearSolver::~LinearSolver() {
}
+LinearSolverType LinearSolver::LinearSolverForZeroEBlocks(
+ LinearSolverType linear_solver_type) {
+ if (!IsSchurType(linear_solver_type)) {
+ return linear_solver_type;
+ }
+
+ if (linear_solver_type == SPARSE_SCHUR) {
+ return SPARSE_NORMAL_CHOLESKY;
+ }
+
+ if (linear_solver_type == DENSE_SCHUR) {
+ // TODO(sameeragarwal): This is probably not a great choice.
+ // Ideally, we should have a DENSE_NORMAL_CHOLESKY, that can take
+ // a BlockSparseMatrix as input.
+ return DENSE_QR;
+ }
+
+ if (linear_solver_type == ITERATIVE_SCHUR) {
+ return CGNR;
+ }
+
+ return linear_solver_type;
+}
+
LinearSolver* LinearSolver::Create(const LinearSolver::Options& options) {
switch (options.type) {
case CGNR:
@@ -52,9 +76,6 @@ LinearSolver* LinearSolver::Create(const LinearSolver::Options& options) {
case SPARSE_NORMAL_CHOLESKY:
#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
- LOG(WARNING) << "SPARSE_NORMAL_CHOLESKY is not available. Please "
- << "build Ceres with SuiteSparse or CXSparse. "
- << "Returning NULL.";
return NULL;
#else
return new SparseNormalCholeskySolver(options);
@@ -62,9 +83,6 @@ LinearSolver* LinearSolver::Create(const LinearSolver::Options& options) {
case SPARSE_SCHUR:
#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
- LOG(WARNING) << "SPARSE_SCHUR is not available. Please "
- << "build Ceres with SuiteSparse or CXSparse. "
- << "Returning NULL.";
return NULL;
#else
return new SparseSchurComplementSolver(options);
diff --git a/internal/ceres/linear_solver.h b/internal/ceres/linear_solver.h
index 22691b3..58b9044 100644
--- a/internal/ceres/linear_solver.h
+++ b/internal/ceres/linear_solver.h
@@ -50,6 +50,26 @@
namespace ceres {
namespace internal {
+enum LinearSolverTerminationType {
+ // Termination criterion was met.
+ LINEAR_SOLVER_SUCCESS,
+
+ // Solver ran for max_num_iterations and terminated before the
+ // termination tolerance could be satisfied.
+ LINEAR_SOLVER_NO_CONVERGENCE,
+
+ // Solver was terminated due to numerical problems, generally due to
+ // the linear system being poorly conditioned.
+ LINEAR_SOLVER_FAILURE,
+
+ // Solver failed with a fatal error that cannot be recovered from,
+ // e.g. CHOLMOD ran out of memory when computing the symbolic or
+ // numeric factorization or an underlying library was called with
+ // the wrong arguments.
+ LINEAR_SOLVER_FATAL_ERROR
+};
+
+
class LinearOperator;
// Abstract base class for objects that implement algorithms for
@@ -74,9 +94,11 @@ class LinearSolver {
Options()
: type(SPARSE_NORMAL_CHOLESKY),
preconditioner_type(JACOBI),
+ visibility_clustering_type(CANONICAL_VIEWS),
dense_linear_algebra_library_type(EIGEN),
sparse_linear_algebra_library_type(SUITE_SPARSE),
use_postordering(false),
+ dynamic_sparsity(false),
min_num_iterations(1),
max_num_iterations(1),
num_threads(1),
@@ -87,14 +109,14 @@ class LinearSolver {
}
LinearSolverType type;
-
PreconditionerType preconditioner_type;
-
+ VisibilityClusteringType visibility_clustering_type;
DenseLinearAlgebraLibraryType dense_linear_algebra_library_type;
SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type;
// See solver.h for information about this flag.
bool use_postordering;
+ bool dynamic_sparsity;
// Number of internal iterations that the solver uses. This
// parameter only makes sense for iterative solvers like CG.
@@ -243,14 +265,23 @@ class LinearSolver {
Summary()
: residual_norm(0.0),
num_iterations(-1),
- termination_type(FAILURE) {
+ termination_type(LINEAR_SOLVER_FAILURE) {
}
double residual_norm;
int num_iterations;
LinearSolverTerminationType termination_type;
+ string message;
};
+ // If the optimization problem is such that there are no remaining
+ // e-blocks, a Schur type linear solver cannot be used. If the
+ // linear solver is of Schur type, this function implements a policy
+ // to select an alternate nearest linear solver to the one selected
+ // by the user. The input linear_solver_type is returned otherwise.
+ static LinearSolverType LinearSolverForZeroEBlocks(
+ LinearSolverType linear_solver_type);
+
virtual ~LinearSolver();
// Solve Ax = b.
diff --git a/internal/ceres/loss_function.cc b/internal/ceres/loss_function.cc
index b948f28..4ad01e3 100644
--- a/internal/ceres/loss_function.cc
+++ b/internal/ceres/loss_function.cc
@@ -39,8 +39,8 @@ namespace ceres {
void TrivialLoss::Evaluate(double s, double rho[3]) const {
rho[0] = s;
- rho[1] = 1;
- rho[2] = 0;
+ rho[1] = 1.0;
+ rho[2] = 0.0;
}
void HuberLoss::Evaluate(double s, double rho[3]) const {
@@ -48,32 +48,32 @@ void HuberLoss::Evaluate(double s, double rho[3]) const {
// Outlier region.
// 'r' is always positive.
const double r = sqrt(s);
- rho[0] = 2 * a_ * r - b_;
- rho[1] = a_ / r;
- rho[2] = - rho[1] / (2 * s);
+ rho[0] = 2.0 * a_ * r - b_;
+ rho[1] = std::max(std::numeric_limits<double>::min(), a_ / r);
+ rho[2] = - rho[1] / (2.0 * s);
} else {
// Inlier region.
rho[0] = s;
- rho[1] = 1;
- rho[2] = 0;
+ rho[1] = 1.0;
+ rho[2] = 0.0;
}
}
void SoftLOneLoss::Evaluate(double s, double rho[3]) const {
- const double sum = 1 + s * c_;
+ const double sum = 1.0 + s * c_;
const double tmp = sqrt(sum);
// 'sum' and 'tmp' are always positive, assuming that 's' is.
- rho[0] = 2 * b_ * (tmp - 1);
- rho[1] = 1 / tmp;
- rho[2] = - (c_ * rho[1]) / (2 * sum);
+ rho[0] = 2.0 * b_ * (tmp - 1.0);
+ rho[1] = std::max(std::numeric_limits<double>::min(), 1.0 / tmp);
+ rho[2] = - (c_ * rho[1]) / (2.0 * sum);
}
void CauchyLoss::Evaluate(double s, double rho[3]) const {
- const double sum = 1 + s * c_;
- const double inv = 1 / sum;
+ const double sum = 1.0 + s * c_;
+ const double inv = 1.0 / sum;
// 'sum' and 'inv' are always positive, assuming that 's' is.
rho[0] = b_ * log(sum);
- rho[1] = inv;
+ rho[1] = std::max(std::numeric_limits<double>::min(), inv);
rho[2] = - c_ * (inv * inv);
}
@@ -82,8 +82,8 @@ void ArctanLoss::Evaluate(double s, double rho[3]) const {
const double inv = 1 / sum;
// 'sum' and 'inv' are always positive.
rho[0] = a_ * atan2(s, a_);
- rho[1] = inv;
- rho[2] = -2 * s * b_ * (inv * inv);
+ rho[1] = std::max(std::numeric_limits<double>::min(), inv);
+ rho[2] = -2.0 * s * b_ * (inv * inv);
}
TolerantLoss::TolerantLoss(double a, double b)
@@ -108,7 +108,7 @@ void TolerantLoss::Evaluate(double s, double rho[3]) const {
} else {
const double e_x = exp(x);
rho[0] = b_ * log(1.0 + e_x) - c_;
- rho[1] = e_x / (1.0 + e_x);
+ rho[1] = std::max(std::numeric_limits<double>::min(), e_x / (1.0 + e_x));
rho[2] = 0.5 / (b_ * (1.0 + cosh(x)));
}
}
diff --git a/internal/ceres/low_rank_inverse_hessian.cc b/internal/ceres/low_rank_inverse_hessian.cc
index 372165f..4816e3c 100644
--- a/internal/ceres/low_rank_inverse_hessian.cc
+++ b/internal/ceres/low_rank_inverse_hessian.cc
@@ -28,6 +28,8 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
+#include <list>
+
#include "ceres/internal/eigen.h"
#include "ceres/low_rank_inverse_hessian.h"
#include "glog/logging.h"
@@ -35,6 +37,41 @@
namespace ceres {
namespace internal {
+// The (L)BFGS algorithm explicitly requires that the secant equation:
+//
+// B_{k+1} * s_k = y_k
+//
+// is satisfied at each iteration, where B_{k+1} is the approximated
+// Hessian at the k+1-th iteration, s_k = (x_{k+1} - x_{k}) and
+// y_k = (grad_{k+1} - grad_{k}). As the approximated Hessian must be
+// positive definite, this is equivalent to the condition:
+//
+// s_k^T * y_k > 0 [s_k^T * B_{k+1} * s_k = s_k^T * y_k > 0]
+//
+// This condition would always be satisfied if the function was strictly
+// convex, alternatively, it is always satisfied provided that a Wolfe line
+// search is used (even if the function is not strictly convex). See [1]
+// (p138) for a proof.
+//
+// Although Ceres will always use a Wolfe line search when using (L)BFGS,
+// practical implementation considerations mean that the line search
+// may return a point that satisfies only the Armijo condition, and thus
+// could violate the Secant equation. As such, we will only use a step
+// to update the Hessian approximation if:
+//
+// s_k^T * y_k > tolerance
+//
+// It is important that tolerance is very small (and >=0), as otherwise we
+// might skip the update too often and fail to capture important curvature
+// information in the Hessian. For example going from 1e-10 -> 1e-14 improves
+// the NIST benchmark score from 43/54 to 53/54.
+//
+// [1] Nocedal J., Wright S., Numerical Optimization, 2nd Ed. Springer, 1999.
+//
+// TODO(alexs.mac): Consider using Damped BFGS update instead of
+// skipping update.
+const double kLBFGSSecantConditionHessianUpdateTolerance = 1e-14;
+
LowRankInverseHessian::LowRankInverseHessian(
int num_parameters,
int max_num_corrections,
@@ -42,7 +79,6 @@ LowRankInverseHessian::LowRankInverseHessian(
: num_parameters_(num_parameters),
max_num_corrections_(max_num_corrections),
use_approximate_eigenvalue_scaling_(use_approximate_eigenvalue_scaling),
- num_corrections_(0),
approximate_eigenvalue_scale_(1.0),
delta_x_history_(num_parameters, max_num_corrections),
delta_gradient_history_(num_parameters, max_num_corrections),
@@ -52,35 +88,29 @@ LowRankInverseHessian::LowRankInverseHessian(
bool LowRankInverseHessian::Update(const Vector& delta_x,
const Vector& delta_gradient) {
const double delta_x_dot_delta_gradient = delta_x.dot(delta_gradient);
- if (delta_x_dot_delta_gradient <= 1e-10) {
- VLOG(2) << "Skipping LBFGS Update, delta_x_dot_delta_gradient too small: "
- << delta_x_dot_delta_gradient;
+ if (delta_x_dot_delta_gradient <=
+ kLBFGSSecantConditionHessianUpdateTolerance) {
+ VLOG(2) << "Skipping L-BFGS Update, delta_x_dot_delta_gradient too "
+ << "small: " << delta_x_dot_delta_gradient << ", tolerance: "
+ << kLBFGSSecantConditionHessianUpdateTolerance
+ << " (Secant condition).";
return false;
}
- if (num_corrections_ == max_num_corrections_) {
- // TODO(sameeragarwal): This can be done more efficiently using
- // a circular buffer/indexing scheme, but for simplicity we will
- // do the expensive copy for now.
- delta_x_history_.block(0, 0, num_parameters_, max_num_corrections_ - 1) =
- delta_x_history_
- .block(0, 1, num_parameters_, max_num_corrections_ - 1);
-
- delta_gradient_history_
- .block(0, 0, num_parameters_, max_num_corrections_ - 1) =
- delta_gradient_history_
- .block(0, 1, num_parameters_, max_num_corrections_ - 1);
-
- delta_x_dot_delta_gradient_.head(num_corrections_ - 1) =
- delta_x_dot_delta_gradient_.tail(num_corrections_ - 1);
- } else {
- ++num_corrections_;
+
+ int next = indices_.size();
+ // Once the size of the list reaches max_num_corrections_, simulate
+ // a circular buffer by removing the first element of the list and
+ // making it the next position where the LBFGS history is stored.
+ if (next == max_num_corrections_) {
+ next = indices_.front();
+ indices_.pop_front();
}
- delta_x_history_.col(num_corrections_ - 1) = delta_x;
- delta_gradient_history_.col(num_corrections_ - 1) = delta_gradient;
- delta_x_dot_delta_gradient_(num_corrections_ - 1) =
- delta_x_dot_delta_gradient;
+ indices_.push_back(next);
+ delta_x_history_.col(next) = delta_x;
+ delta_gradient_history_.col(next) = delta_gradient;
+ delta_x_dot_delta_gradient_(next) = delta_x_dot_delta_gradient;
approximate_eigenvalue_scale_ =
delta_x_dot_delta_gradient / delta_gradient.squaredNorm();
return true;
@@ -93,12 +123,16 @@ void LowRankInverseHessian::RightMultiply(const double* x_ptr,
search_direction = gradient;
- Vector alpha(num_corrections_);
+ const int num_corrections = indices_.size();
+ Vector alpha(num_corrections);
- for (int i = num_corrections_ - 1; i >= 0; --i) {
- alpha(i) = delta_x_history_.col(i).dot(search_direction) /
- delta_x_dot_delta_gradient_(i);
- search_direction -= alpha(i) * delta_gradient_history_.col(i);
+ for (std::list<int>::const_reverse_iterator it = indices_.rbegin();
+ it != indices_.rend();
+ ++it) {
+ const double alpha_i = delta_x_history_.col(*it).dot(search_direction) /
+ delta_x_dot_delta_gradient_(*it);
+ search_direction -= alpha_i * delta_gradient_history_.col(*it);
+ alpha(*it) = alpha_i;
}
if (use_approximate_eigenvalue_scaling_) {
@@ -133,12 +167,18 @@ void LowRankInverseHessian::RightMultiply(const double* x_ptr,
// 20(5), 863-874, 1974.
// [2] Nocedal J., Wright S., Numerical Optimization, Springer, 1999.
search_direction *= approximate_eigenvalue_scale_;
+
+ VLOG(4) << "Applying approximate_eigenvalue_scale: "
+ << approximate_eigenvalue_scale_ << " to initial inverse Hessian "
+ << "approximation.";
}
- for (int i = 0; i < num_corrections_; ++i) {
- const double beta = delta_gradient_history_.col(i).dot(search_direction) /
- delta_x_dot_delta_gradient_(i);
- search_direction += delta_x_history_.col(i) * (alpha(i) - beta);
+ for (std::list<int>::const_iterator it = indices_.begin();
+ it != indices_.end();
+ ++it) {
+ const double beta = delta_gradient_history_.col(*it).dot(search_direction) /
+ delta_x_dot_delta_gradient_(*it);
+ search_direction += delta_x_history_.col(*it) * (alpha(*it) - beta);
}
}
diff --git a/internal/ceres/low_rank_inverse_hessian.h b/internal/ceres/low_rank_inverse_hessian.h
index 7d293d0..19ab760 100644
--- a/internal/ceres/low_rank_inverse_hessian.h
+++ b/internal/ceres/low_rank_inverse_hessian.h
@@ -34,6 +34,8 @@
#ifndef CERES_INTERNAL_LOW_RANK_INVERSE_HESSIAN_H_
#define CERES_INTERNAL_LOW_RANK_INVERSE_HESSIAN_H_
+#include <list>
+
#include "ceres/internal/eigen.h"
#include "ceres/linear_operator.h"
@@ -93,11 +95,11 @@ class LowRankInverseHessian : public LinearOperator {
const int num_parameters_;
const int max_num_corrections_;
const bool use_approximate_eigenvalue_scaling_;
- int num_corrections_;
double approximate_eigenvalue_scale_;
- Matrix delta_x_history_;
- Matrix delta_gradient_history_;
+ ColMajorMatrix delta_x_history_;
+ ColMajorMatrix delta_gradient_history_;
Vector delta_x_dot_delta_gradient_;
+ std::list<int> indices_;
};
} // namespace internal
diff --git a/internal/ceres/miniglog/glog/logging.cc b/internal/ceres/miniglog/glog/logging.cc
new file mode 100644
index 0000000..32a78ce
--- /dev/null
+++ b/internal/ceres/miniglog/glog/logging.cc
@@ -0,0 +1,39 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "glog/logging.h"
+
+namespace google {
+
+// This is the set of log sinks. This must be in a separate library to ensure
+// that there is only one instance of this across the entire program.
+std::set<google::LogSink *> log_sinks_global;
+
+}  // namespace google
diff --git a/internal/ceres/miniglog/glog/logging.h b/internal/ceres/miniglog/glog/logging.h
index bab3191..e9c0dff 100644
--- a/internal/ceres/miniglog/glog/logging.h
+++ b/internal/ceres/miniglog/glog/logging.h
@@ -1,83 +1,114 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
// Author: settinger@google.com (Scott Ettinger)
-
-// Simplified Google3 style logging with Android support.
-// Supported macros are : LOG(INFO), LOG(WARNING), LOG(ERROR), LOG(FATAL),
-// and VLOG(n).
+// mierle@gmail.com (Keir Mierle)
//
-// Portions of this code are taken from the GLOG package. This code
-// is only a small subset of the GLOG functionality. And like GLOG,
-// higher levels are more verbose.
+// Simplified Glog style logging with Android support. Supported macros in
+// decreasing severity level per line:
//
-// Notable differences from GLOG :
+// VLOG(2), VLOG(N)
+// VLOG(1),
+// LOG(INFO), VLOG(0), LG
+// LOG(WARNING),
+// LOG(ERROR),
+// LOG(FATAL),
//
-// 1. lack of support for displaying unprintable characters and lack
-// of stack trace information upon failure of the CHECK macros.
-// 2. All output is tagged with the string "native".
-// 3. While there is no runtime flag filtering logs (-v, -vmodule), the
-// compile time define MAX_LOG_LEVEL can be used to silence any
-// logging above the given level.
+// With VLOG(n), the output is directed to one of the 5 Android log levels:
//
-// -------------------------------- Usage ------------------------------------
-// Basic usage :
-// LOG(<severity level>) acts as a c++ stream to the Android logcat output.
-// e.g. LOG(INFO) << "Value of counter = " << counter;
+// 2 - Verbose
+// 1 - Debug
+// 0 - Info
+// -1 - Warning
+// -2 - Error
+// -3 - Fatal
//
-// Valid severity levels include INFO, WARNING, ERROR, FATAL.
-// The various severity levels are routed to the corresponding Android logcat
-// output.
-// LOG(FATAL) outputs to the log and then terminates.
+// Any logging of level 2 and above is directed to the Verbose level. All
+// Android log output is tagged with the string "native".
//
-// VLOG(<severity level>) can also be used.
-// VLOG(n) output is directed to the Android logcat levels as follows :
-// >=2 - Verbose
-// 1 - Debug
-// 0 - Info
-// -1 - Warning
-// -2 - Error
-// <=-3 - Fatal
-// Note that VLOG(FATAL) will terminate the program.
+// If the symbol ANDROID is not defined, all output goes to std::cerr.
+// This allows code to be built on a different system for debug.
//
-// CHECK macros are defined to test for conditions within code. Any CHECK
-// that fails will log the failure and terminate the application.
+// Portions of this code are taken from the GLOG package. This code is only a
+// small subset of the GLOG functionality. Notable differences from GLOG
+// behavior include lack of support for displaying unprintable characters and
+// lack of stack trace information upon failure of the CHECK macros. On
+// non-Android systems, log output goes to std::cerr and is not written to a
+// file.
+//
+// CHECK macros are defined to test for conditions within code. Any CHECK that
+// fails will log the failure and terminate the application.
// e.g. CHECK_GE(3, 2) will pass while CHECK_GE(3, 4) will fail after logging
// "Check failed 3 >= 4".
-// The following CHECK macros are defined :
//
-// CHECK(condition) - fails if condition is false and logs condition.
-// CHECK_NOTNULL(variable) - fails if the variable is NULL.
+// The following CHECK macros are defined:
+//
+// CHECK(condition) - fails if condition is false and logs condition.
+// CHECK_NOTNULL(variable) - fails if the variable is NULL.
//
// The following binary check macros are also defined :
-// Macro operator applied
-// ------------------------------------------
-// CHECK_EQ(val1, val2) val1 == val2
-// CHECK_NE(val1, val2) val1 != val2
-// CHECK_GT(val1, val2) val1 > val2
-// CHECK_GE(val1, val2) val1 >= val2
-// CHECK_LT(val1, val2) val1 < val2
-// CHECK_LE(val1, val2) val1 <= val2
+//
+// Macro Operator equivalent
+// -------------------- -------------------
+// CHECK_EQ(val1, val2) val1 == val2
+// CHECK_NE(val1, val2) val1 != val2
+// CHECK_GT(val1, val2) val1 > val2
+// CHECK_GE(val1, val2) val1 >= val2
+// CHECK_LT(val1, val2) val1 < val2
+// CHECK_LE(val1, val2) val1 <= val2
//
// Debug only versions of all of the check macros are also defined. These
// macros generate no code in a release build, but avoid unused variable
// warnings / errors.
-// To use the debug only versions, Prepend a D to the normal check macros.
-// e.g. DCHECK_EQ(a, b);
+//
+// To use the debug only versions, prepend a D to the normal check macros, e.g.
+// DCHECK_EQ(a, b).
-#ifndef MOBILE_BASE_LOGGING_H_
-#define MOBILE_BASE_LOGGING_H_
+#ifndef CERCES_INTERNAL_MINIGLOG_GLOG_LOGGING_H_
+#define CERCES_INTERNAL_MINIGLOG_GLOG_LOGGING_H_
-// Definitions for building on an Android system.
-#include <android/log.h>
-#include <time.h>
+#ifdef ANDROID
+# include <android/log.h>
+#endif // ANDROID
#include <algorithm>
-#include <iostream>
-#include <string>
+#include <ctime>
#include <fstream>
+#include <iostream>
#include <set>
#include <sstream>
+#include <string>
#include <vector>
+// For appropriate definition of CERES_EXPORT macro.
+#include "ceres/internal/port.h"
+#include "ceres/internal/disable_warnings.h"
+
// Log severity level constants.
const int FATAL = -3;
const int ERROR = -2;
@@ -94,26 +125,29 @@ const int WARNING = ::WARNING;
const int ERROR = ::ERROR;
const int FATAL = ::FATAL;
-#ifdef ENABLE_LOG_SINKS
-
-// Sink class used for integration with mock and test functions.
-// If sinks are added, all log output is also sent to each sink through
-// the send function. In this implementation, WaitTillSent() is called
-// immediately after the send.
+// Sink class used for integration with mock and test functions. If sinks are
+// added, all log output is also sent to each sink through the send function.
+// In this implementation, WaitTillSent() is called immediately after the send.
// This implementation is not thread safe.
-class LogSink {
+class CERES_EXPORT LogSink {
public:
virtual ~LogSink() {}
- virtual void send(LogSeverity severity, const char* full_filename,
- const char* base_filename, int line,
+ virtual void send(LogSeverity severity,
+ const char* full_filename,
+ const char* base_filename,
+ int line,
const struct tm* tm_time,
- const char* message, size_t message_len) = 0;
+ const char* message,
+ size_t message_len) = 0;
virtual void WaitTillSent() = 0;
};
-// Global set of log sinks.
-// TODO(settinger): Move this into a .cc file.
-static std::set<LogSink *> log_sinks_global;
+// Global set of log sinks. The actual object is defined in logging.cc.
+extern CERES_EXPORT std::set<LogSink *> log_sinks_global;
+
+inline void InitGoogleLogging(char *argv) {
+ // Do nothing; this is ignored.
+}
// Note: the Log sink functions are not thread safe.
inline void AddLogSink(LogSink *sink) {
@@ -124,20 +158,17 @@ inline void RemoveLogSink(LogSink *sink) {
log_sinks_global.erase(sink);
}
-#endif // #ifdef ENABLE_LOG_SINKS
-
-inline void InitGoogleLogging(char *argv) {}
-
} // namespace google
// ---------------------------- Logger Class --------------------------------
// Class created for each use of the logging macros.
// The logger acts as a stream and routes the final stream contents to the
-// Android logcat output at the proper filter level. This class should not
+// Android logcat output at the proper filter level. If ANDROID is not
+// defined, output is directed to std::cerr. This class should not
// be directly instantiated in code, rather it should be invoked through the
-// use of the log macros LOG, or VLOG.
-class MessageLogger {
+// use of the log macros LG, LOG, or VLOG.
+class CERES_EXPORT MessageLogger {
public:
MessageLogger(const char *file, int line, const char *tag, int severity)
: file_(file), line_(line), tag_(tag), severity_(severity) {
@@ -148,17 +179,14 @@ class MessageLogger {
// Output the contents of the stream to the proper channel on destruction.
~MessageLogger() {
-#ifdef MAX_LOG_LEVEL
- if (severity_ > MAX_LOG_LEVEL && severity_ > FATAL) {
- return;
- }
-#endif
stream_ << "\n";
+
+#ifdef ANDROID
static const int android_log_levels[] = {
ANDROID_LOG_FATAL, // LOG(FATAL)
ANDROID_LOG_ERROR, // LOG(ERROR)
ANDROID_LOG_WARN, // LOG(WARNING)
- ANDROID_LOG_INFO, // LOG(INFO), VLOG(0)
+ ANDROID_LOG_INFO, // LOG(INFO), LG, VLOG(0)
ANDROID_LOG_DEBUG, // VLOG(1)
ANDROID_LOG_VERBOSE, // VLOG(2) .. VLOG(N)
};
@@ -178,14 +206,14 @@ class MessageLogger {
tag_.c_str(),
"terminating.\n");
}
-
-#ifdef ENABLE_LOG_SINKS
+#else
+ // If not building on Android, log all output to std::cerr.
+ std::cerr << stream_.str();
+#endif // ANDROID
LogToSinks(severity_);
WaitForSinks();
-#endif // #ifdef ENABLE_LOG_SINKS
-
// Android logging at level FATAL does not terminate execution, so abort()
// is still required to stop the program.
if (severity_ == FATAL) {
@@ -197,41 +225,49 @@ class MessageLogger {
std::stringstream &stream() { return stream_; }
private:
-#ifdef ENABLE_LOG_SINKS
-
void LogToSinks(int severity) {
time_t rawtime;
- struct tm * timeinfo;
+ time (&rawtime);
+
+ struct tm* timeinfo;
+#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__)
+ // On Windows, use secure localtime_s not localtime.
+ struct tm windows_timeinfo;
+ timeinfo = &windows_timeinfo;
+ localtime_s(timeinfo, &rawtime);
+#else
+ timeinfo = localtime(&rawtime);
+#endif
- time ( &rawtime );
- timeinfo = localtime ( &rawtime );
- std::set<google::LogSink *>::iterator iter;
+ std::set<google::LogSink*>::iterator iter;
// Send the log message to all sinks.
for (iter = google::log_sinks_global.begin();
- iter != google::log_sinks_global.end(); ++iter)
+ iter != google::log_sinks_global.end(); ++iter) {
(*iter)->send(severity, file_.c_str(), filename_only_.c_str(), line_,
timeinfo, stream_.str().c_str(), stream_.str().size());
+ }
}
void WaitForSinks() {
- // TODO(settinger): add locks for thread safety.
+ // TODO(settinger): Add locks for thread safety.
std::set<google::LogSink *>::iterator iter;
+
// Call WaitTillSent() for all sinks.
for (iter = google::log_sinks_global.begin();
- iter != google::log_sinks_global.end(); ++iter)
+ iter != google::log_sinks_global.end(); ++iter) {
(*iter)->WaitTillSent();
+ }
}
-#endif // #ifdef ENABLE_LOG_SINKS
-
void StripBasename(const std::string &full_path, std::string *filename) {
- // TODO(settinger): add support for OS with different path separators.
+ // TODO(settinger): Add support for OSs with different path separators.
const char kSeparator = '/';
size_t pos = full_path.rfind(kSeparator);
- if (pos != std::string::npos)
+ if (pos != std::string::npos) {
*filename = full_path.substr(pos + 1, std::string::npos);
- else
+ } else {
*filename = full_path;
+ }
}
std::string file_;
@@ -247,7 +283,7 @@ class MessageLogger {
// This class is used to explicitly ignore values in the conditional
// logging macros. This avoids compiler warnings like "value computed
// is not used" and "statement has no effect".
-class LoggerVoidify {
+class CERES_EXPORT LoggerVoidify {
public:
LoggerVoidify() { }
// This has to be an operator with a precedence lower than << but
@@ -257,8 +293,8 @@ class LoggerVoidify {
// Log only if condition is met. Otherwise evaluates to void.
#define LOG_IF(severity, condition) \
- !(condition) ? (void) 0 : LoggerVoidify() & \
- MessageLogger((char *)__FILE__, __LINE__, "native", severity).stream()
+ !(condition) ? (void) 0 : LoggerVoidify() & \
+ MessageLogger((char *)__FILE__, __LINE__, "native", severity).stream()
// Log only if condition is NOT met. Otherwise evaluates to void.
#define LOG_IF_FALSE(severity, condition) LOG_IF(severity, !(condition))
@@ -267,30 +303,31 @@ class LoggerVoidify {
// google3 code is discouraged and the following shortcut exists for
// backward compatibility with existing code.
#ifdef MAX_LOG_LEVEL
-#define LOG(n) LOG_IF(n, n <= MAX_LOG_LEVEL)
-#define VLOG(n) LOG_IF(n, n <= MAX_LOG_LEVEL)
-#define LG LOG_IF(INFO, INFO <= MAX_LOG_LEVEL)
+# define LOG(n) LOG_IF(n, n <= MAX_LOG_LEVEL)
+# define VLOG(n) LOG_IF(n, n <= MAX_LOG_LEVEL)
+# define LG LOG_IF(INFO, INFO <= MAX_LOG_LEVEL)
+# define VLOG_IF(n, condition) LOG_IF(n, (n <= MAX_LOG_LEVEL) && condition)
#else
-#define LOG(n) MessageLogger((char *)__FILE__, __LINE__, "native", n).stream()
-#define VLOG(n) MessageLogger((char *)__FILE__, __LINE__, "native", n).stream()
-#define LG MessageLogger((char *)__FILE__, __LINE__, "native", INFO).stream()
+# define LOG(n) MessageLogger((char *)__FILE__, __LINE__, "native", n).stream() // NOLINT
+# define VLOG(n) MessageLogger((char *)__FILE__, __LINE__, "native", n).stream() // NOLINT
+# define LG MessageLogger((char *)__FILE__, __LINE__, "native", INFO).stream() // NOLINT
+# define VLOG_IF(n, condition) LOG_IF(n, condition)
#endif
// Currently, VLOG is always on for levels below MAX_LOG_LEVEL.
#ifndef MAX_LOG_LEVEL
-#define VLOG_IS_ON(x) (1)
+# define VLOG_IS_ON(x) (1)
#else
-#define VLOG_IS_ON(x) (x <= MAX_LOG_LEVEL)
+# define VLOG_IS_ON(x) (x <= MAX_LOG_LEVEL)
#endif
#ifndef NDEBUG
-#define DLOG LOG
+# define DLOG LOG
#else
-#define DLOG(severity) true ? (void) 0 : LoggerVoidify() & \
- MessageLogger((char *)__FILE__, __LINE__, "native", severity).stream()
+# define DLOG(severity) true ? (void) 0 : LoggerVoidify() & \
+ MessageLogger((char *)__FILE__, __LINE__, "native", severity).stream()
#endif
-// ---------------------------- CHECK helpers --------------------------------
// Log a message and terminate.
template<class T>
@@ -307,19 +344,19 @@ void LogMessageFatal(const char *file, int line, const T &message) {
#ifndef NDEBUG
// Debug only version of CHECK
-#define DCHECK(condition) LOG_IF_FALSE(FATAL, condition) \
- << "Check failed: " #condition " "
+# define DCHECK(condition) LOG_IF_FALSE(FATAL, condition) \
+ << "Check failed: " #condition " "
#else
// Optimized version - generates no code.
-#define DCHECK(condition) if (false) LOG_IF_FALSE(FATAL, condition) \
- << "Check failed: " #condition " "
+# define DCHECK(condition) if (false) LOG_IF_FALSE(FATAL, condition) \
+ << "Check failed: " #condition " "
#endif // NDEBUG
// ------------------------- CHECK_OP macros ---------------------------------
// Generic binary operator check macro. This should not be directly invoked,
// instead use the binary comparison macros defined below.
-#define CHECK_OP(val1, val2, op) LOG_IF_FALSE(FATAL, (val1 op val2)) \
+#define CHECK_OP(val1, val2, op) LOG_IF_FALSE(FATAL, ((val1) op (val2))) \
<< "Check failed: " #val1 " " #op " " #val2 " "
// Check_op macro definitions
@@ -332,20 +369,20 @@ void LogMessageFatal(const char *file, int line, const T &message) {
#ifndef NDEBUG
// Debug only versions of CHECK_OP macros.
-#define DCHECK_EQ(val1, val2) CHECK_OP(val1, val2, ==)
-#define DCHECK_NE(val1, val2) CHECK_OP(val1, val2, !=)
-#define DCHECK_LE(val1, val2) CHECK_OP(val1, val2, <=)
-#define DCHECK_LT(val1, val2) CHECK_OP(val1, val2, <)
-#define DCHECK_GE(val1, val2) CHECK_OP(val1, val2, >=)
-#define DCHECK_GT(val1, val2) CHECK_OP(val1, val2, >)
+# define DCHECK_EQ(val1, val2) CHECK_OP(val1, val2, ==)
+# define DCHECK_NE(val1, val2) CHECK_OP(val1, val2, !=)
+# define DCHECK_LE(val1, val2) CHECK_OP(val1, val2, <=)
+# define DCHECK_LT(val1, val2) CHECK_OP(val1, val2, <)
+# define DCHECK_GE(val1, val2) CHECK_OP(val1, val2, >=)
+# define DCHECK_GT(val1, val2) CHECK_OP(val1, val2, >)
#else
// These versions generate no code in optimized mode.
-#define DCHECK_EQ(val1, val2) if (false) CHECK_OP(val1, val2, ==)
-#define DCHECK_NE(val1, val2) if (false) CHECK_OP(val1, val2, !=)
-#define DCHECK_LE(val1, val2) if (false) CHECK_OP(val1, val2, <=)
-#define DCHECK_LT(val1, val2) if (false) CHECK_OP(val1, val2, <)
-#define DCHECK_GE(val1, val2) if (false) CHECK_OP(val1, val2, >=)
-#define DCHECK_GT(val1, val2) if (false) CHECK_OP(val1, val2, >)
+# define DCHECK_EQ(val1, val2) if (false) CHECK_OP(val1, val2, ==)
+# define DCHECK_NE(val1, val2) if (false) CHECK_OP(val1, val2, !=)
+# define DCHECK_LE(val1, val2) if (false) CHECK_OP(val1, val2, <=)
+# define DCHECK_LT(val1, val2) if (false) CHECK_OP(val1, val2, <)
+# define DCHECK_GE(val1, val2) if (false) CHECK_OP(val1, val2, >=)
+# define DCHECK_GT(val1, val2) if (false) CHECK_OP(val1, val2, >)
#endif // NDEBUG
// ---------------------------CHECK_NOTNULL macros ---------------------------
@@ -384,8 +421,6 @@ T& CheckNotNull(const char *file, int line, const char *names, T& t) {
CheckNotNull(__FILE__, __LINE__, "'" #val "' Must be non NULL", (val))
#endif // NDEBUG
-inline void PrintAndroid(const char *msg) {
- __android_log_write(ANDROID_LOG_VERBOSE, "native", msg);
-}
+#include "ceres/internal/reenable_warnings.h"
-#endif // MOBILE_BASE_LOGGING_H_
+#endif // CERCES_INTERNAL_MINIGLOG_GLOG_LOGGING_H_
diff --git a/internal/ceres/minimizer.cc b/internal/ceres/minimizer.cc
index 2e2c15a..6c3b68d 100644
--- a/internal/ceres/minimizer.cc
+++ b/internal/ceres/minimizer.cc
@@ -37,13 +37,14 @@ namespace internal {
Minimizer::~Minimizer() {}
-bool Minimizer::RunCallbacks(const vector<IterationCallback*> callbacks,
+bool Minimizer::RunCallbacks(const Minimizer::Options& options,
const IterationSummary& iteration_summary,
Solver::Summary* summary) {
+ const bool is_not_silent = !options.is_silent;
CallbackReturnType status = SOLVER_CONTINUE;
int i = 0;
- while (status == SOLVER_CONTINUE && i < callbacks.size()) {
- status = (*callbacks[i])(iteration_summary);
+ while (status == SOLVER_CONTINUE && i < options.callbacks.size()) {
+ status = (*options.callbacks[i])(iteration_summary);
++i;
}
switch (status) {
@@ -51,11 +52,13 @@ bool Minimizer::RunCallbacks(const vector<IterationCallback*> callbacks,
return true;
case SOLVER_TERMINATE_SUCCESSFULLY:
summary->termination_type = USER_SUCCESS;
- VLOG(1) << "Terminating: User callback returned USER_SUCCESS.";
+ summary->message = "User callback returned SOLVER_TERMINATE_SUCCESSFULLY.";
+ VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
return false;
case SOLVER_ABORT:
- summary->termination_type = USER_ABORT;
- VLOG(1) << "Terminating: User callback returned USER_ABORT.";
+ summary->termination_type = USER_FAILURE;
+ summary->message = "User callback returned SOLVER_ABORT.";
+ VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
return false;
default:
LOG(FATAL) << "Unknown type of user callback status";
diff --git a/internal/ceres/minimizer.h b/internal/ceres/minimizer.h
index 622e9ce..f1da3f7 100644
--- a/internal/ceres/minimizer.h
+++ b/internal/ceres/minimizer.h
@@ -107,12 +107,14 @@ class Minimizer {
options.line_search_sufficient_curvature_decrease;
max_line_search_step_expansion =
options.max_line_search_step_expansion;
+ is_silent = (options.logging_type == SILENT);
evaluator = NULL;
trust_region_strategy = NULL;
jacobian = NULL;
callbacks = options.callbacks;
inner_iteration_minimizer = NULL;
inner_iteration_tolerance = options.inner_iteration_tolerance;
+ is_constrained = false;
}
int max_num_iterations;
@@ -153,6 +155,8 @@ class Minimizer {
double line_search_sufficient_curvature_decrease;
double max_line_search_step_expansion;
+ // If true, then all logging is disabled.
+ bool is_silent;
// List of callbacks that are executed by the Minimizer at the end
// of each iteration.
@@ -177,9 +181,12 @@ class Minimizer {
Minimizer* inner_iteration_minimizer;
double inner_iteration_tolerance;
+
+ // Use a bounds constrained optimization algorithm.
+ bool is_constrained;
};
- static bool RunCallbacks(const vector<IterationCallback*> callbacks,
+ static bool RunCallbacks(const Options& options,
const IterationSummary& iteration_summary,
Solver::Summary* summary);
diff --git a/internal/ceres/minimizer_test.cc b/internal/ceres/minimizer_test.cc
index 1058036..0d8b617 100644
--- a/internal/ceres/minimizer_test.cc
+++ b/internal/ceres/minimizer_test.cc
@@ -44,7 +44,7 @@ class FakeIterationCallback : public IterationCallback {
}
};
-TEST(MinimizerTest, InitializationCopiesCallbacks) {
+TEST(Minimizer, InitializationCopiesCallbacks) {
FakeIterationCallback callback0;
FakeIterationCallback callback1;
@@ -59,5 +59,42 @@ TEST(MinimizerTest, InitializationCopiesCallbacks) {
EXPECT_EQ(minimizer_options.callbacks[1], &callback1);
}
+class AbortingIterationCallback : public IterationCallback {
+ public:
+ virtual ~AbortingIterationCallback() {}
+ virtual CallbackReturnType operator()(const IterationSummary& summary) {
+ return SOLVER_ABORT;
+ }
+};
+
+TEST(Minimizer, UserAbortUpdatesSummaryMessage) {
+ AbortingIterationCallback callback;
+ Solver::Options solver_options;
+ solver_options.callbacks.push_back(&callback);
+ Minimizer::Options minimizer_options(solver_options);
+ Solver::Summary summary;
+ Minimizer::RunCallbacks(minimizer_options, IterationSummary(), &summary);
+ EXPECT_EQ(summary.message, "User callback returned SOLVER_ABORT.");
+}
+
+class SucceedingIterationCallback : public IterationCallback {
+ public:
+ virtual ~SucceedingIterationCallback() {}
+ virtual CallbackReturnType operator()(const IterationSummary& summary) {
+ return SOLVER_TERMINATE_SUCCESSFULLY;
+ }
+};
+
+TEST(Minimizer, UserSuccessUpdatesSummaryMessage) {
+ SucceedingIterationCallback callback;
+ Solver::Options solver_options;
+ solver_options.callbacks.push_back(&callback);
+ Minimizer::Options minimizer_options(solver_options);
+ Solver::Summary summary;
+ Minimizer::RunCallbacks(minimizer_options, IterationSummary(), &summary);
+ EXPECT_EQ(summary.message,
+ "User callback returned SOLVER_TERMINATE_SUCCESSFULLY.");
+}
+
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/mutex.h b/internal/ceres/mutex.h
index 0c48ed3..97e2cd3 100644
--- a/internal/ceres/mutex.h
+++ b/internal/ceres/mutex.h
@@ -95,6 +95,8 @@
#ifndef CERES_INTERNAL_MUTEX_H_
#define CERES_INTERNAL_MUTEX_H_
+#include "ceres/internal/port.h"
+
#if defined(CERES_NO_THREADS)
typedef int MutexType; // to keep a lock-count
#elif defined(_WIN32) || defined(__CYGWIN32__) || defined(__CYGWIN64__)
@@ -112,7 +114,9 @@
// To avoid macro definition of ERROR.
# define NOGDI
// To avoid macro definition of min/max.
-# define NOMINMAX
+# ifndef NOMINMAX
+# define NOMINMAX
+# endif
# include <windows.h>
typedef CRITICAL_SECTION MutexType;
#elif defined(CERES_HAVE_PTHREAD) && defined(CERES_HAVE_RWLOCK)
diff --git a/internal/ceres/numeric_diff_cost_function_test.cc b/internal/ceres/numeric_diff_cost_function_test.cc
index 3953ded..422c712 100644
--- a/internal/ceres/numeric_diff_cost_function_test.cc
+++ b/internal/ceres/numeric_diff_cost_function_test.cc
@@ -184,5 +184,18 @@ TEST(NumericDiffCostFunction, EigenRowMajorColMajorTest) {
new SizeTestingCostFunction<2,2>, ceres::TAKE_OWNERSHIP));
}
+TEST(NumericDiffCostFunction, EasyCaseFunctorCentralDifferencesAndDynamicNumResiduals) {
+ internal::scoped_ptr<CostFunction> cost_function;
+ cost_function.reset(
+ new NumericDiffCostFunction<EasyFunctor,
+ CENTRAL,
+ ceres::DYNAMIC,
+ 5, /* size of x1 */
+ 5 /* size of x2 */>(
+ new EasyFunctor, TAKE_OWNERSHIP, 3));
+ EasyFunctor functor;
+ functor.ExpectCostFunctionEvaluationIsNearlyCorrect(*cost_function, CENTRAL);
+}
+
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/ordered_groups_test.cc b/internal/ceres/ordered_groups_test.cc
index 700e788..7719d35 100644
--- a/internal/ceres/ordered_groups_test.cc
+++ b/internal/ceres/ordered_groups_test.cc
@@ -38,7 +38,7 @@
namespace ceres {
namespace internal {
-TEST(OrderedGroup, EmptyOrderedGroupBehavesCorrectly) {
+TEST(OrderedGroups, EmptyOrderedGroupBehavesCorrectly) {
ParameterBlockOrdering ordering;
EXPECT_EQ(ordering.NumGroups(), 0);
EXPECT_EQ(ordering.NumElements(), 0);
@@ -48,7 +48,7 @@ TEST(OrderedGroup, EmptyOrderedGroupBehavesCorrectly) {
EXPECT_FALSE(ordering.Remove(&x));
}
-TEST(OrderedGroup, EverythingInOneGroup) {
+TEST(OrderedGroups, EverythingInOneGroup) {
ParameterBlockOrdering ordering;
double x[3];
ordering.AddElementToGroup(x, 1);
@@ -75,7 +75,7 @@ TEST(OrderedGroup, EverythingInOneGroup) {
EXPECT_EQ(ordering.GroupId(x + 2), 1);
}
-TEST(OrderedGroup, StartInOneGroupAndThenSplit) {
+TEST(OrderedGroups, StartInOneGroupAndThenSplit) {
ParameterBlockOrdering ordering;
double x[3];
ordering.AddElementToGroup(x, 1);
@@ -103,7 +103,7 @@ TEST(OrderedGroup, StartInOneGroupAndThenSplit) {
EXPECT_EQ(ordering.GroupId(x + 2), 1);
}
-TEST(OrderedGroup, AddAndRemoveEveryThingFromOneGroup) {
+TEST(OrderedGroups, AddAndRemoveEveryThingFromOneGroup) {
ParameterBlockOrdering ordering;
double x[3];
ordering.AddElementToGroup(x, 1);
@@ -133,7 +133,7 @@ TEST(OrderedGroup, AddAndRemoveEveryThingFromOneGroup) {
EXPECT_EQ(ordering.GroupId(x + 2), 5);
}
-TEST(OrderedGroup, ReverseOrdering) {
+TEST(OrderedGroups, ReverseOrdering) {
ParameterBlockOrdering ordering;
double x[3];
ordering.AddElementToGroup(x, 1);
@@ -159,5 +159,61 @@ TEST(OrderedGroup, ReverseOrdering) {
EXPECT_EQ(ordering.GroupId(x + 2), 2);
}
+TEST(OrderedGroups, BulkRemove) {
+ ParameterBlockOrdering ordering;
+ double x[3];
+ ordering.AddElementToGroup(x, 1);
+ ordering.AddElementToGroup(x + 1, 2);
+ ordering.AddElementToGroup(x + 2, 2);
+
+ vector<double*> elements_to_remove;
+ elements_to_remove.push_back(x);
+ elements_to_remove.push_back(x + 2);
+
+ EXPECT_EQ(ordering.Remove(elements_to_remove), 2);
+ EXPECT_EQ(ordering.NumElements(), 1);
+ EXPECT_EQ(ordering.GroupId(x), -1);
+ EXPECT_EQ(ordering.GroupId(x + 1), 2);
+ EXPECT_EQ(ordering.GroupId(x + 2), -1);
+}
+
+TEST(OrderedGroups, BulkRemoveWithNoElements) {
+ ParameterBlockOrdering ordering;
+
+ double x[3];
+ vector<double*> elements_to_remove;
+ elements_to_remove.push_back(x);
+ elements_to_remove.push_back(x + 2);
+
+ EXPECT_EQ(ordering.Remove(elements_to_remove), 0);
+
+ ordering.AddElementToGroup(x, 1);
+ ordering.AddElementToGroup(x + 1, 2);
+ ordering.AddElementToGroup(x + 2, 2);
+
+ elements_to_remove.clear();
+ EXPECT_EQ(ordering.Remove(elements_to_remove), 0);
+}
+
+TEST(OrderedGroups, MinNonZeroGroup) {
+ ParameterBlockOrdering ordering;
+ double x[3];
+
+ ordering.AddElementToGroup(x, 1);
+ ordering.AddElementToGroup(x + 1, 1);
+ ordering.AddElementToGroup(x + 2, 2);
+
+ EXPECT_EQ(ordering.MinNonZeroGroup(), 1);
+ ordering.Remove(x);
+
+ EXPECT_EQ(ordering.MinNonZeroGroup(), 1);
+ ordering.Remove(x + 1);
+
+ EXPECT_EQ(ordering.MinNonZeroGroup(), 2);
+ ordering.Remove(x + 2);
+
+ // No non-zero groups left.
+ EXPECT_DEATH_IF_SUPPORTED(ordering.MinNonZeroGroup(), "NumGroups()");
+}
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/parameter_block.h b/internal/ceres/parameter_block.h
index 695fa6f..7bc823d 100644
--- a/internal/ceres/parameter_block.h
+++ b/internal/ceres/parameter_block.h
@@ -31,7 +31,9 @@
#ifndef CERES_INTERNAL_PARAMETER_BLOCK_H_
#define CERES_INTERNAL_PARAMETER_BLOCK_H_
+#include <algorithm>
#include <cstdlib>
+#include <limits>
#include <string>
#include "ceres/array_utils.h"
#include "ceres/collections_port.h"
@@ -180,16 +182,59 @@ class ParameterBlock {
}
}
+ void SetUpperBound(int index, double upper_bound) {
+ CHECK_LT(index, size_);
+
+ if (upper_bounds_.get() == NULL) {
+ upper_bounds_.reset(new double[size_]);
+ std::fill(upper_bounds_.get(),
+ upper_bounds_.get() + size_,
+ std::numeric_limits<double>::max());
+ }
+
+ upper_bounds_[index] = upper_bound;
+ }
+
+ void SetLowerBound(int index, double lower_bound) {
+ CHECK_LT(index, size_);
+
+ if (lower_bounds_.get() == NULL) {
+ lower_bounds_.reset(new double[size_]);
+ std::fill(lower_bounds_.get(),
+ lower_bounds_.get() + size_,
+ -std::numeric_limits<double>::max());
+ }
+
+ lower_bounds_[index] = lower_bound;
+ }
+
// Generalization of the addition operation. This is the same as
- // LocalParameterization::Plus() but uses the parameter's current state
- // instead of operating on a passed in pointer.
+ // LocalParameterization::Plus() followed by projection onto the
+ // hyper cube implied by the bounds constraints.
bool Plus(const double *x, const double* delta, double* x_plus_delta) {
- if (local_parameterization_ == NULL) {
+ if (local_parameterization_ != NULL) {
+ if (!local_parameterization_->Plus(x, delta, x_plus_delta)) {
+ return false;
+ }
+ } else {
VectorRef(x_plus_delta, size_) = ConstVectorRef(x, size_) +
ConstVectorRef(delta, size_);
- return true;
}
- return local_parameterization_->Plus(x, delta, x_plus_delta);
+
+ // Project onto the box constraints.
+ if (lower_bounds_.get() != NULL) {
+ for (int i = 0; i < size_; ++i) {
+ x_plus_delta[i] = std::max(x_plus_delta[i], lower_bounds_[i]);
+ }
+ }
+
+ if (upper_bounds_.get() != NULL) {
+ for (int i = 0; i < size_; ++i) {
+ x_plus_delta[i] = std::min(x_plus_delta[i], upper_bounds_[i]);
+ }
+ }
+
+ return true;
}
string ToString() const {
@@ -234,6 +279,22 @@ class ParameterBlock {
return residual_blocks_.get();
}
+ double LowerBoundForParameter(int index) const {
+ if (lower_bounds_.get() == NULL) {
+ return -std::numeric_limits<double>::max();
+ } else {
+ return lower_bounds_[index];
+ }
+ }
+
+ double UpperBoundForParameter(int index) const {
+ if (upper_bounds_.get() == NULL) {
+ return std::numeric_limits<double>::max();
+ } else {
+ return upper_bounds_[index];
+ }
+ }
+
private:
void Init(double* user_state,
int size,
@@ -312,6 +373,20 @@ class ParameterBlock {
// If non-null, contains the residual blocks this parameter block is in.
scoped_ptr<ResidualBlockSet> residual_blocks_;
+ // Upper and lower bounds for the parameter block. SetUpperBound
+ // and SetLowerBound lazily initialize the upper_bounds_ and
+ // lower_bounds_ arrays. If they are never called, then memory for
+ // these arrays is never allocated. Thus for problems where there
+ // are no bounds, or only one sided bounds we do not pay the cost of
+ // allocating memory for the inactive bounds constraints.
+ //
+ // Upon initialization these arrays are initialized to
+ // std::numeric_limits<double>::max() and
+ // -std::numeric_limits<double>::max() respectively which correspond
+ // to the parameter block being unconstrained.
+ scoped_array<double> upper_bounds_;
+ scoped_array<double> lower_bounds_;
+
// Necessary so ProblemImpl can clean up the parameterizations.
friend class ProblemImpl;
};
diff --git a/internal/ceres/parameter_block_ordering.cc b/internal/ceres/parameter_block_ordering.cc
index 190715b..3032329 100644
--- a/internal/ceres/parameter_block_ordering.cc
+++ b/internal/ceres/parameter_block_ordering.cc
@@ -144,5 +144,21 @@ CreateHessianGraph(const Program& program) {
return graph;
}
+void OrderingToGroupSizes(const ParameterBlockOrdering* ordering,
+ vector<int>* group_sizes) {
+ CHECK_NOTNULL(group_sizes)->clear();
+ if (ordering == NULL) {
+ return;
+ }
+
+ const map<int, set<double*> >& group_to_elements =
+ ordering->group_to_elements();
+ for (map<int, set<double*> >::const_iterator it = group_to_elements.begin();
+ it != group_to_elements.end();
+ ++it) {
+ group_sizes->push_back(it->second.size());
+ }
+}
+
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/parameter_block_ordering.h b/internal/ceres/parameter_block_ordering.h
index 4675cb8..5de9951 100644
--- a/internal/ceres/parameter_block_ordering.h
+++ b/internal/ceres/parameter_block_ordering.h
@@ -78,6 +78,11 @@ void ComputeRecursiveIndependentSetOrdering(const Program& program,
// parameter blocks, if they co-occur in a residual block.
Graph<ParameterBlock*>* CreateHessianGraph(const Program& program);
+// Iterate over each of the groups in order of their priority and fill
+// summary with their sizes.
+void OrderingToGroupSizes(const ParameterBlockOrdering* ordering,
+ vector<int>* group_sizes);
+
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/parameter_block_test.cc b/internal/ceres/parameter_block_test.cc
index 09156f8..5a2db3c 100644
--- a/internal/ceres/parameter_block_test.cc
+++ b/internal/ceres/parameter_block_test.cc
@@ -169,5 +169,45 @@ TEST(ParameterBlock, DetectBadLocalParameterization) {
EXPECT_FALSE(parameter_block.SetState(&y));
}
+TEST(ParameterBlock, DefaultBounds) {
+ double x[2];
+ ParameterBlock parameter_block(x, 2, -1, NULL);
+ EXPECT_EQ(parameter_block.UpperBoundForParameter(0),
+ std::numeric_limits<double>::max());
+ EXPECT_EQ(parameter_block.UpperBoundForParameter(1),
+ std::numeric_limits<double>::max());
+ EXPECT_EQ(parameter_block.LowerBoundForParameter(0),
+ -std::numeric_limits<double>::max());
+ EXPECT_EQ(parameter_block.LowerBoundForParameter(1),
+ -std::numeric_limits<double>::max());
+}
+
+TEST(ParameterBlock, SetBounds) {
+ double x[2];
+ ParameterBlock parameter_block(x, 2, -1, NULL);
+ parameter_block.SetLowerBound(0, 1);
+ parameter_block.SetUpperBound(1, 1);
+
+ EXPECT_EQ(parameter_block.LowerBoundForParameter(0), 1.0);
+ EXPECT_EQ(parameter_block.LowerBoundForParameter(1),
+ -std::numeric_limits<double>::max());
+
+ EXPECT_EQ(parameter_block.UpperBoundForParameter(0),
+ std::numeric_limits<double>::max());
+ EXPECT_EQ(parameter_block.UpperBoundForParameter(1), 1.0);
+}
+
+TEST(ParameterBlock, PlusWithBoundsConstraints) {
+ double x[] = {1.0, 0.0};
+ double delta[] = {2.0, -10.0};
+ ParameterBlock parameter_block(x, 2, -1, NULL);
+ parameter_block.SetUpperBound(0, 2.0);
+ parameter_block.SetLowerBound(1, -1.0);
+ double x_plus_delta[2];
+ parameter_block.Plus(x, delta, x_plus_delta);
+ EXPECT_EQ(x_plus_delta[0], 2.0);
+ EXPECT_EQ(x_plus_delta[1], -1.0);
+}
+
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/partitioned_matrix_view.cc b/internal/ceres/partitioned_matrix_view.cc
index 59eaff8..d745a9b 100644
--- a/internal/ceres/partitioned_matrix_view.cc
+++ b/internal/ceres/partitioned_matrix_view.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
@@ -27,277 +27,153 @@
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_partitioned_matrix_view_specializations.py.
+// Editing it manually is not recommended.
-#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10
-
+#include "ceres/linear_solver.h"
#include "ceres/partitioned_matrix_view.h"
-
-#include <algorithm>
-#include <cstring>
-#include <vector>
-#include "ceres/block_sparse_matrix.h"
-#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
-#include "ceres/small_blas.h"
-#include "glog/logging.h"
namespace ceres {
namespace internal {
-PartitionedMatrixView::PartitionedMatrixView(
- const BlockSparseMatrix& matrix,
- int num_col_blocks_a)
- : matrix_(matrix),
- num_col_blocks_e_(num_col_blocks_a) {
- const CompressedRowBlockStructure* bs = matrix_.block_structure();
- CHECK_NOTNULL(bs);
-
- num_col_blocks_f_ = bs->cols.size() - num_col_blocks_a;
-
- // Compute the number of row blocks in E. The number of row blocks
- // in E maybe less than the number of row blocks in the input matrix
- // as some of the row blocks at the bottom may not have any
- // e_blocks. For a definition of what an e_block is, please see
- // explicit_schur_complement_solver.h
- num_row_blocks_e_ = 0;
- for (int r = 0; r < bs->rows.size(); ++r) {
- const vector<Cell>& cells = bs->rows[r].cells;
- if (cells[0].block_id < num_col_blocks_a) {
- ++num_row_blocks_e_;
- }
+PartitionedMatrixViewBase*
+PartitionedMatrixViewBase::Create(const LinearSolver::Options& options,
+ const BlockSparseMatrix& matrix) {
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2) &&
+ (options.f_block_size == 2)) {
+ return new PartitionedMatrixView<2, 2, 2>(
+ matrix, options.elimination_groups[0]);
}
-
- // Compute the number of columns in E and F.
- num_cols_e_ = 0;
- num_cols_f_ = 0;
-
- for (int c = 0; c < bs->cols.size(); ++c) {
- const Block& block = bs->cols[c];
- if (c < num_col_blocks_a) {
- num_cols_e_ += block.size;
- } else {
- num_cols_f_ += block.size;
- }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2) &&
+ (options.f_block_size == 3)) {
+ return new PartitionedMatrixView<2, 2, 3>(
+ matrix, options.elimination_groups[0]);
}
-
- CHECK_EQ(num_cols_e_ + num_cols_f_, matrix_.num_cols());
-}
-
-PartitionedMatrixView::~PartitionedMatrixView() {
-}
-
-// The next four methods don't seem to be particularly cache
-// friendly. This is an artifact of how the BlockStructure of the
-// input matrix is constructed. These methods will benefit from
-// multithreading as well as improved data layout.
-
-void PartitionedMatrixView::RightMultiplyE(const double* x, double* y) const {
- const CompressedRowBlockStructure* bs = matrix_.block_structure();
-
- // Iterate over the first num_row_blocks_e_ row blocks, and multiply
- // by the first cell in each row block.
- const double* values = matrix_.values();
- for (int r = 0; r < num_row_blocks_e_; ++r) {
- const Cell& cell = bs->rows[r].cells[0];
- const int row_block_pos = bs->rows[r].block.position;
- const int row_block_size = bs->rows[r].block.size;
- const int col_block_id = cell.block_id;
- const int col_block_pos = bs->cols[col_block_id].position;
- const int col_block_size = bs->cols[col_block_id].size;
- MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
- values + cell.position, row_block_size, col_block_size,
- x + col_block_pos,
- y + row_block_pos);
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2) &&
+ (options.f_block_size == 4)) {
+ return new PartitionedMatrixView<2, 2, 4>(
+ matrix, options.elimination_groups[0]);
}
-}
-
-void PartitionedMatrixView::RightMultiplyF(const double* x, double* y) const {
- const CompressedRowBlockStructure* bs = matrix_.block_structure();
-
- // Iterate over row blocks, and if the row block is in E, then
- // multiply by all the cells except the first one which is of type
- // E. If the row block is not in E (i.e its in the bottom
- // num_row_blocks - num_row_blocks_e row blocks), then all the cells
- // are of type F and multiply by them all.
- const double* values = matrix_.values();
- for (int r = 0; r < bs->rows.size(); ++r) {
- const int row_block_pos = bs->rows[r].block.position;
- const int row_block_size = bs->rows[r].block.size;
- const vector<Cell>& cells = bs->rows[r].cells;
- for (int c = (r < num_row_blocks_e_) ? 1 : 0; c < cells.size(); ++c) {
- const int col_block_id = cells[c].block_id;
- const int col_block_pos = bs->cols[col_block_id].position;
- const int col_block_size = bs->cols[col_block_id].size;
- MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
- values + cells[c].position, row_block_size, col_block_size,
- x + col_block_pos - num_cols_e(),
- y + row_block_pos);
- }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2) &&
+ (options.f_block_size == Eigen::Dynamic)) {
+ return new PartitionedMatrixView<2, 2, Eigen::Dynamic>(
+ matrix, options.elimination_groups[0]);
}
-}
-
-void PartitionedMatrixView::LeftMultiplyE(const double* x, double* y) const {
- const CompressedRowBlockStructure* bs = matrix_.block_structure();
-
- // Iterate over the first num_row_blocks_e_ row blocks, and multiply
- // by the first cell in each row block.
- const double* values = matrix_.values();
- for (int r = 0; r < num_row_blocks_e_; ++r) {
- const Cell& cell = bs->rows[r].cells[0];
- const int row_block_pos = bs->rows[r].block.position;
- const int row_block_size = bs->rows[r].block.size;
- const int col_block_id = cell.block_id;
- const int col_block_pos = bs->cols[col_block_id].position;
- const int col_block_size = bs->cols[col_block_id].size;
- MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
- values + cell.position, row_block_size, col_block_size,
- x + row_block_pos,
- y + col_block_pos);
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 3)) {
+ return new PartitionedMatrixView<2, 3, 3>(
+ matrix, options.elimination_groups[0]);
}
-}
-
-void PartitionedMatrixView::LeftMultiplyF(const double* x, double* y) const {
- const CompressedRowBlockStructure* bs = matrix_.block_structure();
-
- // Iterate over row blocks, and if the row block is in E, then
- // multiply by all the cells except the first one which is of type
- // E. If the row block is not in E (i.e its in the bottom
- // num_row_blocks - num_row_blocks_e row blocks), then all the cells
- // are of type F and multiply by them all.
- const double* values = matrix_.values();
- for (int r = 0; r < bs->rows.size(); ++r) {
- const int row_block_pos = bs->rows[r].block.position;
- const int row_block_size = bs->rows[r].block.size;
- const vector<Cell>& cells = bs->rows[r].cells;
- for (int c = (r < num_row_blocks_e_) ? 1 : 0; c < cells.size(); ++c) {
- const int col_block_id = cells[c].block_id;
- const int col_block_pos = bs->cols[col_block_id].position;
- const int col_block_size = bs->cols[col_block_id].size;
- MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
- values + cells[c].position, row_block_size, col_block_size,
- x + row_block_pos,
- y + col_block_pos - num_cols_e());
- }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 4)) {
+ return new PartitionedMatrixView<2, 3, 4>(
+ matrix, options.elimination_groups[0]);
}
-}
-
-// Given a range of columns blocks of a matrix m, compute the block
-// structure of the block diagonal of the matrix m(:,
-// start_col_block:end_col_block)'m(:, start_col_block:end_col_block)
-// and return a BlockSparseMatrix with the this block structure. The
-// caller owns the result.
-BlockSparseMatrix* PartitionedMatrixView::CreateBlockDiagonalMatrixLayout(
- int start_col_block, int end_col_block) const {
- const CompressedRowBlockStructure* bs = matrix_.block_structure();
- CompressedRowBlockStructure* block_diagonal_structure =
- new CompressedRowBlockStructure;
-
- int block_position = 0;
- int diagonal_cell_position = 0;
-
- // Iterate over the column blocks, creating a new diagonal block for
- // each column block.
- for (int c = start_col_block; c < end_col_block; ++c) {
- const Block& block = bs->cols[c];
- block_diagonal_structure->cols.push_back(Block());
- Block& diagonal_block = block_diagonal_structure->cols.back();
- diagonal_block.size = block.size;
- diagonal_block.position = block_position;
-
- block_diagonal_structure->rows.push_back(CompressedRow());
- CompressedRow& row = block_diagonal_structure->rows.back();
- row.block = diagonal_block;
-
- row.cells.push_back(Cell());
- Cell& cell = row.cells.back();
- cell.block_id = c - start_col_block;
- cell.position = diagonal_cell_position;
-
- block_position += block.size;
- diagonal_cell_position += block.size * block.size;
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 9)) {
+ return new PartitionedMatrixView<2, 3, 9>(
+ matrix, options.elimination_groups[0]);
}
-
- // Build a BlockSparseMatrix with the just computed block
- // structure.
- return new BlockSparseMatrix(block_diagonal_structure);
-}
-
-BlockSparseMatrix* PartitionedMatrixView::CreateBlockDiagonalEtE() const {
- BlockSparseMatrix* block_diagonal =
- CreateBlockDiagonalMatrixLayout(0, num_col_blocks_e_);
- UpdateBlockDiagonalEtE(block_diagonal);
- return block_diagonal;
-}
-
-BlockSparseMatrix* PartitionedMatrixView::CreateBlockDiagonalFtF() const {
- BlockSparseMatrix* block_diagonal =
- CreateBlockDiagonalMatrixLayout(
- num_col_blocks_e_, num_col_blocks_e_ + num_col_blocks_f_);
- UpdateBlockDiagonalFtF(block_diagonal);
- return block_diagonal;
-}
-
-// Similar to the code in RightMultiplyE, except instead of the matrix
-// vector multiply its an outer product.
-//
-// block_diagonal = block_diagonal(E'E)
-void PartitionedMatrixView::UpdateBlockDiagonalEtE(
- BlockSparseMatrix* block_diagonal) const {
- const CompressedRowBlockStructure* bs = matrix_.block_structure();
- const CompressedRowBlockStructure* block_diagonal_structure =
- block_diagonal->block_structure();
-
- block_diagonal->SetZero();
- const double* values = matrix_.values();
- for (int r = 0; r < num_row_blocks_e_ ; ++r) {
- const Cell& cell = bs->rows[r].cells[0];
- const int row_block_size = bs->rows[r].block.size;
- const int block_id = cell.block_id;
- const int col_block_size = bs->cols[block_id].size;
- const int cell_position =
- block_diagonal_structure->rows[block_id].cells[0].position;
-
- MatrixTransposeMatrixMultiply
- <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
- values + cell.position, row_block_size, col_block_size,
- values + cell.position, row_block_size, col_block_size,
- block_diagonal->mutable_values() + cell_position,
- 0, 0, col_block_size, col_block_size);
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == Eigen::Dynamic)) {
+ return new PartitionedMatrixView<2, 3, Eigen::Dynamic>(
+ matrix, options.elimination_groups[0]);
}
-}
-
-// Similar to the code in RightMultiplyF, except instead of the matrix
-// vector multiply its an outer product.
-//
-// block_diagonal = block_diagonal(F'F)
-//
-void PartitionedMatrixView::UpdateBlockDiagonalFtF(
- BlockSparseMatrix* block_diagonal) const {
- const CompressedRowBlockStructure* bs = matrix_.block_structure();
- const CompressedRowBlockStructure* block_diagonal_structure =
- block_diagonal->block_structure();
-
- block_diagonal->SetZero();
- const double* values = matrix_.values();
- for (int r = 0; r < bs->rows.size(); ++r) {
- const int row_block_size = bs->rows[r].block.size;
- const vector<Cell>& cells = bs->rows[r].cells;
- for (int c = (r < num_row_blocks_e_) ? 1 : 0; c < cells.size(); ++c) {
- const int col_block_id = cells[c].block_id;
- const int col_block_size = bs->cols[col_block_id].size;
- const int diagonal_block_id = col_block_id - num_col_blocks_e_;
- const int cell_position =
- block_diagonal_structure->rows[diagonal_block_id].cells[0].position;
-
- MatrixTransposeMatrixMultiply
- <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
- values + cells[c].position, row_block_size, col_block_size,
- values + cells[c].position, row_block_size, col_block_size,
- block_diagonal->mutable_values() + cell_position,
- 0, 0, col_block_size, col_block_size);
- }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 3)) {
+ return new PartitionedMatrixView<2, 4, 3>(
+ matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 4)) {
+ return new PartitionedMatrixView<2, 4, 4>(
+ matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 8)) {
+ return new PartitionedMatrixView<2, 4, 8>(
+ matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 9)) {
+ return new PartitionedMatrixView<2, 4, 9>(
+ matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == Eigen::Dynamic)) {
+ return new PartitionedMatrixView<2, 4, Eigen::Dynamic>(
+ matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == Eigen::Dynamic) &&
+ (options.f_block_size == Eigen::Dynamic)) {
+ return new PartitionedMatrixView<2, Eigen::Dynamic, Eigen::Dynamic>(
+ matrix, options.elimination_groups[0]);
}
-}
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 2)) {
+ return new PartitionedMatrixView<4, 4, 2>(
+ matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 3)) {
+ return new PartitionedMatrixView<4, 4, 3>(
+ matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 4)) {
+ return new PartitionedMatrixView<4, 4, 4>(
+ matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == Eigen::Dynamic)) {
+ return new PartitionedMatrixView<4, 4, Eigen::Dynamic>(
+ matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == Eigen::Dynamic) &&
+ (options.e_block_size == Eigen::Dynamic) &&
+ (options.f_block_size == Eigen::Dynamic)) {
+ return new PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(
+ matrix, options.elimination_groups[0]);
+ }
+
+#endif
+ VLOG(1) << "Template specializations not found for <"
+ << options.row_block_size << ","
+ << options.e_block_size << ","
+ << options.f_block_size << ">";
+ return new PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(
+ matrix, options.elimination_groups[0]);
+}
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/partitioned_matrix_view.h b/internal/ceres/partitioned_matrix_view.h
index ebfbe40..661252d 100644
--- a/internal/ceres/partitioned_matrix_view.h
+++ b/internal/ceres/partitioned_matrix_view.h
@@ -36,7 +36,15 @@
#ifndef CERES_INTERNAL_PARTITIONED_MATRIX_VIEW_H_
#define CERES_INTERNAL_PARTITIONED_MATRIX_VIEW_H_
-#include "ceres/block_sparse_matrix.h"
+#include <algorithm>
+#include <cstring>
+#include <vector>
+
+#include "ceres/block_structure.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_solver.h"
+#include "ceres/small_blas.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
@@ -51,57 +59,80 @@ namespace internal {
// block structure of the matrix does not satisfy the requirements of
// the Schur complement solver it will result in unpredictable and
// wrong output.
-//
-// This class lives in the internal name space as its a utility class
-// to be used by the IterativeSchurComplementSolver class, found in
-// iterative_schur_complement_solver.h, and is not meant for general
-// consumption.
-class PartitionedMatrixView {
+class PartitionedMatrixViewBase {
public:
- // matrix = [E F], where the matrix E contains the first
- // num_col_blocks_a column blocks.
- PartitionedMatrixView(const BlockSparseMatrix& matrix,
- int num_col_blocks_a);
- ~PartitionedMatrixView();
+ virtual ~PartitionedMatrixViewBase() {}
// y += E'x
- void LeftMultiplyE(const double* x, double* y) const;
+ virtual void LeftMultiplyE(const double* x, double* y) const = 0;
// y += F'x
- void LeftMultiplyF(const double* x, double* y) const;
+ virtual void LeftMultiplyF(const double* x, double* y) const = 0;
// y += Ex
- void RightMultiplyE(const double* x, double* y) const;
+ virtual void RightMultiplyE(const double* x, double* y) const = 0;
// y += Fx
- void RightMultiplyF(const double* x, double* y) const;
+ virtual void RightMultiplyF(const double* x, double* y) const = 0;
// Create and return the block diagonal of the matrix E'E.
- BlockSparseMatrix* CreateBlockDiagonalEtE() const;
+ virtual BlockSparseMatrix* CreateBlockDiagonalEtE() const = 0;
- // Create and return the block diagonal of the matrix F'F.
- BlockSparseMatrix* CreateBlockDiagonalFtF() const;
+ // Create and return the block diagonal of the matrix F'F. Caller
+ // owns the result.
+ virtual BlockSparseMatrix* CreateBlockDiagonalFtF() const = 0;
// Compute the block diagonal of the matrix E'E and store it in
// block_diagonal. The matrix block_diagonal is expected to have a
// BlockStructure (preferably created using
// CreateBlockDiagonalMatrixEtE) which is has the same structure as
// the block diagonal of E'E.
- void UpdateBlockDiagonalEtE(BlockSparseMatrix* block_diagonal) const;
+ virtual void UpdateBlockDiagonalEtE(
+ BlockSparseMatrix* block_diagonal) const = 0;
// Compute the block diagonal of the matrix F'F and store it in
// block_diagonal. The matrix block_diagonal is expected to have a
// BlockStructure (preferably created using
// CreateBlockDiagonalMatrixFtF) which is has the same structure as
// the block diagonal of F'F.
- void UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const;
-
- int num_col_blocks_e() const { return num_col_blocks_e_; }
- int num_col_blocks_f() const { return num_col_blocks_f_; }
- int num_cols_e() const { return num_cols_e_; }
- int num_cols_f() const { return num_cols_f_; }
- int num_rows() const { return matrix_.num_rows(); }
- int num_cols() const { return matrix_.num_cols(); }
+ virtual void UpdateBlockDiagonalFtF(
+ BlockSparseMatrix* block_diagonal) const = 0;
+
+ virtual int num_col_blocks_e() const = 0;
+ virtual int num_col_blocks_f() const = 0;
+ virtual int num_cols_e() const = 0;
+ virtual int num_cols_f() const = 0;
+ virtual int num_rows() const = 0;
+ virtual int num_cols() const = 0;
+
+ static PartitionedMatrixViewBase* Create(const LinearSolver::Options& options,
+ const BlockSparseMatrix& matrix);
+};
+
+template <int kRowBlockSize = Eigen::Dynamic,
+ int kEBlockSize = Eigen::Dynamic,
+ int kFBlockSize = Eigen::Dynamic >
+class PartitionedMatrixView : public PartitionedMatrixViewBase {
+ public:
+ // matrix = [E F], where the matrix E contains the first
+ // num_col_blocks_a column blocks.
+ PartitionedMatrixView(const BlockSparseMatrix& matrix, int num_col_blocks_e);
+
+ virtual ~PartitionedMatrixView();
+ virtual void LeftMultiplyE(const double* x, double* y) const;
+ virtual void LeftMultiplyF(const double* x, double* y) const;
+ virtual void RightMultiplyE(const double* x, double* y) const;
+ virtual void RightMultiplyF(const double* x, double* y) const;
+ virtual BlockSparseMatrix* CreateBlockDiagonalEtE() const;
+ virtual BlockSparseMatrix* CreateBlockDiagonalFtF() const;
+ virtual void UpdateBlockDiagonalEtE(BlockSparseMatrix* block_diagonal) const;
+ virtual void UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const;
+ virtual int num_col_blocks_e() const { return num_col_blocks_e_; }
+ virtual int num_col_blocks_f() const { return num_col_blocks_f_; }
+ virtual int num_cols_e() const { return num_cols_e_; }
+ virtual int num_cols_f() const { return num_cols_f_; }
+ virtual int num_rows() const { return matrix_.num_rows(); }
+ virtual int num_cols() const { return matrix_.num_cols(); }
private:
BlockSparseMatrix* CreateBlockDiagonalMatrixLayout(int start_col_block,
diff --git a/internal/ceres/partitioned_matrix_view_impl.h b/internal/ceres/partitioned_matrix_view_impl.h
new file mode 100644
index 0000000..ae7f776
--- /dev/null
+++ b/internal/ceres/partitioned_matrix_view_impl.h
@@ -0,0 +1,380 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/partitioned_matrix_view.h"
+
+#include <algorithm>
+#include <cstring>
+#include <vector>
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/small_blas.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+PartitionedMatrixView(
+ const BlockSparseMatrix& matrix,
+ int num_col_blocks_e)
+ : matrix_(matrix),
+ num_col_blocks_e_(num_col_blocks_e) {
+ const CompressedRowBlockStructure* bs = matrix_.block_structure();
+ CHECK_NOTNULL(bs);
+
+ num_col_blocks_f_ = bs->cols.size() - num_col_blocks_e_;
+
+ // Compute the number of row blocks in E. The number of row blocks
+ // in E maybe less than the number of row blocks in the input matrix
+ // as some of the row blocks at the bottom may not have any
+ // e_blocks. For a definition of what an e_block is, please see
+ // explicit_schur_complement_solver.h
+ num_row_blocks_e_ = 0;
+ for (int r = 0; r < bs->rows.size(); ++r) {
+ const vector<Cell>& cells = bs->rows[r].cells;
+ if (cells[0].block_id < num_col_blocks_e_) {
+ ++num_row_blocks_e_;
+ }
+ }
+
+ // Compute the number of columns in E and F.
+ num_cols_e_ = 0;
+ num_cols_f_ = 0;
+
+ for (int c = 0; c < bs->cols.size(); ++c) {
+ const Block& block = bs->cols[c];
+ if (c < num_col_blocks_e_) {
+ num_cols_e_ += block.size;
+ } else {
+ num_cols_f_ += block.size;
+ }
+ }
+
+ CHECK_EQ(num_cols_e_ + num_cols_f_, matrix_.num_cols());
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+~PartitionedMatrixView() {
+}
+
+// The next four methods don't seem to be particularly cache
+// friendly. This is an artifact of how the BlockStructure of the
+// input matrix is constructed. These methods will benefit from
+// multithreading as well as improved data layout.
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+RightMultiplyE(const double* x, double* y) const {
+ const CompressedRowBlockStructure* bs = matrix_.block_structure();
+
+ // Iterate over the first num_row_blocks_e_ row blocks, and multiply
+ // by the first cell in each row block.
+ const double* values = matrix_.values();
+ for (int r = 0; r < num_row_blocks_e_; ++r) {
+ const Cell& cell = bs->rows[r].cells[0];
+ const int row_block_pos = bs->rows[r].block.position;
+ const int row_block_size = bs->rows[r].block.size;
+ const int col_block_id = cell.block_id;
+ const int col_block_pos = bs->cols[col_block_id].position;
+ const int col_block_size = bs->cols[col_block_id].size;
+ MatrixVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
+ values + cell.position, row_block_size, col_block_size,
+ x + col_block_pos,
+ y + row_block_pos);
+ }
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+RightMultiplyF(const double* x, double* y) const {
+ const CompressedRowBlockStructure* bs = matrix_.block_structure();
+
+ // Iterate over row blocks, and if the row block is in E, then
+ // multiply by all the cells except the first one which is of type
+ // E. If the row block is not in E (i.e its in the bottom
+ // num_row_blocks - num_row_blocks_e row blocks), then all the cells
+ // are of type F and multiply by them all.
+ const double* values = matrix_.values();
+ for (int r = 0; r < num_row_blocks_e_; ++r) {
+ const int row_block_pos = bs->rows[r].block.position;
+ const int row_block_size = bs->rows[r].block.size;
+ const vector<Cell>& cells = bs->rows[r].cells;
+ for (int c = 1; c < cells.size(); ++c) {
+ const int col_block_id = cells[c].block_id;
+ const int col_block_pos = bs->cols[col_block_id].position;
+ const int col_block_size = bs->cols[col_block_id].size;
+ MatrixVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
+ values + cells[c].position, row_block_size, col_block_size,
+ x + col_block_pos - num_cols_e_,
+ y + row_block_pos);
+ }
+ }
+
+ for (int r = num_row_blocks_e_; r < bs->rows.size(); ++r) {
+ const int row_block_pos = bs->rows[r].block.position;
+ const int row_block_size = bs->rows[r].block.size;
+ const vector<Cell>& cells = bs->rows[r].cells;
+ for (int c = 0; c < cells.size(); ++c) {
+ const int col_block_id = cells[c].block_id;
+ const int col_block_pos = bs->cols[col_block_id].position;
+ const int col_block_size = bs->cols[col_block_id].size;
+ MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
+ values + cells[c].position, row_block_size, col_block_size,
+ x + col_block_pos - num_cols_e_,
+ y + row_block_pos);
+ }
+ }
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+LeftMultiplyE(const double* x, double* y) const {
+ const CompressedRowBlockStructure* bs = matrix_.block_structure();
+
+ // Iterate over the first num_row_blocks_e_ row blocks, and multiply
+ // by the first cell in each row block.
+ const double* values = matrix_.values();
+ for (int r = 0; r < num_row_blocks_e_; ++r) {
+ const Cell& cell = bs->rows[r].cells[0];
+ const int row_block_pos = bs->rows[r].block.position;
+ const int row_block_size = bs->rows[r].block.size;
+ const int col_block_id = cell.block_id;
+ const int col_block_pos = bs->cols[col_block_id].position;
+ const int col_block_size = bs->cols[col_block_id].size;
+ MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
+ values + cell.position, row_block_size, col_block_size,
+ x + row_block_pos,
+ y + col_block_pos);
+ }
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+LeftMultiplyF(const double* x, double* y) const {
+ const CompressedRowBlockStructure* bs = matrix_.block_structure();
+
+ // Iterate over row blocks, and if the row block is in E, then
+ // multiply by all the cells except the first one which is of type
+ // E. If the row block is not in E (i.e its in the bottom
+ // num_row_blocks - num_row_blocks_e row blocks), then all the cells
+ // are of type F and multiply by them all.
+ const double* values = matrix_.values();
+ for (int r = 0; r < num_row_blocks_e_; ++r) {
+ const int row_block_pos = bs->rows[r].block.position;
+ const int row_block_size = bs->rows[r].block.size;
+ const vector<Cell>& cells = bs->rows[r].cells;
+ for (int c = 1; c < cells.size(); ++c) {
+ const int col_block_id = cells[c].block_id;
+ const int col_block_pos = bs->cols[col_block_id].position;
+ const int col_block_size = bs->cols[col_block_id].size;
+ MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
+ values + cells[c].position, row_block_size, col_block_size,
+ x + row_block_pos,
+ y + col_block_pos - num_cols_e_);
+ }
+ }
+
+ for (int r = num_row_blocks_e_; r < bs->rows.size(); ++r) {
+ const int row_block_pos = bs->rows[r].block.position;
+ const int row_block_size = bs->rows[r].block.size;
+ const vector<Cell>& cells = bs->rows[r].cells;
+ for (int c = 0; c < cells.size(); ++c) {
+ const int col_block_id = cells[c].block_id;
+ const int col_block_pos = bs->cols[col_block_id].position;
+ const int col_block_size = bs->cols[col_block_id].size;
+ MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
+ values + cells[c].position, row_block_size, col_block_size,
+ x + row_block_pos,
+ y + col_block_pos - num_cols_e_);
+ }
+ }
+}
+
+// Given a range of columns blocks of a matrix m, compute the block
+// structure of the block diagonal of the matrix m(:,
+// start_col_block:end_col_block)'m(:, start_col_block:end_col_block)
+// and return a BlockSparseMatrix with the this block structure. The
+// caller owns the result.
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+BlockSparseMatrix*
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+CreateBlockDiagonalMatrixLayout(int start_col_block, int end_col_block) const {
+ const CompressedRowBlockStructure* bs = matrix_.block_structure();
+ CompressedRowBlockStructure* block_diagonal_structure =
+ new CompressedRowBlockStructure;
+
+ int block_position = 0;
+ int diagonal_cell_position = 0;
+
+ // Iterate over the column blocks, creating a new diagonal block for
+ // each column block.
+ for (int c = start_col_block; c < end_col_block; ++c) {
+ const Block& block = bs->cols[c];
+ block_diagonal_structure->cols.push_back(Block());
+ Block& diagonal_block = block_diagonal_structure->cols.back();
+ diagonal_block.size = block.size;
+ diagonal_block.position = block_position;
+
+ block_diagonal_structure->rows.push_back(CompressedRow());
+ CompressedRow& row = block_diagonal_structure->rows.back();
+ row.block = diagonal_block;
+
+ row.cells.push_back(Cell());
+ Cell& cell = row.cells.back();
+ cell.block_id = c - start_col_block;
+ cell.position = diagonal_cell_position;
+
+ block_position += block.size;
+ diagonal_cell_position += block.size * block.size;
+ }
+
+ // Build a BlockSparseMatrix with the just computed block
+ // structure.
+ return new BlockSparseMatrix(block_diagonal_structure);
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+BlockSparseMatrix*
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+CreateBlockDiagonalEtE() const {
+ BlockSparseMatrix* block_diagonal =
+ CreateBlockDiagonalMatrixLayout(0, num_col_blocks_e_);
+ UpdateBlockDiagonalEtE(block_diagonal);
+ return block_diagonal;
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+BlockSparseMatrix*
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+CreateBlockDiagonalFtF() const {
+ BlockSparseMatrix* block_diagonal =
+ CreateBlockDiagonalMatrixLayout(
+ num_col_blocks_e_, num_col_blocks_e_ + num_col_blocks_f_);
+ UpdateBlockDiagonalFtF(block_diagonal);
+ return block_diagonal;
+}
+
+// Similar to the code in RightMultiplyE, except instead of the matrix
+// vector multiply its an outer product.
+//
+// block_diagonal = block_diagonal(E'E)
+//
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+UpdateBlockDiagonalEtE(
+ BlockSparseMatrix* block_diagonal) const {
+ const CompressedRowBlockStructure* bs = matrix_.block_structure();
+ const CompressedRowBlockStructure* block_diagonal_structure =
+ block_diagonal->block_structure();
+
+ block_diagonal->SetZero();
+ const double* values = matrix_.values();
+ for (int r = 0; r < num_row_blocks_e_ ; ++r) {
+ const Cell& cell = bs->rows[r].cells[0];
+ const int row_block_size = bs->rows[r].block.size;
+ const int block_id = cell.block_id;
+ const int col_block_size = bs->cols[block_id].size;
+ const int cell_position =
+ block_diagonal_structure->rows[block_id].cells[0].position;
+
+ MatrixTransposeMatrixMultiply
+ <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
+ values + cell.position, row_block_size, col_block_size,
+ values + cell.position, row_block_size, col_block_size,
+ block_diagonal->mutable_values() + cell_position,
+ 0, 0, col_block_size, col_block_size);
+ }
+}
+
+// Similar to the code in RightMultiplyF, except instead of the matrix
+// vector multiply its an outer product.
+//
+// block_diagonal = block_diagonal(F'F)
+//
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const {
+ const CompressedRowBlockStructure* bs = matrix_.block_structure();
+ const CompressedRowBlockStructure* block_diagonal_structure =
+ block_diagonal->block_structure();
+
+ block_diagonal->SetZero();
+ const double* values = matrix_.values();
+ for (int r = 0; r < num_row_blocks_e_; ++r) {
+ const int row_block_size = bs->rows[r].block.size;
+ const vector<Cell>& cells = bs->rows[r].cells;
+ for (int c = 1; c < cells.size(); ++c) {
+ const int col_block_id = cells[c].block_id;
+ const int col_block_size = bs->cols[col_block_id].size;
+ const int diagonal_block_id = col_block_id - num_col_blocks_e_;
+ const int cell_position =
+ block_diagonal_structure->rows[diagonal_block_id].cells[0].position;
+
+ MatrixTransposeMatrixMultiply
+ <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
+ values + cells[c].position, row_block_size, col_block_size,
+ values + cells[c].position, row_block_size, col_block_size,
+ block_diagonal->mutable_values() + cell_position,
+ 0, 0, col_block_size, col_block_size);
+ }
+ }
+
+ for (int r = num_row_blocks_e_; r < bs->rows.size(); ++r) {
+ const int row_block_size = bs->rows[r].block.size;
+ const vector<Cell>& cells = bs->rows[r].cells;
+ for (int c = 0; c < cells.size(); ++c) {
+ const int col_block_id = cells[c].block_id;
+ const int col_block_size = bs->cols[col_block_id].size;
+ const int diagonal_block_id = col_block_id - num_col_blocks_e_;
+ const int cell_position =
+ block_diagonal_structure->rows[diagonal_block_id].cells[0].position;
+
+ MatrixTransposeMatrixMultiply
+ <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
+ values + cells[c].position, row_block_size, col_block_size,
+ values + cells[c].position, row_block_size, col_block_size,
+ block_diagonal->mutable_values() + cell_position,
+ 0, 0, col_block_size, col_block_size);
+ }
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/partitioned_matrix_view_test.cc b/internal/ceres/partitioned_matrix_view_test.cc
index 48f7d24..ef5dae9 100644
--- a/internal/ceres/partitioned_matrix_view_test.cc
+++ b/internal/ceres/partitioned_matrix_view_test.cc
@@ -49,6 +49,7 @@ const double kEpsilon = 1e-14;
class PartitionedMatrixViewTest : public ::testing::Test {
protected :
virtual void SetUp() {
+ srand(5);
scoped_ptr<LinearLeastSquaresProblem> problem(
CreateLinearLeastSquaresProblemFromId(2));
CHECK_NOTNULL(problem.get());
@@ -57,108 +58,93 @@ class PartitionedMatrixViewTest : public ::testing::Test {
num_cols_ = A_->num_cols();
num_rows_ = A_->num_rows();
num_eliminate_blocks_ = problem->num_eliminate_blocks;
+ LinearSolver::Options options;
+ options.elimination_groups.push_back(num_eliminate_blocks_);
+ pmv_.reset(PartitionedMatrixViewBase::Create(
+ options,
+ *down_cast<BlockSparseMatrix*>(A_.get())));
}
int num_rows_;
int num_cols_;
int num_eliminate_blocks_;
-
scoped_ptr<SparseMatrix> A_;
+ scoped_ptr<PartitionedMatrixViewBase> pmv_;
};
TEST_F(PartitionedMatrixViewTest, DimensionsTest) {
- PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
- num_eliminate_blocks_);
- EXPECT_EQ(m.num_col_blocks_e(), num_eliminate_blocks_);
- EXPECT_EQ(m.num_col_blocks_f(), num_cols_ - num_eliminate_blocks_);
- EXPECT_EQ(m.num_cols_e(), num_eliminate_blocks_);
- EXPECT_EQ(m.num_cols_f(), num_cols_ - num_eliminate_blocks_);
- EXPECT_EQ(m.num_cols(), A_->num_cols());
- EXPECT_EQ(m.num_rows(), A_->num_rows());
+ EXPECT_EQ(pmv_->num_col_blocks_e(), num_eliminate_blocks_);
+ EXPECT_EQ(pmv_->num_col_blocks_f(), num_cols_ - num_eliminate_blocks_);
+ EXPECT_EQ(pmv_->num_cols_e(), num_eliminate_blocks_);
+ EXPECT_EQ(pmv_->num_cols_f(), num_cols_ - num_eliminate_blocks_);
+ EXPECT_EQ(pmv_->num_cols(), A_->num_cols());
+ EXPECT_EQ(pmv_->num_rows(), A_->num_rows());
}
TEST_F(PartitionedMatrixViewTest, RightMultiplyE) {
- PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
- num_eliminate_blocks_);
-
- srand(5);
-
- Vector x1(m.num_cols_e());
- Vector x2(m.num_cols());
+ Vector x1(pmv_->num_cols_e());
+ Vector x2(pmv_->num_cols());
x2.setZero();
- for (int i = 0; i < m.num_cols_e(); ++i) {
+ for (int i = 0; i < pmv_->num_cols_e(); ++i) {
x1(i) = x2(i) = RandDouble();
}
- Vector y1 = Vector::Zero(m.num_rows());
- m.RightMultiplyE(x1.data(), y1.data());
+ Vector y1 = Vector::Zero(pmv_->num_rows());
+ pmv_->RightMultiplyE(x1.data(), y1.data());
- Vector y2 = Vector::Zero(m.num_rows());
+ Vector y2 = Vector::Zero(pmv_->num_rows());
A_->RightMultiply(x2.data(), y2.data());
- for (int i = 0; i < m.num_rows(); ++i) {
+ for (int i = 0; i < pmv_->num_rows(); ++i) {
EXPECT_NEAR(y1(i), y2(i), kEpsilon);
}
}
TEST_F(PartitionedMatrixViewTest, RightMultiplyF) {
- PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
- num_eliminate_blocks_);
-
- srand(5);
-
- Vector x1(m.num_cols_f());
- Vector x2 = Vector::Zero(m.num_cols());
+ Vector x1(pmv_->num_cols_f());
+ Vector x2 = Vector::Zero(pmv_->num_cols());
- for (int i = 0; i < m.num_cols_f(); ++i) {
+ for (int i = 0; i < pmv_->num_cols_f(); ++i) {
x1(i) = RandDouble();
- x2(i + m.num_cols_e()) = x1(i);
+ x2(i + pmv_->num_cols_e()) = x1(i);
}
- Vector y1 = Vector::Zero(m.num_rows());
- m.RightMultiplyF(x1.data(), y1.data());
+ Vector y1 = Vector::Zero(pmv_->num_rows());
+ pmv_->RightMultiplyF(x1.data(), y1.data());
- Vector y2 = Vector::Zero(m.num_rows());
+ Vector y2 = Vector::Zero(pmv_->num_rows());
A_->RightMultiply(x2.data(), y2.data());
- for (int i = 0; i < m.num_rows(); ++i) {
+ for (int i = 0; i < pmv_->num_rows(); ++i) {
EXPECT_NEAR(y1(i), y2(i), kEpsilon);
}
}
TEST_F(PartitionedMatrixViewTest, LeftMultiply) {
- PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
- num_eliminate_blocks_);
-
- srand(5);
-
- Vector x = Vector::Zero(m.num_rows());
- for (int i = 0; i < m.num_rows(); ++i) {
+ Vector x = Vector::Zero(pmv_->num_rows());
+ for (int i = 0; i < pmv_->num_rows(); ++i) {
x(i) = RandDouble();
}
- Vector y = Vector::Zero(m.num_cols());
- Vector y1 = Vector::Zero(m.num_cols_e());
- Vector y2 = Vector::Zero(m.num_cols_f());
+ Vector y = Vector::Zero(pmv_->num_cols());
+ Vector y1 = Vector::Zero(pmv_->num_cols_e());
+ Vector y2 = Vector::Zero(pmv_->num_cols_f());
A_->LeftMultiply(x.data(), y.data());
- m.LeftMultiplyE(x.data(), y1.data());
- m.LeftMultiplyF(x.data(), y2.data());
+ pmv_->LeftMultiplyE(x.data(), y1.data());
+ pmv_->LeftMultiplyF(x.data(), y2.data());
- for (int i = 0; i < m.num_cols(); ++i) {
+ for (int i = 0; i < pmv_->num_cols(); ++i) {
EXPECT_NEAR(y(i),
- (i < m.num_cols_e()) ? y1(i) : y2(i - m.num_cols_e()),
+ (i < pmv_->num_cols_e()) ? y1(i) : y2(i - pmv_->num_cols_e()),
kEpsilon);
}
}
TEST_F(PartitionedMatrixViewTest, BlockDiagonalEtE) {
- PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
- num_eliminate_blocks_);
-
scoped_ptr<BlockSparseMatrix>
- block_diagonal_ee(m.CreateBlockDiagonalEtE());
+ block_diagonal_ee(pmv_->CreateBlockDiagonalEtE());
const CompressedRowBlockStructure* bs = block_diagonal_ee->block_structure();
EXPECT_EQ(block_diagonal_ee->num_rows(), 2);
@@ -171,11 +157,8 @@ TEST_F(PartitionedMatrixViewTest, BlockDiagonalEtE) {
}
TEST_F(PartitionedMatrixViewTest, BlockDiagonalFtF) {
- PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
- num_eliminate_blocks_);
-
scoped_ptr<BlockSparseMatrix>
- block_diagonal_ff(m.CreateBlockDiagonalFtF());
+ block_diagonal_ff(pmv_->CreateBlockDiagonalFtF());
const CompressedRowBlockStructure* bs = block_diagonal_ff->block_structure();
EXPECT_EQ(block_diagonal_ff->num_rows(), 3);
diff --git a/internal/ceres/polynomial.cc b/internal/ceres/polynomial.cc
index 3238b89..75f43de 100644
--- a/internal/ceres/polynomial.cc
+++ b/internal/ceres/polynomial.cc
@@ -37,6 +37,7 @@
#include "Eigen/Dense"
#include "ceres/internal/port.h"
+#include "ceres/stringprintf.h"
#include "glog/logging.h"
namespace ceres {
@@ -119,6 +120,63 @@ Vector RemoveLeadingZeros(const Vector& polynomial_in) {
}
return polynomial_in.tail(polynomial_in.size() - i);
}
+
+void FindLinearPolynomialRoots(const Vector& polynomial,
+ Vector* real,
+ Vector* imaginary) {
+ CHECK_EQ(polynomial.size(), 2);
+ if (real != NULL) {
+ real->resize(1);
+ (*real)(0) = -polynomial(1) / polynomial(0);
+ }
+
+ if (imaginary != NULL) {
+ imaginary->setZero(1);
+ }
+}
+
+void FindQuadraticPolynomialRoots(const Vector& polynomial,
+ Vector* real,
+ Vector* imaginary) {
+ CHECK_EQ(polynomial.size(), 3);
+ const double a = polynomial(0);
+ const double b = polynomial(1);
+ const double c = polynomial(2);
+ const double D = b * b - 4 * a * c;
+ const double sqrt_D = sqrt(fabs(D));
+ if (real != NULL) {
+ real->setZero(2);
+ }
+ if (imaginary != NULL) {
+ imaginary->setZero(2);
+ }
+
+ // Real roots.
+ if (D >= 0) {
+ if (real != NULL) {
+ // Stable quadratic roots according to BKP Horn.
+ // http://people.csail.mit.edu/bkph/articles/Quadratics.pdf
+ if (b >= 0) {
+ (*real)(0) = (-b - sqrt_D) / (2.0 * a);
+ (*real)(1) = (2.0 * c) / (-b - sqrt_D);
+ } else {
+ (*real)(0) = (2.0 * c) / (-b + sqrt_D);
+ (*real)(1) = (-b + sqrt_D) / (2.0 * a);
+ }
+ }
+ return;
+ }
+
+ // Use the normal quadratic formula for the complex case.
+ if (real != NULL) {
+ (*real)(0) = -b / (2.0 * a);
+ (*real)(1) = -b / (2.0 * a);
+ }
+ if (imaginary != NULL) {
+ (*imaginary)(0) = sqrt_D / (2.0 * a);
+ (*imaginary)(1) = -sqrt_D / (2.0 * a);
+ }
+}
} // namespace
bool FindPolynomialRoots(const Vector& polynomial_in,
@@ -132,30 +190,40 @@ bool FindPolynomialRoots(const Vector& polynomial_in,
Vector polynomial = RemoveLeadingZeros(polynomial_in);
const int degree = polynomial.size() - 1;
+ VLOG(3) << "Input polynomial: " << polynomial_in.transpose();
+ if (polynomial.size() != polynomial_in.size()) {
+ VLOG(3) << "Trimmed polynomial: " << polynomial.transpose();
+ }
+
// Is the polynomial constant?
if (degree == 0) {
LOG(WARNING) << "Trying to extract roots from a constant "
<< "polynomial in FindPolynomialRoots";
+ // We return true with no roots, not false, as if the polynomial is constant
+ // it is correct that there are no roots. It is not the case that they were
+ // there, but that we have failed to extract them.
+ return true;
+ }
+
+ // Linear
+ if (degree == 1) {
+ FindLinearPolynomialRoots(polynomial, real, imaginary);
+ return true;
+ }
+
+ // Quadratic
+ if (degree == 2) {
+ FindQuadraticPolynomialRoots(polynomial, real, imaginary);
return true;
}
+ // The degree is now known to be at least 3. For cubic or higher
+ // roots we use the method of companion matrices.
+
// Divide by leading term
const double leading_term = polynomial(0);
polynomial /= leading_term;
- // Separately handle linear polynomials.
- if (degree == 1) {
- if (real != NULL) {
- real->resize(1);
- (*real)(0) = -polynomial(1);
- }
- if (imaginary != NULL) {
- imaginary->resize(1);
- imaginary->setZero();
- }
- }
-
- // The degree is now known to be at least 2.
// Build and balance the companion matrix to the polynomial.
Matrix companion_matrix(degree, degree);
BuildCompanionMatrix(polynomial, &companion_matrix);
@@ -255,6 +323,12 @@ void MinimizePolynomial(const Vector& polynomial,
}
}
+string FunctionSample::ToDebugString() const {
+ return StringPrintf("[x: %.8e, value: %.8e, gradient: %.8e, "
+ "value_is_valid: %d, gradient_is_valid: %d]",
+ x, value, gradient, value_is_valid, gradient_is_valid);
+}
+
Vector FindInterpolatingPolynomial(const vector<FunctionSample>& samples) {
const int num_samples = samples.size();
int num_constraints = 0;
@@ -268,6 +342,7 @@ Vector FindInterpolatingPolynomial(const vector<FunctionSample>& samples) {
}
const int degree = num_constraints - 1;
+
Matrix lhs = Matrix::Zero(num_constraints, num_constraints);
Vector rhs = Vector::Zero(num_constraints);
diff --git a/internal/ceres/polynomial.h b/internal/ceres/polynomial.h
index 42ffdcb..80ce77e 100644
--- a/internal/ceres/polynomial.h
+++ b/internal/ceres/polynomial.h
@@ -95,6 +95,7 @@ struct FunctionSample {
gradient(0.0),
gradient_is_valid(false) {
}
+ string ToDebugString() const;
double x;
double value; // value = f(x)
diff --git a/internal/ceres/preconditioner.cc b/internal/ceres/preconditioner.cc
index 505a47d..062347f 100644
--- a/internal/ceres/preconditioner.cc
+++ b/internal/ceres/preconditioner.cc
@@ -37,6 +37,16 @@ namespace internal {
Preconditioner::~Preconditioner() {
}
+PreconditionerType Preconditioner::PreconditionerForZeroEBlocks(
+ PreconditionerType preconditioner_type) {
+ if (preconditioner_type == SCHUR_JACOBI ||
+ preconditioner_type == CLUSTER_JACOBI ||
+ preconditioner_type == CLUSTER_TRIDIAGONAL) {
+ return JACOBI;
+ }
+ return preconditioner_type;
+}
+
SparseMatrixPreconditionerWrapper::SparseMatrixPreconditionerWrapper(
const SparseMatrix* matrix)
: matrix_(CHECK_NOTNULL(matrix)) {
diff --git a/internal/ceres/preconditioner.h b/internal/ceres/preconditioner.h
index af64e3c..e8d5994 100644
--- a/internal/ceres/preconditioner.h
+++ b/internal/ceres/preconditioner.h
@@ -36,6 +36,7 @@
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/linear_operator.h"
#include "ceres/sparse_matrix.h"
+#include "ceres/types.h"
namespace ceres {
namespace internal {
@@ -48,6 +49,7 @@ class Preconditioner : public LinearOperator {
struct Options {
Options()
: type(JACOBI),
+ visibility_clustering_type(CANONICAL_VIEWS),
sparse_linear_algebra_library_type(SUITE_SPARSE),
num_threads(1),
row_block_size(Eigen::Dynamic),
@@ -56,7 +58,7 @@ class Preconditioner : public LinearOperator {
}
PreconditionerType type;
-
+ VisibilityClusteringType visibility_clustering_type;
SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type;
// If possible, how many threads the preconditioner can use.
@@ -94,6 +96,14 @@ class Preconditioner : public LinearOperator {
int f_block_size;
};
+ // If the optimization problem is such that there are no remaining
+ // e-blocks, ITERATIVE_SCHUR with a Schur type preconditioner cannot
+ // be used. This function returns JACOBI if a preconditioner for
+ // ITERATIVE_SCHUR is used. The input preconditioner_type is
+ // returned otherwise.
+ static PreconditionerType PreconditionerForZeroEBlocks(
+ PreconditionerType preconditioner_type);
+
virtual ~Preconditioner();
// Update the numerical value of the preconditioner for the linear
diff --git a/internal/ceres/problem.cc b/internal/ceres/problem.cc
index 403e96a..674694d 100644
--- a/internal/ceres/problem.cc
+++ b/internal/ceres/problem.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
@@ -178,6 +178,23 @@ void Problem::SetParameterization(
problem_impl_->SetParameterization(values, local_parameterization);
}
+const LocalParameterization* Problem::GetParameterization(
+ double* values) const {
+ return problem_impl_->GetParameterization(values);
+}
+
+void Problem::SetParameterLowerBound(double* values,
+ int index,
+ double lower_bound) {
+ problem_impl_->SetParameterLowerBound(values, index, lower_bound);
+}
+
+void Problem::SetParameterUpperBound(double* values,
+ int index,
+ double upper_bound) {
+ problem_impl_->SetParameterUpperBound(values, index, upper_bound);
+}
+
bool Problem::Evaluate(const EvaluateOptions& evaluate_options,
double* cost,
vector<double>* residuals,
@@ -214,8 +231,31 @@ int Problem::ParameterBlockLocalSize(const double* parameter_block) const {
return problem_impl_->ParameterBlockLocalSize(parameter_block);
};
+bool Problem::HasParameterBlock(const double* values) const {
+ return problem_impl_->HasParameterBlock(values);
+}
+
void Problem::GetParameterBlocks(vector<double*>* parameter_blocks) const {
problem_impl_->GetParameterBlocks(parameter_blocks);
}
+void Problem::GetResidualBlocks(
+ vector<ResidualBlockId>* residual_blocks) const {
+ problem_impl_->GetResidualBlocks(residual_blocks);
+}
+
+void Problem::GetParameterBlocksForResidualBlock(
+ const ResidualBlockId residual_block,
+ vector<double*>* parameter_blocks) const {
+ problem_impl_->GetParameterBlocksForResidualBlock(residual_block,
+ parameter_blocks);
+}
+
+void Problem::GetResidualBlocksForParameterBlock(
+ const double* values,
+ vector<ResidualBlockId>* residual_blocks) const {
+ problem_impl_->GetResidualBlocksForParameterBlock(values,
+ residual_blocks);
+}
+
} // namespace ceres
diff --git a/internal/ceres/problem_impl.cc b/internal/ceres/problem_impl.cc
index 8302702..7c86efb 100644
--- a/internal/ceres/problem_impl.cc
+++ b/internal/ceres/problem_impl.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
@@ -27,7 +27,7 @@
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
-// keir@google.com (Keir Mierle)
+// mierle@gmail.com (Keir Mierle)
#include "ceres/problem_impl.h"
@@ -142,7 +142,7 @@ ParameterBlock* ProblemImpl::InternalAddParameterBlock(double* values,
// For dynamic problems, add the list of dependent residual blocks, which is
// empty to start.
- if (options_.enable_fast_parameter_block_removal) {
+ if (options_.enable_fast_removal) {
new_parameter_block->EnableResidualBlockDependencies();
}
parameter_block_map_[values] = new_parameter_block;
@@ -150,6 +150,26 @@ ParameterBlock* ProblemImpl::InternalAddParameterBlock(double* values,
return new_parameter_block;
}
+void ProblemImpl::InternalRemoveResidualBlock(ResidualBlock* residual_block) {
+ CHECK_NOTNULL(residual_block);
+ // Perform no check on the validity of residual_block, that is handled in
+ // the public method: RemoveResidualBlock().
+
+ // If needed, remove the parameter dependencies on this residual block.
+ if (options_.enable_fast_removal) {
+ const int num_parameter_blocks_for_residual =
+ residual_block->NumParameterBlocks();
+ for (int i = 0; i < num_parameter_blocks_for_residual; ++i) {
+ residual_block->parameter_blocks()[i]
+ ->RemoveResidualBlock(residual_block);
+ }
+
+ ResidualBlockSet::iterator it = residual_block_set_.find(residual_block);
+ residual_block_set_.erase(it);
+ }
+ DeleteBlockInVector(program_->mutable_residual_blocks(), residual_block);
+}
+
// Deletes the residual block in question, assuming there are no other
// references to it inside the problem (e.g. by another parameter). Referenced
// cost and loss functions are tucked away for future deletion, since it is not
@@ -224,7 +244,7 @@ ResidualBlock* ProblemImpl::AddResidualBlock(
cost_function->parameter_block_sizes().size());
// Check the sizes match.
- const vector<int16>& parameter_block_sizes =
+ const vector<int32>& parameter_block_sizes =
cost_function->parameter_block_sizes();
if (!options_.disable_all_safety_checks) {
@@ -278,13 +298,18 @@ ResidualBlock* ProblemImpl::AddResidualBlock(
program_->residual_blocks_.size());
// Add dependencies on the residual to the parameter blocks.
- if (options_.enable_fast_parameter_block_removal) {
+ if (options_.enable_fast_removal) {
for (int i = 0; i < parameter_blocks.size(); ++i) {
parameter_block_ptrs[i]->AddResidualBlock(new_residual_block);
}
}
program_->residual_blocks_.push_back(new_residual_block);
+
+ if (options_.enable_fast_removal) {
+ residual_block_set_.insert(new_residual_block);
+ }
+
return new_residual_block;
}
@@ -452,7 +477,11 @@ template<typename Block>
void ProblemImpl::DeleteBlockInVector(vector<Block*>* mutable_blocks,
Block* block_to_remove) {
CHECK_EQ((*mutable_blocks)[block_to_remove->index()], block_to_remove)
- << "You found a Ceres bug! Block: " << block_to_remove->ToString();
+ << "You found a Ceres bug! \n"
+ << "Block requested: "
+ << block_to_remove->ToString() << "\n"
+ << "Block present: "
+ << (*mutable_blocks)[block_to_remove->index()]->ToString();
// Prepare the to-be-moved block for the new, lower-in-index position by
// setting the index to the blocks final location.
@@ -471,30 +500,46 @@ void ProblemImpl::DeleteBlockInVector(vector<Block*>* mutable_blocks,
void ProblemImpl::RemoveResidualBlock(ResidualBlock* residual_block) {
CHECK_NOTNULL(residual_block);
- // If needed, remove the parameter dependencies on this residual block.
- if (options_.enable_fast_parameter_block_removal) {
- const int num_parameter_blocks_for_residual =
- residual_block->NumParameterBlocks();
- for (int i = 0; i < num_parameter_blocks_for_residual; ++i) {
- residual_block->parameter_blocks()[i]
- ->RemoveResidualBlock(residual_block);
- }
+ // Verify that residual_block identifies a residual in the current problem.
+ const string residual_not_found_message =
+ StringPrintf("Residual block to remove: %p not found. This usually means "
+ "one of three things have happened:\n"
+ " 1) residual_block is uninitialised and points to a random "
+ "area in memory.\n"
+ " 2) residual_block represented a residual that was added to"
+ " the problem, but referred to a parameter block which has "
+ "since been removed, which removes all residuals which "
+ "depend on that parameter block, and was thus removed.\n"
+ " 3) residual_block referred to a residual that has already "
+ "been removed from the problem (by the user).",
+ residual_block);
+ if (options_.enable_fast_removal) {
+ CHECK(residual_block_set_.find(residual_block) !=
+ residual_block_set_.end())
+ << residual_not_found_message;
+ } else {
+ // Perform a full search over all current residuals.
+ CHECK(std::find(program_->residual_blocks().begin(),
+ program_->residual_blocks().end(),
+ residual_block) != program_->residual_blocks().end())
+ << residual_not_found_message;
}
- DeleteBlockInVector(program_->mutable_residual_blocks(), residual_block);
+
+ InternalRemoveResidualBlock(residual_block);
}
void ProblemImpl::RemoveParameterBlock(double* values) {
ParameterBlock* parameter_block =
FindParameterBlockOrDie(parameter_block_map_, values);
- if (options_.enable_fast_parameter_block_removal) {
+ if (options_.enable_fast_removal) {
// Copy the dependent residuals from the parameter block because the set of
// dependents will change after each call to RemoveResidualBlock().
vector<ResidualBlock*> residual_blocks_to_remove(
parameter_block->mutable_residual_blocks()->begin(),
parameter_block->mutable_residual_blocks()->end());
for (int i = 0; i < residual_blocks_to_remove.size(); ++i) {
- RemoveResidualBlock(residual_blocks_to_remove[i]);
+ InternalRemoveResidualBlock(residual_blocks_to_remove[i]);
}
} else {
// Scan all the residual blocks to remove ones that depend on the parameter
@@ -506,7 +551,7 @@ void ProblemImpl::RemoveParameterBlock(double* values) {
const int num_parameter_blocks = residual_block->NumParameterBlocks();
for (int j = 0; j < num_parameter_blocks; ++j) {
if (residual_block->parameter_blocks()[j] == parameter_block) {
- RemoveResidualBlock(residual_block);
+ InternalRemoveResidualBlock(residual_block);
// The parameter blocks are guaranteed unique.
break;
}
@@ -531,6 +576,26 @@ void ProblemImpl::SetParameterization(
->SetParameterization(local_parameterization);
}
+const LocalParameterization* ProblemImpl::GetParameterization(
+ double* values) const {
+ return FindParameterBlockOrDie(parameter_block_map_, values)
+ ->local_parameterization();
+}
+
+void ProblemImpl::SetParameterLowerBound(double* values,
+ int index,
+ double lower_bound) {
+ FindParameterBlockOrDie(parameter_block_map_, values)
+ ->SetLowerBound(index, lower_bound);
+}
+
+void ProblemImpl::SetParameterUpperBound(double* values,
+ int index,
+ double upper_bound) {
+ FindParameterBlockOrDie(parameter_block_map_, values)
+ ->SetUpperBound(index, upper_bound);
+}
+
bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options,
double* cost,
vector<double>* residuals,
@@ -634,6 +699,9 @@ bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options,
for (int i = 0; i < variable_parameter_blocks.size(); ++i) {
variable_parameter_blocks[i]->SetVarying();
}
+
+ program_->SetParameterBlockStatePtrsToUserStatePtrs();
+ program_->SetParameterOffsetsAndIndex();
return false;
}
@@ -692,6 +760,8 @@ bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options,
}
}
+ program_->SetParameterBlockStatePtrsToUserStatePtrs();
+ program_->SetParameterOffsetsAndIndex();
return status;
}
@@ -721,6 +791,11 @@ int ProblemImpl::ParameterBlockLocalSize(const double* parameter_block) const {
parameter_block_map_, const_cast<double*>(parameter_block))->LocalSize();
};
+bool ProblemImpl::HasParameterBlock(const double* parameter_block) const {
+ return (parameter_block_map_.find(const_cast<double*>(parameter_block)) !=
+ parameter_block_map_.end());
+}
+
void ProblemImpl::GetParameterBlocks(vector<double*>* parameter_blocks) const {
CHECK_NOTNULL(parameter_blocks);
parameter_blocks->resize(0);
@@ -731,6 +806,57 @@ void ProblemImpl::GetParameterBlocks(vector<double*>* parameter_blocks) const {
}
}
+void ProblemImpl::GetResidualBlocks(
+ vector<ResidualBlockId>* residual_blocks) const {
+ CHECK_NOTNULL(residual_blocks);
+ *residual_blocks = program().residual_blocks();
+}
+
+void ProblemImpl::GetParameterBlocksForResidualBlock(
+ const ResidualBlockId residual_block,
+ vector<double*>* parameter_blocks) const {
+ int num_parameter_blocks = residual_block->NumParameterBlocks();
+ CHECK_NOTNULL(parameter_blocks)->resize(num_parameter_blocks);
+ for (int i = 0; i < num_parameter_blocks; ++i) {
+ (*parameter_blocks)[i] =
+ residual_block->parameter_blocks()[i]->mutable_user_state();
+ }
+}
+
+void ProblemImpl::GetResidualBlocksForParameterBlock(
+ const double* values,
+ vector<ResidualBlockId>* residual_blocks) const {
+ ParameterBlock* parameter_block =
+ FindParameterBlockOrDie(parameter_block_map_,
+ const_cast<double*>(values));
+
+ if (options_.enable_fast_removal) {
+ // In this case the residual blocks that depend on the parameter block are
+ // stored in the parameter block already, so just copy them out.
+ CHECK_NOTNULL(residual_blocks)->resize(
+ parameter_block->mutable_residual_blocks()->size());
+ std::copy(parameter_block->mutable_residual_blocks()->begin(),
+ parameter_block->mutable_residual_blocks()->end(),
+ residual_blocks->begin());
+ return;
+ }
+
+ // Find residual blocks that depend on the parameter block.
+ CHECK_NOTNULL(residual_blocks)->clear();
+ const int num_residual_blocks = NumResidualBlocks();
+ for (int i = 0; i < num_residual_blocks; ++i) {
+ ResidualBlock* residual_block =
+ (*(program_->mutable_residual_blocks()))[i];
+ const int num_parameter_blocks = residual_block->NumParameterBlocks();
+ for (int j = 0; j < num_parameter_blocks; ++j) {
+ if (residual_block->parameter_blocks()[j] == parameter_block) {
+ residual_blocks->push_back(residual_block);
+ // The parameter blocks are guaranteed unique.
+ break;
+ }
+ }
+ }
+}
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/problem_impl.h b/internal/ceres/problem_impl.h
index ace27f5..7b5547b 100644
--- a/internal/ceres/problem_impl.h
+++ b/internal/ceres/problem_impl.h
@@ -45,6 +45,7 @@
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
+#include "ceres/collections_port.h"
#include "ceres/problem.h"
#include "ceres/types.h"
@@ -63,6 +64,7 @@ class ResidualBlock;
class ProblemImpl {
public:
typedef map<double*, ParameterBlock*> ParameterMap;
+ typedef HashSet<ResidualBlock*> ResidualBlockSet;
ProblemImpl();
explicit ProblemImpl(const Problem::Options& options);
@@ -127,6 +129,10 @@ class ProblemImpl {
void SetParameterBlockVariable(double* values);
void SetParameterization(double* values,
LocalParameterization* local_parameterization);
+ const LocalParameterization* GetParameterization(double* values) const;
+
+ void SetParameterLowerBound(double* values, int index, double lower_bound);
+ void SetParameterUpperBound(double* values, int index, double upper_bound);
bool Evaluate(const Problem::EvaluateOptions& options,
double* cost,
@@ -141,15 +147,33 @@ class ProblemImpl {
int ParameterBlockSize(const double* parameter_block) const;
int ParameterBlockLocalSize(const double* parameter_block) const;
+
+ bool HasParameterBlock(const double* parameter_block) const;
+
void GetParameterBlocks(vector<double*>* parameter_blocks) const;
+ void GetResidualBlocks(vector<ResidualBlockId>* residual_blocks) const;
+
+ void GetParameterBlocksForResidualBlock(
+ const ResidualBlockId residual_block,
+ vector<double*>* parameter_blocks) const;
+
+ void GetResidualBlocksForParameterBlock(
+ const double* values,
+ vector<ResidualBlockId>* residual_blocks) const;
const Program& program() const { return *program_; }
Program* mutable_program() { return program_.get(); }
const ParameterMap& parameter_map() const { return parameter_block_map_; }
+ const ResidualBlockSet& residual_block_set() const {
+ CHECK(options_.enable_fast_removal)
+ << "Fast removal not enabled, residual_block_set is not maintained.";
+ return residual_block_set_;
+ }
private:
ParameterBlock* InternalAddParameterBlock(double* values, int size);
+ void InternalRemoveResidualBlock(ResidualBlock* residual_block);
bool InternalEvaluate(Program* program,
double* cost,
@@ -171,6 +195,9 @@ class ProblemImpl {
// The mapping from user pointers to parameter blocks.
map<double*, ParameterBlock*> parameter_block_map_;
+ // Iff enable_fast_removal is enabled, contains the current residual blocks.
+ ResidualBlockSet residual_block_set_;
+
// The actual parameter and residual blocks.
internal::scoped_ptr<internal::Program> program_;
diff --git a/internal/ceres/problem_test.cc b/internal/ceres/problem_test.cc
index 0944d3f..db082ec 100644
--- a/internal/ceres/problem_test.cc
+++ b/internal/ceres/problem_test.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
@@ -56,7 +56,7 @@ namespace internal {
// Trivial cost function that accepts a single argument.
class UnaryCostFunction : public CostFunction {
public:
- UnaryCostFunction(int num_residuals, int16 parameter_block_size) {
+ UnaryCostFunction(int num_residuals, int32 parameter_block_size) {
set_num_residuals(num_residuals);
mutable_parameter_block_sizes()->push_back(parameter_block_size);
}
@@ -76,8 +76,8 @@ class UnaryCostFunction : public CostFunction {
class BinaryCostFunction: public CostFunction {
public:
BinaryCostFunction(int num_residuals,
- int16 parameter_block1_size,
- int16 parameter_block2_size) {
+ int32 parameter_block1_size,
+ int32 parameter_block2_size) {
set_num_residuals(num_residuals);
mutable_parameter_block_sizes()->push_back(parameter_block1_size);
mutable_parameter_block_sizes()->push_back(parameter_block2_size);
@@ -97,9 +97,9 @@ class BinaryCostFunction: public CostFunction {
class TernaryCostFunction: public CostFunction {
public:
TernaryCostFunction(int num_residuals,
- int16 parameter_block1_size,
- int16 parameter_block2_size,
- int16 parameter_block3_size) {
+ int32 parameter_block1_size,
+ int32 parameter_block2_size,
+ int32 parameter_block3_size) {
set_num_residuals(num_residuals);
mutable_parameter_block_sizes()->push_back(parameter_block1_size);
mutable_parameter_block_sizes()->push_back(parameter_block2_size);
@@ -139,7 +139,7 @@ TEST(Problem, AddResidualWithIncorrectNumberOfParameterBlocksDies) {
// UnaryCostFunction takes only one parameter, but two are passed.
EXPECT_DEATH_IF_SUPPORTED(
problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x, y),
- "parameter_blocks.size()");
+ "parameter_blocks.size");
}
TEST(Problem, AddResidualWithDifferentSizesOnTheSameVariableDies) {
@@ -378,7 +378,7 @@ TEST(Problem, CostFunctionsAreDeletedEvenWithRemovals) {
struct DynamicProblem : public ::testing::TestWithParam<bool> {
DynamicProblem() {
Problem::Options options;
- options.enable_fast_parameter_block_removal = GetParam();
+ options.enable_fast_removal = GetParam();
problem.reset(new ProblemImpl(options));
}
@@ -390,9 +390,26 @@ struct DynamicProblem : public ::testing::TestWithParam<bool> {
}
bool HasResidualBlock(ResidualBlock* residual_block) {
- return find(problem->program().residual_blocks().begin(),
- problem->program().residual_blocks().end(),
- residual_block) != problem->program().residual_blocks().end();
+ bool have_residual_block = true;
+ if (GetParam()) {
+ have_residual_block &=
+ (problem->residual_block_set().find(residual_block) !=
+ problem->residual_block_set().end());
+ }
+ have_residual_block &=
+ find(problem->program().residual_blocks().begin(),
+ problem->program().residual_blocks().end(),
+ residual_block) != problem->program().residual_blocks().end();
+ return have_residual_block;
+ }
+
+ int NumResidualBlocks() {
+ // Verify that the hash set of residuals is maintained consistently.
+ if (GetParam()) {
+ EXPECT_EQ(problem->residual_block_set().size(),
+ problem->NumResidualBlocks());
+ }
+ return problem->NumResidualBlocks();
}
// The next block of functions until the end are only for testing the
@@ -502,6 +519,20 @@ TEST(Problem, RemoveParameterBlockWithUnknownPtrDies) {
problem.RemoveParameterBlock(y), "Parameter block not found:");
}
+TEST(Problem, GetParameterization) {
+ double x[3];
+ double y[2];
+
+ Problem problem;
+ problem.AddParameterBlock(x, 3);
+ problem.AddParameterBlock(y, 2);
+
+ LocalParameterization* parameterization = new IdentityParameterization(3);
+ problem.SetParameterization(x, parameterization);
+ EXPECT_EQ(problem.GetParameterization(x), parameterization);
+ EXPECT_TRUE(problem.GetParameterization(y) == NULL);
+}
+
TEST(Problem, ParameterBlockQueryTest) {
double x[3];
double y[4];
@@ -525,7 +556,9 @@ TEST(Problem, ParameterBlockQueryTest) {
EXPECT_TRUE(parameter_blocks[0] == x || parameter_blocks[0] == y);
EXPECT_TRUE(parameter_blocks[1] == x || parameter_blocks[1] == y);
+ EXPECT_TRUE(problem.HasParameterBlock(x));
problem.RemoveParameterBlock(x);
+ EXPECT_FALSE(problem.HasParameterBlock(x));
problem.GetParameterBlocks(&parameter_blocks);
EXPECT_EQ(parameter_blocks.size(), 1);
EXPECT_TRUE(parameter_blocks[0] == y);
@@ -536,7 +569,7 @@ TEST_P(DynamicProblem, RemoveParameterBlockWithNoResiduals) {
problem->AddParameterBlock(z, 5);
problem->AddParameterBlock(w, 3);
ASSERT_EQ(3, problem->NumParameterBlocks());
- ASSERT_EQ(0, problem->NumResidualBlocks());
+ ASSERT_EQ(0, NumResidualBlocks());
EXPECT_EQ(y, GetParameterBlock(0)->user_state());
EXPECT_EQ(z, GetParameterBlock(1)->user_state());
EXPECT_EQ(w, GetParameterBlock(2)->user_state());
@@ -545,12 +578,12 @@ TEST_P(DynamicProblem, RemoveParameterBlockWithNoResiduals) {
// removing it.
problem->RemoveParameterBlock(w);
ASSERT_EQ(2, problem->NumParameterBlocks());
- ASSERT_EQ(0, problem->NumResidualBlocks());
+ ASSERT_EQ(0, NumResidualBlocks());
EXPECT_EQ(y, GetParameterBlock(0)->user_state());
EXPECT_EQ(z, GetParameterBlock(1)->user_state());
problem->AddParameterBlock(w, 3);
ASSERT_EQ(3, problem->NumParameterBlocks());
- ASSERT_EQ(0, problem->NumResidualBlocks());
+ ASSERT_EQ(0, NumResidualBlocks());
EXPECT_EQ(y, GetParameterBlock(0)->user_state());
EXPECT_EQ(z, GetParameterBlock(1)->user_state());
EXPECT_EQ(w, GetParameterBlock(2)->user_state());
@@ -558,12 +591,12 @@ TEST_P(DynamicProblem, RemoveParameterBlockWithNoResiduals) {
// Now remove z, which is in the middle, and add it back.
problem->RemoveParameterBlock(z);
ASSERT_EQ(2, problem->NumParameterBlocks());
- ASSERT_EQ(0, problem->NumResidualBlocks());
+ ASSERT_EQ(0, NumResidualBlocks());
EXPECT_EQ(y, GetParameterBlock(0)->user_state());
EXPECT_EQ(w, GetParameterBlock(1)->user_state());
problem->AddParameterBlock(z, 5);
ASSERT_EQ(3, problem->NumParameterBlocks());
- ASSERT_EQ(0, problem->NumResidualBlocks());
+ ASSERT_EQ(0, NumResidualBlocks());
EXPECT_EQ(y, GetParameterBlock(0)->user_state());
EXPECT_EQ(w, GetParameterBlock(1)->user_state());
EXPECT_EQ(z, GetParameterBlock(2)->user_state());
@@ -572,20 +605,20 @@ TEST_P(DynamicProblem, RemoveParameterBlockWithNoResiduals) {
// y
problem->RemoveParameterBlock(y);
ASSERT_EQ(2, problem->NumParameterBlocks());
- ASSERT_EQ(0, problem->NumResidualBlocks());
+ ASSERT_EQ(0, NumResidualBlocks());
EXPECT_EQ(z, GetParameterBlock(0)->user_state());
EXPECT_EQ(w, GetParameterBlock(1)->user_state());
// z
problem->RemoveParameterBlock(z);
ASSERT_EQ(1, problem->NumParameterBlocks());
- ASSERT_EQ(0, problem->NumResidualBlocks());
+ ASSERT_EQ(0, NumResidualBlocks());
EXPECT_EQ(w, GetParameterBlock(0)->user_state());
// w
problem->RemoveParameterBlock(w);
EXPECT_EQ(0, problem->NumParameterBlocks());
- EXPECT_EQ(0, problem->NumResidualBlocks());
+ EXPECT_EQ(0, NumResidualBlocks());
}
TEST_P(DynamicProblem, RemoveParameterBlockWithResiduals) {
@@ -593,7 +626,7 @@ TEST_P(DynamicProblem, RemoveParameterBlockWithResiduals) {
problem->AddParameterBlock(z, 5);
problem->AddParameterBlock(w, 3);
ASSERT_EQ(3, problem->NumParameterBlocks());
- ASSERT_EQ(0, problem->NumResidualBlocks());
+ ASSERT_EQ(0, NumResidualBlocks());
EXPECT_EQ(y, GetParameterBlock(0)->user_state());
EXPECT_EQ(z, GetParameterBlock(1)->user_state());
EXPECT_EQ(w, GetParameterBlock(2)->user_state());
@@ -616,12 +649,12 @@ TEST_P(DynamicProblem, RemoveParameterBlockWithResiduals) {
ResidualBlock* r_w = problem->AddResidualBlock(cost_w, NULL, w);
EXPECT_EQ(3, problem->NumParameterBlocks());
- EXPECT_EQ(7, problem->NumResidualBlocks());
+ EXPECT_EQ(7, NumResidualBlocks());
// Remove w, which should remove r_yzw, r_yw, r_zw, r_w.
problem->RemoveParameterBlock(w);
ASSERT_EQ(2, problem->NumParameterBlocks());
- ASSERT_EQ(3, problem->NumResidualBlocks());
+ ASSERT_EQ(3, NumResidualBlocks());
ASSERT_FALSE(HasResidualBlock(r_yzw));
ASSERT_TRUE (HasResidualBlock(r_yz ));
@@ -634,7 +667,7 @@ TEST_P(DynamicProblem, RemoveParameterBlockWithResiduals) {
// Remove z, which will remove almost everything else.
problem->RemoveParameterBlock(z);
ASSERT_EQ(1, problem->NumParameterBlocks());
- ASSERT_EQ(1, problem->NumResidualBlocks());
+ ASSERT_EQ(1, NumResidualBlocks());
ASSERT_FALSE(HasResidualBlock(r_yzw));
ASSERT_FALSE(HasResidualBlock(r_yz ));
@@ -647,7 +680,7 @@ TEST_P(DynamicProblem, RemoveParameterBlockWithResiduals) {
// Remove y; all gone.
problem->RemoveParameterBlock(y);
EXPECT_EQ(0, problem->NumParameterBlocks());
- EXPECT_EQ(0, problem->NumResidualBlocks());
+ EXPECT_EQ(0, NumResidualBlocks());
}
TEST_P(DynamicProblem, RemoveResidualBlock) {
@@ -685,14 +718,14 @@ TEST_P(DynamicProblem, RemoveResidualBlock) {
EXPECT_TRUE(GetParameterBlock(2)->mutable_residual_blocks() == NULL);
}
EXPECT_EQ(3, problem->NumParameterBlocks());
- EXPECT_EQ(7, problem->NumResidualBlocks());
+ EXPECT_EQ(7, NumResidualBlocks());
// Remove each residual and check the state after each removal.
// Remove r_yzw.
problem->RemoveResidualBlock(r_yzw);
ASSERT_EQ(3, problem->NumParameterBlocks());
- ASSERT_EQ(6, problem->NumResidualBlocks());
+ ASSERT_EQ(6, NumResidualBlocks());
if (GetParam()) {
ExpectParameterBlockContains(y, r_yz, r_yw, r_y);
ExpectParameterBlockContains(z, r_yz, r_zw, r_z);
@@ -708,7 +741,7 @@ TEST_P(DynamicProblem, RemoveResidualBlock) {
// Remove r_yw.
problem->RemoveResidualBlock(r_yw);
ASSERT_EQ(3, problem->NumParameterBlocks());
- ASSERT_EQ(5, problem->NumResidualBlocks());
+ ASSERT_EQ(5, NumResidualBlocks());
if (GetParam()) {
ExpectParameterBlockContains(y, r_yz, r_y);
ExpectParameterBlockContains(z, r_yz, r_zw, r_z);
@@ -723,7 +756,7 @@ TEST_P(DynamicProblem, RemoveResidualBlock) {
// Remove r_zw.
problem->RemoveResidualBlock(r_zw);
ASSERT_EQ(3, problem->NumParameterBlocks());
- ASSERT_EQ(4, problem->NumResidualBlocks());
+ ASSERT_EQ(4, NumResidualBlocks());
if (GetParam()) {
ExpectParameterBlockContains(y, r_yz, r_y);
ExpectParameterBlockContains(z, r_yz, r_z);
@@ -737,7 +770,7 @@ TEST_P(DynamicProblem, RemoveResidualBlock) {
// Remove r_w.
problem->RemoveResidualBlock(r_w);
ASSERT_EQ(3, problem->NumParameterBlocks());
- ASSERT_EQ(3, problem->NumResidualBlocks());
+ ASSERT_EQ(3, NumResidualBlocks());
if (GetParam()) {
ExpectParameterBlockContains(y, r_yz, r_y);
ExpectParameterBlockContains(z, r_yz, r_z);
@@ -750,7 +783,7 @@ TEST_P(DynamicProblem, RemoveResidualBlock) {
// Remove r_yz.
problem->RemoveResidualBlock(r_yz);
ASSERT_EQ(3, problem->NumParameterBlocks());
- ASSERT_EQ(2, problem->NumResidualBlocks());
+ ASSERT_EQ(2, NumResidualBlocks());
if (GetParam()) {
ExpectParameterBlockContains(y, r_y);
ExpectParameterBlockContains(z, r_z);
@@ -763,7 +796,7 @@ TEST_P(DynamicProblem, RemoveResidualBlock) {
problem->RemoveResidualBlock(r_z);
problem->RemoveResidualBlock(r_y);
ASSERT_EQ(3, problem->NumParameterBlocks());
- ASSERT_EQ(0, problem->NumResidualBlocks());
+ ASSERT_EQ(0, NumResidualBlocks());
if (GetParam()) {
ExpectParameterBlockContains(y);
ExpectParameterBlockContains(z);
@@ -771,6 +804,191 @@ TEST_P(DynamicProblem, RemoveResidualBlock) {
}
}
+TEST_P(DynamicProblem, RemoveInvalidResidualBlockDies) {
+ problem->AddParameterBlock(y, 4);
+ problem->AddParameterBlock(z, 5);
+ problem->AddParameterBlock(w, 3);
+
+ // Add all combinations of cost functions.
+ CostFunction* cost_yzw = new TernaryCostFunction(1, 4, 5, 3);
+ CostFunction* cost_yz = new BinaryCostFunction (1, 4, 5);
+ CostFunction* cost_yw = new BinaryCostFunction (1, 4, 3);
+ CostFunction* cost_zw = new BinaryCostFunction (1, 5, 3);
+ CostFunction* cost_y = new UnaryCostFunction (1, 4);
+ CostFunction* cost_z = new UnaryCostFunction (1, 5);
+ CostFunction* cost_w = new UnaryCostFunction (1, 3);
+
+ ResidualBlock* r_yzw = problem->AddResidualBlock(cost_yzw, NULL, y, z, w);
+ ResidualBlock* r_yz = problem->AddResidualBlock(cost_yz, NULL, y, z);
+ ResidualBlock* r_yw = problem->AddResidualBlock(cost_yw, NULL, y, w);
+ ResidualBlock* r_zw = problem->AddResidualBlock(cost_zw, NULL, z, w);
+ ResidualBlock* r_y = problem->AddResidualBlock(cost_y, NULL, y);
+ ResidualBlock* r_z = problem->AddResidualBlock(cost_z, NULL, z);
+ ResidualBlock* r_w = problem->AddResidualBlock(cost_w, NULL, w);
+
+ // Remove r_yzw.
+ problem->RemoveResidualBlock(r_yzw);
+ ASSERT_EQ(3, problem->NumParameterBlocks());
+ ASSERT_EQ(6, NumResidualBlocks());
+ // Attempt to remove r_yzw again.
+ EXPECT_DEATH_IF_SUPPORTED(problem->RemoveResidualBlock(r_yzw), "not found");
+
+ // Attempt to remove a cast pointer never added as a residual.
+ int trash_memory = 1234;
+ ResidualBlock* invalid_residual =
+ reinterpret_cast<ResidualBlock*>(&trash_memory);
+ EXPECT_DEATH_IF_SUPPORTED(problem->RemoveResidualBlock(invalid_residual),
+ "not found");
+
+ // Remove a parameter block, which in turn removes the dependent residuals
+ // then attempt to remove them directly.
+ problem->RemoveParameterBlock(z);
+ ASSERT_EQ(2, problem->NumParameterBlocks());
+ ASSERT_EQ(3, NumResidualBlocks());
+ EXPECT_DEATH_IF_SUPPORTED(problem->RemoveResidualBlock(r_yz), "not found");
+ EXPECT_DEATH_IF_SUPPORTED(problem->RemoveResidualBlock(r_zw), "not found");
+ EXPECT_DEATH_IF_SUPPORTED(problem->RemoveResidualBlock(r_z), "not found");
+
+ problem->RemoveResidualBlock(r_yw);
+ problem->RemoveResidualBlock(r_w);
+ problem->RemoveResidualBlock(r_y);
+}
+
+// Check that a null-terminated array, a, has the same elements as b.
+template<typename T>
+void ExpectVectorContainsUnordered(const T* a, const vector<T>& b) {
+ // Compute the size of a.
+ int size = 0;
+ while (a[size]) {
+ ++size;
+ }
+ ASSERT_EQ(size, b.size());
+
+ // Sort a.
+ vector<T> a_sorted(size);
+ copy(a, a + size, a_sorted.begin());
+ sort(a_sorted.begin(), a_sorted.end());
+
+ // Sort b.
+ vector<T> b_sorted(b);
+ sort(b_sorted.begin(), b_sorted.end());
+
+ // Compare.
+ for (int i = 0; i < size; ++i) {
+ EXPECT_EQ(a_sorted[i], b_sorted[i]);
+ }
+}
+
+void ExpectProblemHasResidualBlocks(
+ const ProblemImpl &problem,
+ const ResidualBlockId *expected_residual_blocks) {
+ vector<ResidualBlockId> residual_blocks;
+ problem.GetResidualBlocks(&residual_blocks);
+ ExpectVectorContainsUnordered(expected_residual_blocks, residual_blocks);
+}
+
+TEST_P(DynamicProblem, GetXXXBlocksForYYYBlock) {
+ problem->AddParameterBlock(y, 4);
+ problem->AddParameterBlock(z, 5);
+ problem->AddParameterBlock(w, 3);
+
+ // Add all combinations of cost functions.
+ CostFunction* cost_yzw = new TernaryCostFunction(1, 4, 5, 3);
+ CostFunction* cost_yz = new BinaryCostFunction (1, 4, 5);
+ CostFunction* cost_yw = new BinaryCostFunction (1, 4, 3);
+ CostFunction* cost_zw = new BinaryCostFunction (1, 5, 3);
+ CostFunction* cost_y = new UnaryCostFunction (1, 4);
+ CostFunction* cost_z = new UnaryCostFunction (1, 5);
+ CostFunction* cost_w = new UnaryCostFunction (1, 3);
+
+ ResidualBlock* r_yzw = problem->AddResidualBlock(cost_yzw, NULL, y, z, w);
+ {
+ ResidualBlockId expected_residuals[] = {r_yzw, 0};
+ ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+ }
+ ResidualBlock* r_yz = problem->AddResidualBlock(cost_yz, NULL, y, z);
+ {
+ ResidualBlockId expected_residuals[] = {r_yzw, r_yz, 0};
+ ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+ }
+ ResidualBlock* r_yw = problem->AddResidualBlock(cost_yw, NULL, y, w);
+ {
+ ResidualBlock *expected_residuals[] = {r_yzw, r_yz, r_yw, 0};
+ ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+ }
+ ResidualBlock* r_zw = problem->AddResidualBlock(cost_zw, NULL, z, w);
+ {
+ ResidualBlock *expected_residuals[] = {r_yzw, r_yz, r_yw, r_zw, 0};
+ ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+ }
+ ResidualBlock* r_y = problem->AddResidualBlock(cost_y, NULL, y);
+ {
+ ResidualBlock *expected_residuals[] = {r_yzw, r_yz, r_yw, r_zw, r_y, 0};
+ ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+ }
+ ResidualBlock* r_z = problem->AddResidualBlock(cost_z, NULL, z);
+ {
+ ResidualBlock *expected_residuals[] = {
+ r_yzw, r_yz, r_yw, r_zw, r_y, r_z, 0
+ };
+ ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+ }
+ ResidualBlock* r_w = problem->AddResidualBlock(cost_w, NULL, w);
+ {
+ ResidualBlock *expected_residuals[] = {
+ r_yzw, r_yz, r_yw, r_zw, r_y, r_z, r_w, 0
+ };
+ ExpectProblemHasResidualBlocks(*problem, expected_residuals);
+ }
+
+ vector<double*> parameter_blocks;
+ vector<ResidualBlockId> residual_blocks;
+
+ // Check GetResidualBlocksForParameterBlock() for all parameter blocks.
+ struct GetResidualBlocksForParameterBlockTestCase {
+ double* parameter_block;
+ ResidualBlockId expected_residual_blocks[10];
+ };
+ GetResidualBlocksForParameterBlockTestCase get_residual_blocks_cases[] = {
+ { y, { r_yzw, r_yz, r_yw, r_y, NULL} },
+ { z, { r_yzw, r_yz, r_zw, r_z, NULL} },
+ { w, { r_yzw, r_yw, r_zw, r_w, NULL} },
+ { NULL }
+ };
+ for (int i = 0; get_residual_blocks_cases[i].parameter_block; ++i) {
+ problem->GetResidualBlocksForParameterBlock(
+ get_residual_blocks_cases[i].parameter_block,
+ &residual_blocks);
+ ExpectVectorContainsUnordered(
+ get_residual_blocks_cases[i].expected_residual_blocks,
+ residual_blocks);
+ }
+
+ // Check GetParameterBlocksForResidualBlock() for all residual blocks.
+ struct GetParameterBlocksForResidualBlockTestCase {
+ ResidualBlockId residual_block;
+ double* expected_parameter_blocks[10];
+ };
+ GetParameterBlocksForResidualBlockTestCase get_parameter_blocks_cases[] = {
+ { r_yzw, { y, z, w, NULL } },
+ { r_yz , { y, z, NULL } },
+ { r_yw , { y, w, NULL } },
+ { r_zw , { z, w, NULL } },
+ { r_y , { y, NULL } },
+ { r_z , { z, NULL } },
+ { r_w , { w, NULL } },
+ { NULL }
+ };
+ for (int i = 0; get_parameter_blocks_cases[i].residual_block; ++i) {
+ problem->GetParameterBlocksForResidualBlock(
+ get_parameter_blocks_cases[i].residual_block,
+ &parameter_blocks);
+ ExpectVectorContainsUnordered(
+ get_parameter_blocks_cases[i].expected_parameter_blocks,
+ parameter_blocks);
+ }
+}
+
INSTANTIATE_TEST_CASE_P(OptionsInstantiation,
DynamicProblem,
::testing::Values(true, false));
@@ -862,7 +1080,9 @@ class ProblemEvaluateTest : public ::testing::Test {
parameters_));
}
-
+ void TearDown() {
+ EXPECT_TRUE(problem_.program().IsValid());
+ }
void EvaluateAndCompare(const Problem::EvaluateOptions& options,
const int expected_num_rows,
diff --git a/internal/ceres/program.cc b/internal/ceres/program.cc
index 82d76d3..1d0a157 100644
--- a/internal/ceres/program.cc
+++ b/internal/ceres/program.cc
@@ -32,6 +32,7 @@
#include <map>
#include <vector>
+#include "ceres/array_utils.h"
#include "ceres/casts.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/cost_function.h"
@@ -44,6 +45,7 @@
#include "ceres/problem.h"
#include "ceres/residual_block.h"
#include "ceres/stl_util.h"
+#include "ceres/triplet_sparse_matrix.h"
namespace ceres {
namespace internal {
@@ -140,6 +142,289 @@ void Program::SetParameterOffsetsAndIndex() {
}
}
+bool Program::IsValid() const {
+ for (int i = 0; i < residual_blocks_.size(); ++i) {
+ const ResidualBlock* residual_block = residual_blocks_[i];
+ if (residual_block->index() != i) {
+ LOG(WARNING) << "Residual block: " << i
+ << " has incorrect index: " << residual_block->index();
+ return false;
+ }
+ }
+
+ int state_offset = 0;
+ int delta_offset = 0;
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ const ParameterBlock* parameter_block = parameter_blocks_[i];
+ if (parameter_block->index() != i ||
+ parameter_block->state_offset() != state_offset ||
+ parameter_block->delta_offset() != delta_offset) {
+      LOG(WARNING) << "Parameter block: " << i
+                   << " has incorrect indexing information: "
+                   << parameter_block->ToString();
+ return false;
+ }
+
+ state_offset += parameter_blocks_[i]->Size();
+ delta_offset += parameter_blocks_[i]->LocalSize();
+ }
+
+ return true;
+}
+
+bool Program::ParameterBlocksAreFinite(string* message) const {
+ CHECK_NOTNULL(message);
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ const ParameterBlock* parameter_block = parameter_blocks_[i];
+ const double* array = parameter_block->user_state();
+ const int size = parameter_block->Size();
+ const int invalid_index = FindInvalidValue(size, array);
+ if (invalid_index != size) {
+ *message = StringPrintf(
+ "ParameterBlock: %p with size %d has at least one invalid value.\n"
+ "First invalid value is at index: %d.\n"
+ "Parameter block values: ",
+ array, size, invalid_index);
+ AppendArrayToString(size, array, message);
+ return false;
+ }
+ }
+ return true;
+}
+
+bool Program::IsBoundsConstrained() const {
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ const ParameterBlock* parameter_block = parameter_blocks_[i];
+ if (parameter_block->IsConstant()) {
+ continue;
+ }
+ const int size = parameter_block->Size();
+ for (int j = 0; j < size; ++j) {
+ const double lower_bound = parameter_block->LowerBoundForParameter(j);
+ const double upper_bound = parameter_block->UpperBoundForParameter(j);
+ if (lower_bound > -std::numeric_limits<double>::max() ||
+ upper_bound < std::numeric_limits<double>::max()) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+bool Program::IsFeasible(string* message) const {
+ CHECK_NOTNULL(message);
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ const ParameterBlock* parameter_block = parameter_blocks_[i];
+ const double* parameters = parameter_block->user_state();
+ const int size = parameter_block->Size();
+ if (parameter_block->IsConstant()) {
+ // Constant parameter blocks must start in the feasible region
+ // to ultimately produce a feasible solution, since Ceres cannot
+ // change them.
+ for (int j = 0; j < size; ++j) {
+ const double lower_bound = parameter_block->LowerBoundForParameter(j);
+ const double upper_bound = parameter_block->UpperBoundForParameter(j);
+ if (parameters[j] < lower_bound || parameters[j] > upper_bound) {
+ *message = StringPrintf(
+ "ParameterBlock: %p with size %d has at least one infeasible "
+ "value."
+ "\nFirst infeasible value is at index: %d."
+ "\nLower bound: %e, value: %e, upper bound: %e"
+ "\nParameter block values: ",
+ parameters, size, j, lower_bound, parameters[j], upper_bound);
+ AppendArrayToString(size, parameters, message);
+ return false;
+ }
+ }
+ } else {
+ // Variable parameter blocks must have non-empty feasible
+ // regions, otherwise there is no way to produce a feasible
+ // solution.
+ for (int j = 0; j < size; ++j) {
+ const double lower_bound = parameter_block->LowerBoundForParameter(j);
+ const double upper_bound = parameter_block->UpperBoundForParameter(j);
+ if (lower_bound >= upper_bound) {
+ *message = StringPrintf(
+ "ParameterBlock: %p with size %d has at least one infeasible "
+ "bound."
+ "\nFirst infeasible bound is at index: %d."
+ "\nLower bound: %e, upper bound: %e"
+ "\nParameter block values: ",
+ parameters, size, j, lower_bound, upper_bound);
+ AppendArrayToString(size, parameters, message);
+ return false;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+Program* Program::CreateReducedProgram(vector<double*>* removed_parameter_blocks,
+ double* fixed_cost,
+ string* error) const {
+ CHECK_NOTNULL(removed_parameter_blocks);
+ CHECK_NOTNULL(fixed_cost);
+ CHECK_NOTNULL(error);
+
+ scoped_ptr<Program> reduced_program(new Program(*this));
+ if (!reduced_program->RemoveFixedBlocks(removed_parameter_blocks,
+ fixed_cost,
+ error)) {
+ return NULL;
+ }
+
+ reduced_program->SetParameterOffsetsAndIndex();
+ return reduced_program.release();
+}
+
+bool Program::RemoveFixedBlocks(vector<double*>* removed_parameter_blocks,
+ double* fixed_cost,
+ string* error) {
+ CHECK_NOTNULL(removed_parameter_blocks);
+ CHECK_NOTNULL(fixed_cost);
+ CHECK_NOTNULL(error);
+
+ scoped_array<double> residual_block_evaluate_scratch;
+ residual_block_evaluate_scratch.reset(
+ new double[MaxScratchDoublesNeededForEvaluate()]);
+ *fixed_cost = 0.0;
+
+ // Mark all the parameters as unused. Abuse the index member of the
+ // parameter blocks for the marking.
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ parameter_blocks_[i]->set_index(-1);
+ }
+
+  // Filter out residual blocks that have all-constant parameters, and
+  // mark all the parameter blocks that appear in residual blocks.
+ int num_active_residual_blocks = 0;
+ for (int i = 0; i < residual_blocks_.size(); ++i) {
+ ResidualBlock* residual_block = residual_blocks_[i];
+ int num_parameter_blocks = residual_block->NumParameterBlocks();
+
+ // Determine if the residual block is fixed, and also mark varying
+ // parameters that appear in the residual block.
+ bool all_constant = true;
+ for (int k = 0; k < num_parameter_blocks; k++) {
+ ParameterBlock* parameter_block = residual_block->parameter_blocks()[k];
+ if (!parameter_block->IsConstant()) {
+ all_constant = false;
+ parameter_block->set_index(1);
+ }
+ }
+
+ if (!all_constant) {
+ residual_blocks_[num_active_residual_blocks++] = residual_block;
+ continue;
+ }
+
+ // The residual is constant and will be removed, so its cost is
+ // added to the variable fixed_cost.
+ double cost = 0.0;
+ if (!residual_block->Evaluate(true,
+ &cost,
+ NULL,
+ NULL,
+ residual_block_evaluate_scratch.get())) {
+ *error = StringPrintf("Evaluation of the residual %d failed during "
+ "removal of fixed residual blocks.", i);
+ return false;
+ }
+ *fixed_cost += cost;
+ }
+ residual_blocks_.resize(num_active_residual_blocks);
+
+ // Filter out unused or fixed parameter blocks.
+ int num_active_parameter_blocks = 0;
+ removed_parameter_blocks->clear();
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ ParameterBlock* parameter_block = parameter_blocks_[i];
+ if (parameter_block->index() == -1) {
+ removed_parameter_blocks->push_back(parameter_block->mutable_user_state());
+ } else {
+ parameter_blocks_[num_active_parameter_blocks++] = parameter_block;
+ }
+ }
+ parameter_blocks_.resize(num_active_parameter_blocks);
+
+ if (!(((NumResidualBlocks() == 0) &&
+ (NumParameterBlocks() == 0)) ||
+ ((NumResidualBlocks() != 0) &&
+ (NumParameterBlocks() != 0)))) {
+ *error = "Congratulations, you found a bug in Ceres. Please report it.";
+ return false;
+ }
+
+ return true;
+}
+
+bool Program::IsParameterBlockSetIndependent(const set<double*>& independent_set) const {
+  // Loop over each residual block and ensure that no two parameter
+  // blocks in the same residual block are part of
+  // independent_set, as that would violate the assumption that it
+  // is an independent set in the Hessian matrix.
+ for (vector<ResidualBlock*>::const_iterator it = residual_blocks_.begin();
+ it != residual_blocks_.end();
+ ++it) {
+ ParameterBlock* const* parameter_blocks = (*it)->parameter_blocks();
+ const int num_parameter_blocks = (*it)->NumParameterBlocks();
+ int count = 0;
+ for (int i = 0; i < num_parameter_blocks; ++i) {
+ count += independent_set.count(
+ parameter_blocks[i]->mutable_user_state());
+ }
+ if (count > 1) {
+ return false;
+ }
+ }
+ return true;
+}
+
+TripletSparseMatrix* Program::CreateJacobianBlockSparsityTranspose() const {
+ // Matrix to store the block sparsity structure of the Jacobian.
+ TripletSparseMatrix* tsm =
+ new TripletSparseMatrix(NumParameterBlocks(),
+ NumResidualBlocks(),
+ 10 * NumResidualBlocks());
+ int num_nonzeros = 0;
+ int* rows = tsm->mutable_rows();
+ int* cols = tsm->mutable_cols();
+ double* values = tsm->mutable_values();
+
+ for (int c = 0; c < residual_blocks_.size(); ++c) {
+ const ResidualBlock* residual_block = residual_blocks_[c];
+ const int num_parameter_blocks = residual_block->NumParameterBlocks();
+ ParameterBlock* const* parameter_blocks =
+ residual_block->parameter_blocks();
+
+ for (int j = 0; j < num_parameter_blocks; ++j) {
+ if (parameter_blocks[j]->IsConstant()) {
+ continue;
+ }
+
+ // Re-size the matrix if needed.
+ if (num_nonzeros >= tsm->max_num_nonzeros()) {
+ tsm->set_num_nonzeros(num_nonzeros);
+ tsm->Reserve(2 * num_nonzeros);
+ rows = tsm->mutable_rows();
+ cols = tsm->mutable_cols();
+ values = tsm->mutable_values();
+ }
+
+ const int r = parameter_blocks[j]->index();
+ rows[num_nonzeros] = r;
+ cols[num_nonzeros] = c;
+ values[num_nonzeros] = 1.0;
+ ++num_nonzeros;
+ }
+ }
+
+ tsm->set_num_nonzeros(num_nonzeros);
+ return tsm;
+}
+
int Program::NumResidualBlocks() const {
return residual_blocks_.size();
}
diff --git a/internal/ceres/program.h b/internal/ceres/program.h
index 5002b7e..c7b22c4 100644
--- a/internal/ceres/program.h
+++ b/internal/ceres/program.h
@@ -31,6 +31,7 @@
#ifndef CERES_INTERNAL_PROGRAM_H_
#define CERES_INTERNAL_PROGRAM_H_
+#include <set>
#include <string>
#include <vector>
#include "ceres/internal/port.h"
@@ -41,6 +42,7 @@ namespace internal {
class ParameterBlock;
class ProblemImpl;
class ResidualBlock;
+class TripletSparseMatrix;
// A nonlinear least squares optimization problem. This is different from the
// similarly-named "Problem" object, which offers a mutation interface for
@@ -99,6 +101,51 @@ class Program {
// position of the parameter in the state and delta vector respectively.
void SetParameterOffsetsAndIndex();
+ // Check if the internal state of the program (the indexing and the
+ // offsets) are correct.
+ bool IsValid() const;
+
+ bool ParameterBlocksAreFinite(string* message) const;
+
+ // Returns true if the program has any non-constant parameter blocks
+ // which have non-trivial bounds constraints.
+ bool IsBoundsConstrained() const;
+
+ // Returns false, if the program has any constant parameter blocks
+ // which are not feasible, or any variable parameter blocks which
+ // have a lower bound greater than or equal to the upper bound.
+ bool IsFeasible(string* message) const;
+
+  // Loop over each residual block and ensure that no two parameter
+  // blocks in the same residual block are part of
+  // independent_set, as that would violate the assumption that it
+  // is an independent set in the Hessian matrix.
+ bool IsParameterBlockSetIndependent(const set<double*>& independent_set) const;
+
+ // Create a TripletSparseMatrix which contains the zero-one
+ // structure corresponding to the block sparsity of the transpose of
+ // the Jacobian matrix.
+ //
+ // Caller owns the result.
+ TripletSparseMatrix* CreateJacobianBlockSparsityTranspose() const;
+
+  // Create a copy of this program, removing constant parameter
+  // blocks and residual blocks with no varying parameter blocks,
+  // while preserving their relative order.
+ //
+ // removed_parameter_blocks on exit will contain the list of
+ // parameter blocks that were removed.
+ //
+ // fixed_cost will be equal to the sum of the costs of the residual
+ // blocks that were removed.
+ //
+ // If there was a problem, then the function will return a NULL
+ // pointer and error will contain a human readable description of
+ // the problem.
+ Program* CreateReducedProgram(vector<double*>* removed_parameter_blocks,
+ double* fixed_cost,
+ string* error) const;
+
// See problem.h for what these do.
int NumParameterBlocks() const;
int NumParameters() const;
@@ -116,6 +163,21 @@ class Program {
string ToString() const;
private:
+ // Remove constant parameter blocks and residual blocks with no
+ // varying parameter blocks while preserving their relative order.
+ //
+ // removed_parameter_blocks on exit will contain the list of
+ // parameter blocks that were removed.
+ //
+ // fixed_cost will be equal to the sum of the costs of the residual
+ // blocks that were removed.
+ //
+  // If there was a problem, then the function will return false and
+  // message will contain a human readable description of the problem.
+ bool RemoveFixedBlocks(vector<double*>* removed_parameter_blocks,
+ double* fixed_cost,
+ string* message);
+
// The Program does not own the ParameterBlock or ResidualBlock objects.
vector<ParameterBlock*> parameter_blocks_;
vector<ResidualBlock*> residual_blocks_;
diff --git a/internal/ceres/program_evaluator.h b/internal/ceres/program_evaluator.h
index 8aa2a39..672c233 100644
--- a/internal/ceres/program_evaluator.h
+++ b/internal/ceres/program_evaluator.h
@@ -79,6 +79,9 @@
#ifndef CERES_INTERNAL_PROGRAM_EVALUATOR_H_
#define CERES_INTERNAL_PROGRAM_EVALUATOR_H_
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifdef CERES_USE_OPENMP
#include <omp.h>
#endif
@@ -97,7 +100,13 @@
namespace ceres {
namespace internal {
-template<typename EvaluatePreparer, typename JacobianWriter>
+struct NullJacobianFinalizer {
+ void operator()(SparseMatrix* jacobian, int num_parameters) {}
+};
+
+template<typename EvaluatePreparer,
+ typename JacobianWriter,
+ typename JacobianFinalizer = NullJacobianFinalizer>
class ProgramEvaluator : public Evaluator {
public:
ProgramEvaluator(const Evaluator::Options &options, Program* program)
@@ -244,9 +253,10 @@ class ProgramEvaluator : public Evaluator {
}
if (!abort) {
+ const int num_parameters = program_->NumEffectiveParameters();
+
// Sum the cost and gradient (if requested) from each thread.
(*cost) = 0.0;
- int num_parameters = program_->NumEffectiveParameters();
if (gradient != NULL) {
VectorRef(gradient, num_parameters).setZero();
}
@@ -257,6 +267,15 @@ class ProgramEvaluator : public Evaluator {
VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters);
}
}
+
+ // Finalize the Jacobian if it is available.
+ // `num_parameters` is passed to the finalizer so that additional
+ // storage can be reserved for additional diagonal elements if
+ // necessary.
+ if (jacobian != NULL) {
+ JacobianFinalizer f;
+ f(jacobian, num_parameters);
+ }
}
return !abort;
}
diff --git a/internal/ceres/program_test.cc b/internal/ceres/program_test.cc
new file mode 100644
index 0000000..10bfa12
--- /dev/null
+++ b/internal/ceres/program_test.cc
@@ -0,0 +1,431 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/program.h"
+
+#include <limits>
+#include <cmath>
+#include <vector>
+#include "ceres/sized_cost_function.h"
+#include "ceres/problem_impl.h"
+#include "ceres/residual_block.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// A cost function that simply returns its argument.
+class UnaryIdentityCostFunction : public SizedCostFunction<1, 1> {
+ public:
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ residuals[0] = parameters[0][0];
+ if (jacobians != NULL && jacobians[0] != NULL) {
+ jacobians[0][0] = 1.0;
+ }
+ return true;
+ }
+};
+
+// Templated base class for the CostFunction signatures.
+template <int kNumResiduals, int N0, int N1, int N2>
+class MockCostFunctionBase : public
+SizedCostFunction<kNumResiduals, N0, N1, N2> {
+ public:
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ for (int i = 0; i < kNumResiduals; ++i) {
+ residuals[i] = kNumResiduals + N0 + N1 + N2;
+ }
+ return true;
+ }
+};
+
+class UnaryCostFunction : public MockCostFunctionBase<2, 1, 0, 0> {};
+class BinaryCostFunction : public MockCostFunctionBase<2, 1, 1, 0> {};
+class TernaryCostFunction : public MockCostFunctionBase<2, 1, 1, 1> {};
+
+TEST(Program, RemoveFixedBlocksNothingConstant) {
+ ProblemImpl problem;
+ double x;
+ double y;
+ double z;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
+ problem.AddResidualBlock(new TernaryCostFunction(), NULL, &x, &y, &z);
+
+ vector<double*> removed_parameter_blocks;
+ double fixed_cost = 0.0;
+ string message;
+ scoped_ptr<Program> reduced_program(
+ CHECK_NOTNULL(problem
+ .program()
+ .CreateReducedProgram(&removed_parameter_blocks,
+ &fixed_cost,
+ &message)));
+
+ EXPECT_EQ(reduced_program->NumParameterBlocks(), 3);
+ EXPECT_EQ(reduced_program->NumResidualBlocks(), 3);
+ EXPECT_EQ(removed_parameter_blocks.size(), 0);
+ EXPECT_EQ(fixed_cost, 0.0);
+}
+
+TEST(Program, RemoveFixedBlocksAllParameterBlocksConstant) {
+ ProblemImpl problem;
+ double x = 1.0;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
+ problem.SetParameterBlockConstant(&x);
+
+ vector<double*> removed_parameter_blocks;
+ double fixed_cost = 0.0;
+ string message;
+ scoped_ptr<Program> reduced_program(
+ CHECK_NOTNULL(problem
+ .program()
+ .CreateReducedProgram(&removed_parameter_blocks,
+ &fixed_cost,
+ &message)));
+ EXPECT_EQ(reduced_program->NumParameterBlocks(), 0);
+ EXPECT_EQ(reduced_program->NumResidualBlocks(), 0);
+ EXPECT_EQ(removed_parameter_blocks.size(), 1);
+ EXPECT_EQ(removed_parameter_blocks[0], &x);
+ EXPECT_EQ(fixed_cost, 9.0);
+}
+
+
+TEST(Program, RemoveFixedBlocksNoResidualBlocks) {
+ ProblemImpl problem;
+ double x;
+ double y;
+ double z;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+
+ vector<double*> removed_parameter_blocks;
+ double fixed_cost = 0.0;
+ string message;
+ scoped_ptr<Program> reduced_program(
+ CHECK_NOTNULL(problem
+ .program()
+ .CreateReducedProgram(&removed_parameter_blocks,
+ &fixed_cost,
+ &message)));
+ EXPECT_EQ(reduced_program->NumParameterBlocks(), 0);
+ EXPECT_EQ(reduced_program->NumResidualBlocks(), 0);
+ EXPECT_EQ(removed_parameter_blocks.size(), 3);
+ EXPECT_EQ(fixed_cost, 0.0);
+}
+
+TEST(Program, RemoveFixedBlocksOneParameterBlockConstant) {
+ ProblemImpl problem;
+ double x;
+ double y;
+ double z;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
+ problem.SetParameterBlockConstant(&x);
+
+ vector<double*> removed_parameter_blocks;
+ double fixed_cost = 0.0;
+ string message;
+ scoped_ptr<Program> reduced_program(
+ CHECK_NOTNULL(problem
+ .program()
+ .CreateReducedProgram(&removed_parameter_blocks,
+ &fixed_cost,
+ &message)));
+ EXPECT_EQ(reduced_program->NumParameterBlocks(), 1);
+ EXPECT_EQ(reduced_program->NumResidualBlocks(), 1);
+}
+
+TEST(Program, RemoveFixedBlocksNumEliminateBlocks) {
+ ProblemImpl problem;
+ double x;
+ double y;
+ double z;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
+ problem.AddResidualBlock(new TernaryCostFunction(), NULL, &x, &y, &z);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
+ problem.SetParameterBlockConstant(&x);
+
+ vector<double*> removed_parameter_blocks;
+ double fixed_cost = 0.0;
+ string message;
+ scoped_ptr<Program> reduced_program(
+ CHECK_NOTNULL(problem
+ .program()
+ .CreateReducedProgram(&removed_parameter_blocks,
+ &fixed_cost,
+ &message)));
+ EXPECT_EQ(reduced_program->NumParameterBlocks(), 2);
+ EXPECT_EQ(reduced_program->NumResidualBlocks(), 2);
+}
+
+TEST(Program, RemoveFixedBlocksFixedCost) {
+ ProblemImpl problem;
+ double x = 1.23;
+ double y = 4.56;
+ double z = 7.89;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+ problem.AddResidualBlock(new UnaryIdentityCostFunction(), NULL, &x);
+ problem.AddResidualBlock(new TernaryCostFunction(), NULL, &x, &y, &z);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
+ problem.SetParameterBlockConstant(&x);
+
+ ResidualBlock *expected_removed_block = problem.program().residual_blocks()[0];
+ scoped_array<double> scratch(
+ new double[expected_removed_block->NumScratchDoublesForEvaluate()]);
+ double expected_fixed_cost;
+ expected_removed_block->Evaluate(true,
+ &expected_fixed_cost,
+ NULL,
+ NULL,
+ scratch.get());
+
+
+ vector<double*> removed_parameter_blocks;
+ double fixed_cost = 0.0;
+ string message;
+ scoped_ptr<Program> reduced_program(
+ CHECK_NOTNULL(problem
+ .program()
+ .CreateReducedProgram(&removed_parameter_blocks,
+ &fixed_cost,
+ &message)));
+
+ EXPECT_EQ(reduced_program->NumParameterBlocks(), 2);
+ EXPECT_EQ(reduced_program->NumResidualBlocks(), 2);
+ EXPECT_DOUBLE_EQ(fixed_cost, expected_fixed_cost);
+}
+
+TEST(Program, CreateJacobianBlockSparsityTranspose) {
+ ProblemImpl problem;
+ double x[2];
+ double y[3];
+ double z;
+
+ problem.AddParameterBlock(x, 2);
+ problem.AddParameterBlock(y, 3);
+ problem.AddParameterBlock(&z, 1);
+
+ problem.AddResidualBlock(new MockCostFunctionBase<2, 2, 0, 0>(), NULL, x);
+ problem.AddResidualBlock(new MockCostFunctionBase<3, 1, 2, 0>(), NULL, &z, x);
+ problem.AddResidualBlock(new MockCostFunctionBase<4, 1, 3, 0>(), NULL, &z, y);
+ problem.AddResidualBlock(new MockCostFunctionBase<5, 1, 3, 0>(), NULL, &z, y);
+ problem.AddResidualBlock(new MockCostFunctionBase<1, 2, 1, 0>(), NULL, x, &z);
+ problem.AddResidualBlock(new MockCostFunctionBase<2, 1, 3, 0>(), NULL, &z, y);
+ problem.AddResidualBlock(new MockCostFunctionBase<2, 2, 1, 0>(), NULL, x, &z);
+ problem.AddResidualBlock(new MockCostFunctionBase<1, 3, 0, 0>(), NULL, y);
+
+ TripletSparseMatrix expected_block_sparse_jacobian(3, 8, 14);
+ {
+ int* rows = expected_block_sparse_jacobian.mutable_rows();
+ int* cols = expected_block_sparse_jacobian.mutable_cols();
+ double* values = expected_block_sparse_jacobian.mutable_values();
+ rows[0] = 0;
+ cols[0] = 0;
+
+ rows[1] = 2;
+ cols[1] = 1;
+ rows[2] = 0;
+ cols[2] = 1;
+
+ rows[3] = 2;
+ cols[3] = 2;
+ rows[4] = 1;
+ cols[4] = 2;
+
+ rows[5] = 2;
+ cols[5] = 3;
+ rows[6] = 1;
+ cols[6] = 3;
+
+ rows[7] = 0;
+ cols[7] = 4;
+ rows[8] = 2;
+ cols[8] = 4;
+
+ rows[9] = 2;
+ cols[9] = 5;
+ rows[10] = 1;
+ cols[10] = 5;
+
+ rows[11] = 0;
+ cols[11] = 6;
+ rows[12] = 2;
+ cols[12] = 6;
+
+ rows[13] = 1;
+ cols[13] = 7;
+ fill(values, values + 14, 1.0);
+ expected_block_sparse_jacobian.set_num_nonzeros(14);
+ }
+
+ Program* program = problem.mutable_program();
+ program->SetParameterOffsetsAndIndex();
+
+ scoped_ptr<TripletSparseMatrix> actual_block_sparse_jacobian(
+ program->CreateJacobianBlockSparsityTranspose());
+
+ Matrix expected_dense_jacobian;
+ expected_block_sparse_jacobian.ToDenseMatrix(&expected_dense_jacobian);
+
+ Matrix actual_dense_jacobian;
+ actual_block_sparse_jacobian->ToDenseMatrix(&actual_dense_jacobian);
+ EXPECT_EQ((expected_dense_jacobian - actual_dense_jacobian).norm(), 0.0);
+}
+
+template <int kNumResiduals, int kNumParameterBlocks>
+class NumParameterBlocksCostFunction : public CostFunction {
+ public:
+ NumParameterBlocksCostFunction() {
+ set_num_residuals(kNumResiduals);
+ for (int i = 0; i < kNumParameterBlocks; ++i) {
+ mutable_parameter_block_sizes()->push_back(1);
+ }
+ }
+
+ virtual ~NumParameterBlocksCostFunction() {
+ }
+
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ return true;
+ }
+};
+
+TEST(Program, ReallocationInCreateJacobianBlockSparsityTranspose) {
+ // CreateJacobianBlockSparsityTranspose starts with a conservative
+ // estimate of the size of the sparsity pattern. This test ensures
+ // that when those estimates are violated, the reallocation/resizing
+ // logic works correctly.
+
+ ProblemImpl problem;
+ double x[20];
+
+ vector<double*> parameter_blocks;
+ for (int i = 0; i < 20; ++i) {
+ problem.AddParameterBlock(x + i, 1);
+ parameter_blocks.push_back(x + i);
+ }
+
+ problem.AddResidualBlock(new NumParameterBlocksCostFunction<1, 20>(),
+ NULL,
+ parameter_blocks);
+
+ TripletSparseMatrix expected_block_sparse_jacobian(20, 1, 20);
+ {
+ int* rows = expected_block_sparse_jacobian.mutable_rows();
+ int* cols = expected_block_sparse_jacobian.mutable_cols();
+ for (int i = 0; i < 20; ++i) {
+ rows[i] = i;
+ cols[i] = 0;
+ }
+
+ double* values = expected_block_sparse_jacobian.mutable_values();
+ fill(values, values + 20, 1.0);
+ expected_block_sparse_jacobian.set_num_nonzeros(20);
+ }
+
+ Program* program = problem.mutable_program();
+ program->SetParameterOffsetsAndIndex();
+
+ scoped_ptr<TripletSparseMatrix> actual_block_sparse_jacobian(
+ program->CreateJacobianBlockSparsityTranspose());
+
+ Matrix expected_dense_jacobian;
+ expected_block_sparse_jacobian.ToDenseMatrix(&expected_dense_jacobian);
+
+ Matrix actual_dense_jacobian;
+ actual_block_sparse_jacobian->ToDenseMatrix(&actual_dense_jacobian);
+ EXPECT_EQ((expected_dense_jacobian - actual_dense_jacobian).norm(), 0.0);
+}
+
+TEST(Program, ProblemHasNanParameterBlocks) {
+ ProblemImpl problem;
+ double x[2];
+ x[0] = 1.0;
+ x[1] = std::numeric_limits<double>::quiet_NaN();
+ problem.AddResidualBlock(new MockCostFunctionBase<1, 2, 0, 0>(), NULL, x);
+ string error;
+ EXPECT_FALSE(problem.program().ParameterBlocksAreFinite(&error));
+ EXPECT_NE(error.find("has at least one invalid value"),
+ string::npos) << error;
+}
+
+TEST(Program, InfeasibleParameterBlock) {
+ ProblemImpl problem;
+ double x[] = {0.0, 0.0};
+ problem.AddResidualBlock(new MockCostFunctionBase<1, 2, 0, 0>(), NULL, x);
+ problem.SetParameterLowerBound(x, 0, 2.0);
+ problem.SetParameterUpperBound(x, 0, 1.0);
+ string error;
+ EXPECT_FALSE(problem.program().IsFeasible(&error));
+ EXPECT_NE(error.find("infeasible bound"), string::npos) << error;
+}
+
+TEST(Program, InfeasibleConstantParameterBlock) {
+ ProblemImpl problem;
+ double x[] = {0.0, 0.0};
+ problem.AddResidualBlock(new MockCostFunctionBase<1, 2, 0, 0>(), NULL, x);
+ problem.SetParameterLowerBound(x, 0, 1.0);
+ problem.SetParameterUpperBound(x, 0, 2.0);
+ problem.SetParameterBlockConstant(x);
+ string error;
+ EXPECT_FALSE(problem.program().IsFeasible(&error));
+ EXPECT_NE(error.find("infeasible value"), string::npos) << error;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/reorder_program.cc b/internal/ceres/reorder_program.cc
new file mode 100644
index 0000000..162bfb8
--- /dev/null
+++ b/internal/ceres/reorder_program.cc
@@ -0,0 +1,434 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/reorder_program.h"
+
+#include <algorithm>
+#include <numeric>
+#include <vector>
+
+#include "ceres/cxsparse.h"
+#include "ceres/internal/port.h"
+#include "ceres/ordered_groups.h"
+#include "ceres/parameter_block.h"
+#include "ceres/parameter_block_ordering.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/solver.h"
+#include "ceres/suitesparse.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
+// Find the minimum index of any parameter block used by the given residual.
+// Parameter blocks with indices greater than or equal to num_eliminate_blocks
+// are considered to have an index equal to num_eliminate_blocks.
+static int MinParameterBlock(const ResidualBlock* residual_block,
+ int num_eliminate_blocks) {
+ int min_parameter_block_position = num_eliminate_blocks;
+ for (int i = 0; i < residual_block->NumParameterBlocks(); ++i) {
+ ParameterBlock* parameter_block = residual_block->parameter_blocks()[i];
+ if (!parameter_block->IsConstant()) {
+ CHECK_NE(parameter_block->index(), -1)
+ << "Did you forget to call Program::SetParameterOffsetsAndIndex()? "
+ << "This is a Ceres bug; please contact the developers!";
+ min_parameter_block_position = std::min(parameter_block->index(),
+ min_parameter_block_position);
+ }
+ }
+ return min_parameter_block_position;
+}
+
+void OrderingForSparseNormalCholeskyUsingSuiteSparse(
+ const TripletSparseMatrix& tsm_block_jacobian_transpose,
+ const vector<ParameterBlock*>& parameter_blocks,
+ const ParameterBlockOrdering& parameter_block_ordering,
+ int* ordering) {
+#ifdef CERES_NO_SUITESPARSE
+ LOG(FATAL) << "Congratulations, you found a Ceres bug! "
+ << "Please report this error to the developers.";
+#else
+ SuiteSparse ss;
+ cholmod_sparse* block_jacobian_transpose =
+ ss.CreateSparseMatrix(
+ const_cast<TripletSparseMatrix*>(&tsm_block_jacobian_transpose));
+
+  // If CAMD is not available, or the user did not supply a useful
+  // ordering, then just use regular AMD.
+ if (parameter_block_ordering.NumGroups() <= 1 ||
+ !SuiteSparse::IsConstrainedApproximateMinimumDegreeOrderingAvailable()) {
+ ss.ApproximateMinimumDegreeOrdering(block_jacobian_transpose, &ordering[0]);
+ } else {
+ vector<int> constraints;
+ for (int i = 0; i < parameter_blocks.size(); ++i) {
+ constraints.push_back(
+ parameter_block_ordering.GroupId(
+ parameter_blocks[i]->mutable_user_state()));
+ }
+ ss.ConstrainedApproximateMinimumDegreeOrdering(block_jacobian_transpose,
+ &constraints[0],
+ ordering);
+ }
+
+ ss.Free(block_jacobian_transpose);
+#endif // CERES_NO_SUITESPARSE
+}
+
+void OrderingForSparseNormalCholeskyUsingCXSparse(
+ const TripletSparseMatrix& tsm_block_jacobian_transpose,
+ int* ordering) {
+#ifdef CERES_NO_CXSPARSE
+ LOG(FATAL) << "Congratulations, you found a Ceres bug! "
+ << "Please report this error to the developers.";
+#else // CERES_NO_CXSPARSE
+ // CXSparse works with J'J instead of J'. So compute the block
+ // sparsity for J'J and compute an approximate minimum degree
+ // ordering.
+ CXSparse cxsparse;
+ cs_di* block_jacobian_transpose;
+ block_jacobian_transpose =
+ cxsparse.CreateSparseMatrix(
+ const_cast<TripletSparseMatrix*>(&tsm_block_jacobian_transpose));
+ cs_di* block_jacobian = cxsparse.TransposeMatrix(block_jacobian_transpose);
+ cs_di* block_hessian =
+ cxsparse.MatrixMatrixMultiply(block_jacobian_transpose, block_jacobian);
+ cxsparse.Free(block_jacobian);
+ cxsparse.Free(block_jacobian_transpose);
+
+ cxsparse.ApproximateMinimumDegreeOrdering(block_hessian, ordering);
+ cxsparse.Free(block_hessian);
+#endif // CERES_NO_CXSPARSE
+}
+
+} // namespace
+
+bool ApplyOrdering(const ProblemImpl::ParameterMap& parameter_map,
+ const ParameterBlockOrdering& ordering,
+ Program* program,
+ string* error) {
+ const int num_parameter_blocks = program->NumParameterBlocks();
+ if (ordering.NumElements() != num_parameter_blocks) {
+ *error = StringPrintf("User specified ordering does not have the same "
+ "number of parameters as the problem. The problem"
+ "has %d blocks while the ordering has %d blocks.",
+ num_parameter_blocks,
+ ordering.NumElements());
+ return false;
+ }
+
+ vector<ParameterBlock*>* parameter_blocks =
+ program->mutable_parameter_blocks();
+ parameter_blocks->clear();
+
+ const map<int, set<double*> >& groups =
+ ordering.group_to_elements();
+
+ for (map<int, set<double*> >::const_iterator group_it = groups.begin();
+ group_it != groups.end();
+ ++group_it) {
+ const set<double*>& group = group_it->second;
+ for (set<double*>::const_iterator parameter_block_ptr_it = group.begin();
+ parameter_block_ptr_it != group.end();
+ ++parameter_block_ptr_it) {
+ ProblemImpl::ParameterMap::const_iterator parameter_block_it =
+ parameter_map.find(*parameter_block_ptr_it);
+ if (parameter_block_it == parameter_map.end()) {
+ *error = StringPrintf("User specified ordering contains a pointer "
+ "to a double that is not a parameter block in "
+ "the problem. The invalid double is in group: %d",
+ group_it->first);
+ return false;
+ }
+ parameter_blocks->push_back(parameter_block_it->second);
+ }
+ }
+ return true;
+}
+
+bool LexicographicallyOrderResidualBlocks(const int num_eliminate_blocks,
+ Program* program,
+ string* error) {
+ CHECK_GE(num_eliminate_blocks, 1)
+ << "Congratulations, you found a Ceres bug! Please report this error "
+ << "to the developers.";
+
+ // Create a histogram of the number of residuals for each E block. There is an
+ // extra bucket at the end to catch all non-eliminated F blocks.
+ vector<int> residual_blocks_per_e_block(num_eliminate_blocks + 1);
+ vector<ResidualBlock*>* residual_blocks = program->mutable_residual_blocks();
+ vector<int> min_position_per_residual(residual_blocks->size());
+ for (int i = 0; i < residual_blocks->size(); ++i) {
+ ResidualBlock* residual_block = (*residual_blocks)[i];
+ int position = MinParameterBlock(residual_block, num_eliminate_blocks);
+ min_position_per_residual[i] = position;
+ DCHECK_LE(position, num_eliminate_blocks);
+ residual_blocks_per_e_block[position]++;
+ }
+
+ // Run a cumulative sum on the histogram, to obtain offsets to the start of
+ // each histogram bucket (where each bucket is for the residuals for that
+ // E-block).
+ vector<int> offsets(num_eliminate_blocks + 1);
+ std::partial_sum(residual_blocks_per_e_block.begin(),
+ residual_blocks_per_e_block.end(),
+ offsets.begin());
+ CHECK_EQ(offsets.back(), residual_blocks->size())
+ << "Congratulations, you found a Ceres bug! Please report this error "
+ << "to the developers.";
+
+ CHECK(find(residual_blocks_per_e_block.begin(),
+ residual_blocks_per_e_block.end() - 1, 0) !=
+ residual_blocks_per_e_block.end())
+ << "Congratulations, you found a Ceres bug! Please report this error "
+ << "to the developers.";
+
+ // Fill in each bucket with the residual blocks for its corresponding E block.
+ // Each bucket is individually filled from the back of the bucket to the front
+ // of the bucket. The filling order among the buckets is dictated by the
+ // residual blocks. This loop uses the offsets as counters; subtracting one
+ // from each offset as a residual block is placed in the bucket. When the
+// filling is finished, the offset pointers should have shifted down one
+ // entry (this is verified below).
+ vector<ResidualBlock*> reordered_residual_blocks(
+ (*residual_blocks).size(), static_cast<ResidualBlock*>(NULL));
+ for (int i = 0; i < residual_blocks->size(); ++i) {
+ int bucket = min_position_per_residual[i];
+
+ // Decrement the cursor, which should now point at the next empty position.
+ offsets[bucket]--;
+
+ // Sanity.
+ CHECK(reordered_residual_blocks[offsets[bucket]] == NULL)
+ << "Congratulations, you found a Ceres bug! Please report this error "
+ << "to the developers.";
+
+ reordered_residual_blocks[offsets[bucket]] = (*residual_blocks)[i];
+ }
+
+ // Sanity check #1: The difference in bucket offsets should match the
+ // histogram sizes.
+ for (int i = 0; i < num_eliminate_blocks; ++i) {
+ CHECK_EQ(residual_blocks_per_e_block[i], offsets[i + 1] - offsets[i])
+ << "Congratulations, you found a Ceres bug! Please report this error "
+ << "to the developers.";
+ }
+ // Sanity check #2: No NULL's left behind.
+ for (int i = 0; i < reordered_residual_blocks.size(); ++i) {
+ CHECK(reordered_residual_blocks[i] != NULL)
+ << "Congratulations, you found a Ceres bug! Please report this error "
+ << "to the developers.";
+ }
+
+ // Now that the residuals are collected by E block, swap them in place.
+ swap(*program->mutable_residual_blocks(), reordered_residual_blocks);
+ return true;
+}
+
+void MaybeReorderSchurComplementColumnsUsingSuiteSparse(
+ const ParameterBlockOrdering& parameter_block_ordering,
+ Program* program) {
+ // Pre-order the columns corresponding to the schur complement if
+ // possible.
+#ifndef CERES_NO_SUITESPARSE
+ SuiteSparse ss;
+ if (!SuiteSparse::IsConstrainedApproximateMinimumDegreeOrderingAvailable()) {
+ return;
+ }
+
+ vector<int> constraints;
+ vector<ParameterBlock*>& parameter_blocks =
+ *(program->mutable_parameter_blocks());
+
+ for (int i = 0; i < parameter_blocks.size(); ++i) {
+ constraints.push_back(
+ parameter_block_ordering.GroupId(
+ parameter_blocks[i]->mutable_user_state()));
+ }
+
+ // Renumber the entries of constraints to be contiguous integers
+ // as camd requires that the group ids be in the range [0,
+ // parameter_blocks.size() - 1].
+ MapValuesToContiguousRange(constraints.size(), &constraints[0]);
+
+ // Set the offsets and index for CreateJacobianSparsityTranspose.
+ program->SetParameterOffsetsAndIndex();
+  // Compute a block sparse representation of J'.
+ scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
+ program->CreateJacobianBlockSparsityTranspose());
+
+
+ cholmod_sparse* block_jacobian_transpose =
+ ss.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
+
+ vector<int> ordering(parameter_blocks.size(), 0);
+ ss.ConstrainedApproximateMinimumDegreeOrdering(block_jacobian_transpose,
+ &constraints[0],
+ &ordering[0]);
+ ss.Free(block_jacobian_transpose);
+
+ const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
+ for (int i = 0; i < program->NumParameterBlocks(); ++i) {
+ parameter_blocks[i] = parameter_blocks_copy[ordering[i]];
+ }
+#endif
+}
+
+bool ReorderProgramForSchurTypeLinearSolver(
+ const LinearSolverType linear_solver_type,
+ const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+ const ProblemImpl::ParameterMap& parameter_map,
+ ParameterBlockOrdering* parameter_block_ordering,
+ Program* program,
+ string* error) {
+ if (parameter_block_ordering->NumGroups() == 1) {
+    // If the user supplied a parameter_block_ordering with just one
+    // group, it is equivalent to the user supplying NULL as a
+    // parameter_block_ordering. Ceres is completely free to choose the
+    // parameter block ordering as it sees fit. For Schur type solvers,
+ // this means that the user wishes for Ceres to identify the
+ // e_blocks, which we do by computing a maximal independent set.
+ vector<ParameterBlock*> schur_ordering;
+ const int num_eliminate_blocks =
+ ComputeStableSchurOrdering(*program, &schur_ordering);
+
+ CHECK_EQ(schur_ordering.size(), program->NumParameterBlocks())
+ << "Congratulations, you found a Ceres bug! Please report this error "
+ << "to the developers.";
+
+ // Update the parameter_block_ordering object.
+ for (int i = 0; i < schur_ordering.size(); ++i) {
+ double* parameter_block = schur_ordering[i]->mutable_user_state();
+ const int group_id = (i < num_eliminate_blocks) ? 0 : 1;
+ parameter_block_ordering->AddElementToGroup(parameter_block, group_id);
+ }
+
+ // We could call ApplyOrdering but this is cheaper and
+ // simpler.
+ swap(*program->mutable_parameter_blocks(), schur_ordering);
+ } else {
+ // The user provided an ordering with more than one elimination
+ // group. Trust the user and apply the ordering.
+ if (!ApplyOrdering(parameter_map,
+ *parameter_block_ordering,
+ program,
+ error)) {
+ return false;
+ }
+ }
+
+ if (linear_solver_type == SPARSE_SCHUR &&
+ sparse_linear_algebra_library_type == SUITE_SPARSE) {
+ MaybeReorderSchurComplementColumnsUsingSuiteSparse(
+ *parameter_block_ordering,
+ program);
+ }
+
+ program->SetParameterOffsetsAndIndex();
+ // Schur type solvers also require that their residual blocks be
+ // lexicographically ordered.
+ const int num_eliminate_blocks =
+ parameter_block_ordering->group_to_elements().begin()->second.size();
+ if (!LexicographicallyOrderResidualBlocks(num_eliminate_blocks,
+ program,
+ error)) {
+ return false;
+ }
+
+ program->SetParameterOffsetsAndIndex();
+ return true;
+}
+
+bool ReorderProgramForSparseNormalCholesky(
+ const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+ const ParameterBlockOrdering& parameter_block_ordering,
+ Program* program,
+ string* error) {
+
+ if (sparse_linear_algebra_library_type != SUITE_SPARSE &&
+ sparse_linear_algebra_library_type != CX_SPARSE &&
+ sparse_linear_algebra_library_type != EIGEN_SPARSE) {
+ *error = "Unknown sparse linear algebra library.";
+ return false;
+ }
+
+ // For Eigen, there is nothing to do. This is because Eigen in its
+ // current stable version does not expose a method for doing
+ // symbolic analysis on pre-ordered matrices, so a block
+ // pre-ordering is a bit pointless.
+ //
+ // The dev version as recently as July 20, 2014 has support for
+ // pre-ordering. Once this becomes more widespread, or we add
+ // support for detecting Eigen versions, we can add support for this
+ // along the lines of CXSparse.
+ if (sparse_linear_algebra_library_type == EIGEN_SPARSE) {
+ program->SetParameterOffsetsAndIndex();
+ return true;
+ }
+
+ // Set the offsets and index for CreateJacobianSparsityTranspose.
+ program->SetParameterOffsetsAndIndex();
+  // Compute a block sparse representation of J'.
+ scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
+ program->CreateJacobianBlockSparsityTranspose());
+
+ vector<int> ordering(program->NumParameterBlocks(), 0);
+ vector<ParameterBlock*>& parameter_blocks =
+ *(program->mutable_parameter_blocks());
+
+ if (sparse_linear_algebra_library_type == SUITE_SPARSE) {
+ OrderingForSparseNormalCholeskyUsingSuiteSparse(
+ *tsm_block_jacobian_transpose,
+ parameter_blocks,
+ parameter_block_ordering,
+ &ordering[0]);
+ } else if (sparse_linear_algebra_library_type == CX_SPARSE){
+ OrderingForSparseNormalCholeskyUsingCXSparse(
+ *tsm_block_jacobian_transpose,
+ &ordering[0]);
+ }
+
+ // Apply ordering.
+ const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
+ for (int i = 0; i < program->NumParameterBlocks(); ++i) {
+ parameter_blocks[i] = parameter_blocks_copy[ordering[i]];
+ }
+
+ program->SetParameterOffsetsAndIndex();
+ return true;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/reorder_program.h b/internal/ceres/reorder_program.h
new file mode 100644
index 0000000..d3962f9
--- /dev/null
+++ b/internal/ceres/reorder_program.h
@@ -0,0 +1,101 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_REORDER_PROGRAM_H_
+#define CERES_INTERNAL_REORDER_PROGRAM_H_
+
+#include <string>
+#include "ceres/internal/port.h"
+#include "ceres/parameter_block_ordering.h"
+#include "ceres/problem_impl.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class Program;
+
+// Reorder the parameter blocks in program using the ordering
+bool ApplyOrdering(const ProblemImpl::ParameterMap& parameter_map,
+ const ParameterBlockOrdering& ordering,
+ Program* program,
+ string* error);
+
+// Reorder the residuals for program, if necessary, so that the residuals
+// involving each E block occur together. This is a necessary condition for the
+// Schur eliminator, which works on these "row blocks" in the jacobian.
+bool LexicographicallyOrderResidualBlocks(int num_eliminate_blocks,
+ Program* program,
+ string* error);
+
+// Schur type solvers require that all parameter blocks eliminated
+// by the Schur eliminator occur before others and the residuals be
+// sorted in lexicographic order of their parameter blocks.
+//
+// If the parameter_block_ordering only contains one elimination
+// group then a maximal independent set is computed and used as the
+// first elimination group, otherwise the user's ordering is used.
+//
+// If the linear solver type is SPARSE_SCHUR and support for
+// constrained fill-reducing ordering is available in the sparse
+// linear algebra library (SuiteSparse version >= 4.2.0) then
+// columns of the schur complement matrix are ordered to reduce the
+// fill-in the Cholesky factorization.
+//
+// Upon return, ordering contains the parameter block ordering that
+// was used to order the program.
+bool ReorderProgramForSchurTypeLinearSolver(
+ LinearSolverType linear_solver_type,
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+ const ProblemImpl::ParameterMap& parameter_map,
+ ParameterBlockOrdering* parameter_block_ordering,
+ Program* program,
+ string* error);
+
+// The sparse Cholesky factorization routines, when factorizing the
+// Jacobian matrix, reorder its columns to reduce the fill-in.
+// Compute this permutation and re-order the parameter blocks
+// accordingly.
+//
+// When using SuiteSparse, if the parameter_block_ordering contains
+// more than one elimination group and support for constrained
+// fill-reducing ordering is available in the sparse linear algebra
+// library (SuiteSparse version >= 4.2.0) then the fill reducing
+// ordering will take it into account, otherwise it will be ignored.
+bool ReorderProgramForSparseNormalCholesky(
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+ const ParameterBlockOrdering& parameter_block_ordering,
+ Program* program,
+ string* error);
+
+} // namespace internal
+} // namespace ceres
+
+#endif  // CERES_INTERNAL_REORDER_PROGRAM_H_
diff --git a/internal/ceres/reorder_program_test.cc b/internal/ceres/reorder_program_test.cc
new file mode 100644
index 0000000..2a0c4eb
--- /dev/null
+++ b/internal/ceres/reorder_program_test.cc
@@ -0,0 +1,170 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/reorder_program.h"
+
+#include "ceres/parameter_block.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/solver.h"
+
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// Templated base class for the CostFunction signatures.
+template <int kNumResiduals, int N0, int N1, int N2>
+class MockCostFunctionBase : public
+SizedCostFunction<kNumResiduals, N0, N1, N2> {
+ public:
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ // Do nothing. This is never called.
+ return true;
+ }
+};
+
+class UnaryCostFunction : public MockCostFunctionBase<2, 1, 0, 0> {};
+class BinaryCostFunction : public MockCostFunctionBase<2, 1, 1, 0> {};
+class TernaryCostFunction : public MockCostFunctionBase<2, 1, 1, 1> {};
+
+TEST(_, ReorderResidualBlockNormalFunction) {
+ ProblemImpl problem;
+ double x;
+ double y;
+ double z;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &x);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &z);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &y);
+
+ ParameterBlockOrdering* linear_solver_ordering = new ParameterBlockOrdering;
+ linear_solver_ordering->AddElementToGroup(&x, 0);
+ linear_solver_ordering->AddElementToGroup(&y, 0);
+ linear_solver_ordering->AddElementToGroup(&z, 1);
+
+ Solver::Options options;
+ options.linear_solver_type = DENSE_SCHUR;
+ options.linear_solver_ordering.reset(linear_solver_ordering);
+
+ const vector<ResidualBlock*>& residual_blocks =
+ problem.program().residual_blocks();
+
+ vector<ResidualBlock*> expected_residual_blocks;
+
+ // This is a bit fragile, but it serves the purpose. We know the
+ // bucketing algorithm that the reordering function uses, so we
+ // expect the order for residual blocks for each e_block to be
+ // filled in reverse.
+ expected_residual_blocks.push_back(residual_blocks[4]);
+ expected_residual_blocks.push_back(residual_blocks[1]);
+ expected_residual_blocks.push_back(residual_blocks[0]);
+ expected_residual_blocks.push_back(residual_blocks[5]);
+ expected_residual_blocks.push_back(residual_blocks[2]);
+ expected_residual_blocks.push_back(residual_blocks[3]);
+
+ Program* program = problem.mutable_program();
+ program->SetParameterOffsetsAndIndex();
+
+ string message;
+ EXPECT_TRUE(LexicographicallyOrderResidualBlocks(
+ 2,
+ problem.mutable_program(),
+ &message));
+ EXPECT_EQ(residual_blocks.size(), expected_residual_blocks.size());
+ for (int i = 0; i < expected_residual_blocks.size(); ++i) {
+ EXPECT_EQ(residual_blocks[i], expected_residual_blocks[i]);
+ }
+}
+
+TEST(_, ApplyOrderingOrderingTooSmall) {
+ ProblemImpl problem;
+ double x;
+ double y;
+ double z;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+
+ ParameterBlockOrdering linear_solver_ordering;
+ linear_solver_ordering.AddElementToGroup(&x, 0);
+ linear_solver_ordering.AddElementToGroup(&y, 1);
+
+ Program program(problem.program());
+ string message;
+ EXPECT_FALSE(ApplyOrdering(problem.parameter_map(),
+ linear_solver_ordering,
+ &program,
+ &message));
+}
+
+TEST(_, ApplyOrderingNormal) {
+ ProblemImpl problem;
+ double x;
+ double y;
+ double z;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+
+ ParameterBlockOrdering linear_solver_ordering;
+ linear_solver_ordering.AddElementToGroup(&x, 0);
+ linear_solver_ordering.AddElementToGroup(&y, 2);
+ linear_solver_ordering.AddElementToGroup(&z, 1);
+
+ Program* program = problem.mutable_program();
+ string message;
+
+ EXPECT_TRUE(ApplyOrdering(problem.parameter_map(),
+ linear_solver_ordering,
+ program,
+ &message));
+ const vector<ParameterBlock*>& parameter_blocks = program->parameter_blocks();
+
+ EXPECT_EQ(parameter_blocks.size(), 3);
+ EXPECT_EQ(parameter_blocks[0]->user_state(), &x);
+ EXPECT_EQ(parameter_blocks[1]->user_state(), &z);
+ EXPECT_EQ(parameter_blocks[2]->user_state(), &y);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/residual_block_test.cc b/internal/ceres/residual_block_test.cc
index 1e03e7d..b37f50f 100644
--- a/internal/ceres/residual_block_test.cc
+++ b/internal/ceres/residual_block_test.cc
@@ -43,9 +43,9 @@ namespace internal {
class TernaryCostFunction: public CostFunction {
public:
TernaryCostFunction(int num_residuals,
- int16 parameter_block1_size,
- int16 parameter_block2_size,
- int16 parameter_block3_size) {
+ int32 parameter_block1_size,
+ int32 parameter_block2_size,
+ int32 parameter_block3_size) {
set_num_residuals(num_residuals);
mutable_parameter_block_sizes()->push_back(parameter_block1_size);
mutable_parameter_block_sizes()->push_back(parameter_block2_size);
diff --git a/internal/ceres/residual_block_utils.cc b/internal/ceres/residual_block_utils.cc
index 4d88a9f..d2564a7 100644
--- a/internal/ceres/residual_block_utils.cc
+++ b/internal/ceres/residual_block_utils.cc
@@ -61,24 +61,6 @@ void InvalidateEvaluation(const ResidualBlock& block,
}
}
-// Utility routine to print an array of doubles to a string. If the
-// array pointer is NULL, it is treated as an array of zeros.
-namespace {
-void AppendArrayToString(const int size, const double* x, string* result) {
- for (int i = 0; i < size; ++i) {
- if (x == NULL) {
- StringAppendF(result, "Not Computed ");
- } else {
- if (x[i] == kImpossibleValue) {
- StringAppendF(result, "Uninitialized ");
- } else {
- StringAppendF(result, "%12g ", x[i]);
- }
- }
- }
-}
-} // namespace
-
string EvaluationToString(const ResidualBlock& block,
double const* const* parameters,
double* cost,
diff --git a/internal/ceres/rotation_test.cc b/internal/ceres/rotation_test.cc
index 8de1bbd..fab0a7a 100644
--- a/internal/ceres/rotation_test.cc
+++ b/internal/ceres/rotation_test.cc
@@ -548,6 +548,41 @@ TEST(Rotation, AngleAxisToRotationMatrixAndBack) {
}
}
+// Takes a bunch of random axis/angle values near zero, converts them
+// to rotation matrices, and back again.
+TEST(Rotation, AngleAxisToRotationMatrixAndBackNearZero) {
+ srand(5);
+ for (int i = 0; i < kNumTrials; i++) {
+ double axis_angle[3];
+ // Make an axis by choosing three random numbers in [-1, 1) and
+ // normalizing.
+ double norm = 0;
+ for (int i = 0; i < 3; i++) {
+ axis_angle[i] = RandDouble() * 2 - 1;
+ norm += axis_angle[i] * axis_angle[i];
+ }
+ norm = sqrt(norm);
+
+ // Tiny theta.
+ double theta = 1e-16 * (kPi * 2 * RandDouble() - kPi);
+ for (int i = 0; i < 3; i++) {
+ axis_angle[i] = axis_angle[i] * theta / norm;
+ }
+
+ double matrix[9];
+ double round_trip[3];
+ AngleAxisToRotationMatrix(axis_angle, matrix);
+ ASSERT_THAT(matrix, IsOrthonormal());
+ RotationMatrixToAngleAxis(matrix, round_trip);
+
+ for (int i = 0; i < 3; ++i) {
+ EXPECT_NEAR(round_trip[i], axis_angle[i],
+ std::numeric_limits<double>::epsilon());
+ }
+ }
+}
+
+
// Transposes a 3x3 matrix.
static void Transpose3x3(double m[9]) {
std::swap(m[1], m[3]);
diff --git a/internal/ceres/runtime_numeric_diff_cost_function.cc b/internal/ceres/runtime_numeric_diff_cost_function.cc
deleted file mode 100644
index 7af275c..0000000
--- a/internal/ceres/runtime_numeric_diff_cost_function.cc
+++ /dev/null
@@ -1,217 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: keir@google.com (Keir Mierle)
-//
-// Based on the templated version in public/numeric_diff_cost_function.h.
-
-#include "ceres/runtime_numeric_diff_cost_function.h"
-
-#include <algorithm>
-#include <numeric>
-#include <vector>
-#include "Eigen/Dense"
-#include "ceres/cost_function.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "glog/logging.h"
-
-namespace ceres {
-namespace internal {
-namespace {
-
-bool EvaluateJacobianForParameterBlock(const CostFunction* function,
- int parameter_block_size,
- int parameter_block,
- RuntimeNumericDiffMethod method,
- double relative_step_size,
- double const* residuals_at_eval_point,
- double** parameters,
- double** jacobians) {
- using Eigen::Map;
- using Eigen::Matrix;
- using Eigen::Dynamic;
- using Eigen::RowMajor;
-
- typedef Matrix<double, Dynamic, 1> ResidualVector;
- typedef Matrix<double, Dynamic, 1> ParameterVector;
- typedef Matrix<double, Dynamic, Dynamic, RowMajor> JacobianMatrix;
-
- int num_residuals = function->num_residuals();
-
- Map<JacobianMatrix> parameter_jacobian(jacobians[parameter_block],
- num_residuals,
- parameter_block_size);
-
- // Mutate one element at a time and then restore.
- Map<ParameterVector> x_plus_delta(parameters[parameter_block],
- parameter_block_size);
- ParameterVector x(x_plus_delta);
- ParameterVector step_size = x.array().abs() * relative_step_size;
-
- // To handle cases where a paremeter is exactly zero, instead use the mean
- // step_size for the other dimensions.
- double fallback_step_size = step_size.sum() / step_size.rows();
- if (fallback_step_size == 0.0) {
- // If all the parameters are zero, there's no good answer. Use the given
- // relative step_size as absolute step_size and hope for the best.
- fallback_step_size = relative_step_size;
- }
-
- // For each parameter in the parameter block, use finite differences to
- // compute the derivative for that parameter.
- for (int j = 0; j < parameter_block_size; ++j) {
- if (step_size(j) == 0.0) {
- // The parameter is exactly zero, so compromise and use the mean step_size
- // from the other parameters. This can break in many cases, but it's hard
- // to pick a good number without problem specific knowledge.
- step_size(j) = fallback_step_size;
- }
- x_plus_delta(j) = x(j) + step_size(j);
-
- ResidualVector residuals(num_residuals);
- if (!function->Evaluate(parameters, &residuals[0], NULL)) {
- // Something went wrong; bail.
- return false;
- }
-
- // Compute this column of the jacobian in 3 steps:
- // 1. Store residuals for the forward part.
- // 2. Subtract residuals for the backward (or 0) part.
- // 3. Divide out the run.
- parameter_jacobian.col(j) = residuals;
-
- double one_over_h = 1 / step_size(j);
- if (method == CENTRAL) {
- // Compute the function on the other side of x(j).
- x_plus_delta(j) = x(j) - step_size(j);
-
- if (!function->Evaluate(parameters, &residuals[0], NULL)) {
- // Something went wrong; bail.
- return false;
- }
- parameter_jacobian.col(j) -= residuals;
- one_over_h /= 2;
- } else {
- // Forward difference only; reuse existing residuals evaluation.
- parameter_jacobian.col(j) -=
- Map<const ResidualVector>(residuals_at_eval_point, num_residuals);
- }
- x_plus_delta(j) = x(j); // Restore x_plus_delta.
-
- // Divide out the run to get slope.
- parameter_jacobian.col(j) *= one_over_h;
- }
- return true;
-}
-
-class RuntimeNumericDiffCostFunction : public CostFunction {
- public:
- RuntimeNumericDiffCostFunction(const CostFunction* function,
- RuntimeNumericDiffMethod method,
- double relative_step_size)
- : function_(function),
- method_(method),
- relative_step_size_(relative_step_size) {
- *mutable_parameter_block_sizes() = function->parameter_block_sizes();
- set_num_residuals(function->num_residuals());
- }
-
- virtual ~RuntimeNumericDiffCostFunction() { }
-
- virtual bool Evaluate(double const* const* parameters,
- double* residuals,
- double** jacobians) const {
- // Get the function value (residuals) at the the point to evaluate.
- bool success = function_->Evaluate(parameters, residuals, NULL);
- if (!success) {
- // Something went wrong; ignore the jacobian.
- return false;
- }
- if (!jacobians) {
- // Nothing to do; just forward.
- return true;
- }
-
- const vector<int16>& block_sizes = function_->parameter_block_sizes();
- CHECK(!block_sizes.empty());
-
- // Create local space for a copy of the parameters which will get mutated.
- int parameters_size = accumulate(block_sizes.begin(), block_sizes.end(), 0);
- vector<double> parameters_copy(parameters_size);
- vector<double*> parameters_references_copy(block_sizes.size());
- parameters_references_copy[0] = &parameters_copy[0];
- for (int block = 1; block < block_sizes.size(); ++block) {
- parameters_references_copy[block] = parameters_references_copy[block - 1]
- + block_sizes[block - 1];
- }
-
- // Copy the parameters into the local temp space.
- for (int block = 0; block < block_sizes.size(); ++block) {
- memcpy(parameters_references_copy[block],
- parameters[block],
- block_sizes[block] * sizeof(*parameters[block]));
- }
-
- for (int block = 0; block < block_sizes.size(); ++block) {
- if (!jacobians[block]) {
- // No jacobian requested for this parameter / residual pair.
- continue;
- }
- if (!EvaluateJacobianForParameterBlock(function_,
- block_sizes[block],
- block,
- method_,
- relative_step_size_,
- residuals,
- &parameters_references_copy[0],
- jacobians)) {
- return false;
- }
- }
- return true;
- }
-
- private:
- const CostFunction* function_;
- RuntimeNumericDiffMethod method_;
- double relative_step_size_;
-};
-
-} // namespace
-
-CostFunction* CreateRuntimeNumericDiffCostFunction(
- const CostFunction* cost_function,
- RuntimeNumericDiffMethod method,
- double relative_step_size) {
- return new RuntimeNumericDiffCostFunction(cost_function,
- method,
- relative_step_size);
-}
-
-} // namespace internal
-} // namespace ceres
diff --git a/internal/ceres/runtime_numeric_diff_cost_function.h b/internal/ceres/runtime_numeric_diff_cost_function.h
deleted file mode 100644
index 01b57f9..0000000
--- a/internal/ceres/runtime_numeric_diff_cost_function.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: keir@google.com (Keir Mierle)
-//
-// Create CostFunctions as needed by the least squares framework with jacobians
-// computed via numeric differentiation.
-//
-// To get a numerically differentiated cost function, define a subclass of
-// CostFunction such that the Evaluate() function ignores the jacobian
-// parameter. The numeric differentiation wrapper will fill in the jacobian
-// parameter if nececssary by repeatedly calling the Evaluate() function with
-// small changes to the appropriate parameters, and computing the slope. This
-// implementation is not templated (hence the "Runtime" prefix), which is a bit
-// slower than but is more convenient than the templated version in
-// numeric_diff_cost_function.h
-//
-// The numerically differentiated version of a cost function for a cost function
-// can be constructed as follows:
-//
-// CostFunction* cost_function =
-// CreateRuntimeNumericDiffCostFunction(new MyCostFunction(...),
-// CENTRAL,
-// TAKE_OWNERSHIP);
-//
-// The central difference method is considerably more accurate; consider using
-// to start and only after that works, trying forward difference.
-//
-// TODO(keir): Characterize accuracy; mention pitfalls; provide alternatives.
-
-#ifndef CERES_INTERNAL_RUNTIME_NUMERIC_DIFF_COST_FUNCTION_H_
-#define CERES_INTERNAL_RUNTIME_NUMERIC_DIFF_COST_FUNCTION_H_
-
-#include "ceres/cost_function.h"
-
-namespace ceres {
-namespace internal {
-
-enum RuntimeNumericDiffMethod {
- CENTRAL,
- FORWARD,
-};
-
-// Create a cost function that evaluates the derivative with finite differences.
-// The base cost_function's implementation of Evaluate() only needs to fill in
-// the "residuals" argument and not the "jacobians". Any data written to the
-// jacobians by the base cost_function is overwritten.
-//
-// Forward difference or central difference is selected with CENTRAL or FORWARD.
-// The relative eps, which determines the step size for forward and central
-// differencing, is set with relative eps. Caller owns the resulting cost
-// function, and the resulting cost function does not own the base cost
-// function.
-CostFunction *CreateRuntimeNumericDiffCostFunction(
- const CostFunction *cost_function,
- RuntimeNumericDiffMethod method,
- double relative_eps);
-
-} // namespace internal
-} // namespace ceres
-
-#endif // CERES_INTERNAL_RUNTIME_NUMERIC_DIFF_COST_FUNCTION_H_
diff --git a/internal/ceres/runtime_numeric_diff_cost_function_test.cc b/internal/ceres/runtime_numeric_diff_cost_function_test.cc
deleted file mode 100644
index 71469ea..0000000
--- a/internal/ceres/runtime_numeric_diff_cost_function_test.cc
+++ /dev/null
@@ -1,222 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: keir@google.com (Keir Mierle)
-//
-// Based on the tests in numeric_diff_cost_function.cc.
-//
-// TODO(keir): See about code duplication.
-
-#include "ceres/runtime_numeric_diff_cost_function.h"
-
-#include <algorithm>
-#include <cmath>
-#include <string>
-#include <vector>
-#include "ceres/cost_function.h"
-#include "ceres/internal/macros.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "ceres/stringprintf.h"
-#include "ceres/test_util.h"
-#include "glog/logging.h"
-#include "gtest/gtest.h"
-
-namespace ceres {
-namespace internal {
-
-const double kRelativeEps = 1e-6;
-
-// y1 = x1'x2 -> dy1/dx1 = x2, dy1/dx2 = x1
-// y2 = (x1'x2)^2 -> dy2/dx1 = 2 * x2 * (x1'x2), dy2/dx2 = 2 * x1 * (x1'x2)
-// y3 = x2'x2 -> dy3/dx1 = 0, dy3/dx2 = 2 * x2
-class TestCostFunction : public CostFunction {
- public:
- TestCostFunction() {
- set_num_residuals(3);
- mutable_parameter_block_sizes()->push_back(5); // x1.
- mutable_parameter_block_sizes()->push_back(5); // x2.
- }
- virtual bool Evaluate(double const* const* parameters,
- double* residuals,
- double** jacobians) const {
- (void) jacobians; // Ignored.
-
- residuals[0] = residuals[1] = residuals[2] = 0;
- for (int i = 0; i < 5; ++i) {
- residuals[0] += parameters[0][i] * parameters[1][i];
- residuals[2] += parameters[1][i] * parameters[1][i];
- }
- residuals[1] = residuals[0] * residuals[0];
- return true;
- }
-};
-
-TEST(NumericDiffCostFunction, EasyCase) {
- // Try both central and forward difference.
- TestCostFunction term;
- scoped_ptr<CostFunction> cfs[2];
- cfs[0].reset(
- CreateRuntimeNumericDiffCostFunction(&term, CENTRAL, kRelativeEps));
-
- cfs[1].reset(
- CreateRuntimeNumericDiffCostFunction(&term, FORWARD, kRelativeEps));
-
-
- for (int c = 0; c < 2; ++c) {
- CostFunction *cost_function = cfs[c].get();
-
- double x1[] = { 1.0, 2.0, 3.0, 4.0, 5.0 };
- double x2[] = { 9.0, 9.0, 5.0, 5.0, 1.0 };
- double *parameters[] = { &x1[0], &x2[0] };
-
- double dydx1[15]; // 3 x 5, row major.
- double dydx2[15]; // 3 x 5, row major.
- double *jacobians[2] = { &dydx1[0], &dydx2[0] };
-
- double residuals[3] = {-1e-100, -2e-100, -3e-100 };
-
- ASSERT_TRUE(cost_function->Evaluate(&parameters[0],
- &residuals[0],
- &jacobians[0]));
-
- EXPECT_EQ(residuals[0], 67);
- EXPECT_EQ(residuals[1], 4489);
- EXPECT_EQ(residuals[2], 213);
-
- for (int i = 0; i < 5; ++i) {
- LOG(INFO) << "c = " << c << " i = " << i;
- const double kEps = c == 0 ? /* central */ 3e-9 : /* forward */ 2e-5;
-
- ExpectClose(x2[i], dydx1[5 * 0 + i], kEps); // y1
- ExpectClose(x1[i], dydx2[5 * 0 + i], kEps);
- ExpectClose(2 * x2[i] * residuals[0], dydx1[5 * 1 + i], kEps); // y2
- ExpectClose(2 * x1[i] * residuals[0], dydx2[5 * 1 + i], kEps);
- ExpectClose(0.0, dydx1[5 * 2 + i], kEps); // y3
- ExpectClose(2 * x2[i], dydx2[5 * 2 + i], kEps);
- }
- }
-}
-
-// y1 = sin(x1'x2)
-// y2 = exp(-x1'x2 / 10)
-//
-// dy1/dx1 = x2 * cos(x1'x2), dy1/dx2 = x1 * cos(x1'x2)
-// dy2/dx1 = -x2 * exp(-x1'x2 / 10) / 10, dy2/dx2 = -x2 * exp(-x1'x2 / 10) / 10
-class TranscendentalTestCostFunction : public CostFunction {
- public:
- TranscendentalTestCostFunction() {
- set_num_residuals(2);
- mutable_parameter_block_sizes()->push_back(5); // x1.
- mutable_parameter_block_sizes()->push_back(5); // x2.
- }
- virtual bool Evaluate(double const* const* parameters,
- double* residuals,
- double** jacobians) const {
- (void) jacobians; // Ignored.
-
- double x1x2 = 0;
- for (int i = 0; i < 5; ++i) {
- x1x2 += parameters[0][i] * parameters[1][i];
- }
- residuals[0] = sin(x1x2);
- residuals[1] = exp(-x1x2 / 10);
- return true;
- }
-};
-
-TEST(NumericDiffCostFunction, TransendentalOperationsInCostFunction) {
- // Try both central and forward difference.
- TranscendentalTestCostFunction term;
- scoped_ptr<CostFunction> cfs[2];
- cfs[0].reset(
- CreateRuntimeNumericDiffCostFunction(&term, CENTRAL, kRelativeEps));
-
- cfs[1].reset(
- CreateRuntimeNumericDiffCostFunction(&term, FORWARD, kRelativeEps));
-
- for (int c = 0; c < 2; ++c) {
- CostFunction *cost_function = cfs[c].get();
-
- struct {
- double x1[5];
- double x2[5];
- } kTests[] = {
- { { 1.0, 2.0, 3.0, 4.0, 5.0 }, // No zeros.
- { 9.0, 9.0, 5.0, 5.0, 1.0 },
- },
- { { 0.0, 2.0, 3.0, 0.0, 5.0 }, // Some zeros x1.
- { 9.0, 9.0, 5.0, 5.0, 1.0 },
- },
- { { 1.0, 2.0, 3.0, 1.0, 5.0 }, // Some zeros x2.
- { 0.0, 9.0, 0.0, 5.0, 0.0 },
- },
- { { 0.0, 0.0, 0.0, 0.0, 0.0 }, // All zeros x1.
- { 9.0, 9.0, 5.0, 5.0, 1.0 },
- },
- { { 1.0, 2.0, 3.0, 4.0, 5.0 }, // All zeros x2.
- { 0.0, 0.0, 0.0, 0.0, 0.0 },
- },
- { { 0.0, 0.0, 0.0, 0.0, 0.0 }, // All zeros.
- { 0.0, 0.0, 0.0, 0.0, 0.0 },
- },
- };
- for (int k = 0; k < CERES_ARRAYSIZE(kTests); ++k) {
- double *x1 = &(kTests[k].x1[0]);
- double *x2 = &(kTests[k].x2[0]);
- double *parameters[] = { x1, x2 };
-
- double dydx1[10];
- double dydx2[10];
- double *jacobians[2] = { &dydx1[0], &dydx2[0] };
-
- double residuals[2];
-
- ASSERT_TRUE(cost_function->Evaluate(&parameters[0],
- &residuals[0],
- &jacobians[0]));
- LOG(INFO) << "Ran evaluate for test k=" << k << " c=" << c;
-
- double x1x2 = 0;
- for (int i = 0; i < 5; ++i) {
- x1x2 += x1[i] * x2[i];
- }
-
- for (int i = 0; i < 5; ++i) {
- const double kEps = (c == 0 ? /* central */ 3e-9 : /* forward */ 2e-5);
-
- ExpectClose( x2[i] * cos(x1x2), dydx1[5 * 0 + i], kEps); // NOLINT
- ExpectClose( x1[i] * cos(x1x2), dydx2[5 * 0 + i], kEps); // NOLINT
- ExpectClose(-x2[i] * exp(-x1x2 / 10.) / 10., dydx1[5 * 1 + i], kEps);
- ExpectClose(-x1[i] * exp(-x1x2 / 10.) / 10., dydx2[5 * 1 + i], kEps);
- }
- }
- }
-}
-
-} // namespace internal
-} // namespace ceres
diff --git a/internal/ceres/schur_complement_solver.cc b/internal/ceres/schur_complement_solver.cc
index b192aa1..d2aa168 100644
--- a/internal/ceres/schur_complement_solver.cc
+++ b/internal/ceres/schur_complement_solver.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
@@ -28,12 +28,13 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
+#include "ceres/internal/port.h"
+
#include <algorithm>
#include <ctime>
#include <set>
#include <vector>
-#include "Eigen/Dense"
#include "ceres/block_random_access_dense_matrix.h"
#include "ceres/block_random_access_matrix.h"
#include "ceres/block_random_access_sparse_matrix.h"
@@ -42,7 +43,6 @@
#include "ceres/cxsparse.h"
#include "ceres/detect_structure.h"
#include "ceres/internal/eigen.h"
-#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/lapack.h"
#include "ceres/linear_solver.h"
@@ -51,6 +51,8 @@
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
#include "ceres/wall_time.h"
+#include "Eigen/Dense"
+#include "Eigen/SparseCore"
namespace ceres {
namespace internal {
@@ -75,24 +77,19 @@ LinearSolver::Summary SchurComplementSolver::SolveImpl(
fill(x, x + A->num_cols(), 0.0);
event_logger.AddEvent("Setup");
- LinearSolver::Summary summary;
- summary.num_iterations = 1;
- summary.termination_type = FAILURE;
eliminator_->Eliminate(A, b, per_solve_options.D, lhs_.get(), rhs_.get());
event_logger.AddEvent("Eliminate");
double* reduced_solution = x + A->num_cols() - lhs_->num_cols();
- const bool status = SolveReducedLinearSystem(reduced_solution);
+ const LinearSolver::Summary summary =
+ SolveReducedLinearSystem(reduced_solution);
event_logger.AddEvent("ReducedSolve");
- if (!status) {
- return summary;
+ if (summary.termination_type == LINEAR_SOLVER_SUCCESS) {
+ eliminator_->BackSubstitute(A, b, per_solve_options.D, reduced_solution, x);
+ event_logger.AddEvent("BackSubstitute");
}
- eliminator_->BackSubstitute(A, b, per_solve_options.D, reduced_solution, x);
- summary.termination_type = TOLERANCE;
-
- event_logger.AddEvent("BackSubstitute");
return summary;
}
@@ -117,7 +114,13 @@ void DenseSchurComplementSolver::InitStorage(
// Solve the system Sx = r, assuming that the matrix S is stored in a
// BlockRandomAccessDenseMatrix. The linear system is solved using
// Eigen's Cholesky factorization.
-bool DenseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
+LinearSolver::Summary
+DenseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
+ LinearSolver::Summary summary;
+ summary.num_iterations = 0;
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ summary.message = "Success.";
+
const BlockRandomAccessDenseMatrix* m =
down_cast<const BlockRandomAccessDenseMatrix*>(lhs());
const int num_rows = m->num_rows();
@@ -125,29 +128,36 @@ bool DenseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
// The case where there are no f blocks, and the system is block
// diagonal.
if (num_rows == 0) {
- return true;
+ return summary;
}
+ summary.num_iterations = 1;
+
if (options().dense_linear_algebra_library_type == EIGEN) {
- // TODO(sameeragarwal): Add proper error handling; this completely ignores
- // the quality of the solution to the solve.
- VectorRef(solution, num_rows) =
+ Eigen::LLT<Matrix, Eigen::Upper> llt =
ConstMatrixRef(m->values(), num_rows, num_rows)
.selfadjointView<Eigen::Upper>()
- .llt()
- .solve(ConstVectorRef(rhs(), num_rows));
- return true;
+ .llt();
+ if (llt.info() != Eigen::Success) {
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+ summary.message =
+ "Eigen failure. Unable to perform dense Cholesky factorization.";
+ return summary;
+ }
+
+ VectorRef(solution, num_rows) = llt.solve(ConstVectorRef(rhs(), num_rows));
+ } else {
+ VectorRef(solution, num_rows) = ConstVectorRef(rhs(), num_rows);
+ summary.termination_type =
+ LAPACK::SolveInPlaceUsingCholesky(num_rows,
+ m->values(),
+ solution,
+ &summary.message);
}
- VectorRef(solution, num_rows) = ConstVectorRef(rhs(), num_rows);
- const int info = LAPACK::SolveInPlaceUsingCholesky(num_rows,
- m->values(),
- solution);
- return (info == 0);
+ return summary;
}
-#if !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARE)
-
SparseSchurComplementSolver::SparseSchurComplementSolver(
const LinearSolver::Options& options)
: SchurComplementSolver(options),
@@ -156,19 +166,15 @@ SparseSchurComplementSolver::SparseSchurComplementSolver(
}
SparseSchurComplementSolver::~SparseSchurComplementSolver() {
-#ifndef CERES_NO_SUITESPARSE
if (factor_ != NULL) {
ss_.Free(factor_);
factor_ = NULL;
}
-#endif // CERES_NO_SUITESPARSE
-#ifndef CERES_NO_CXSPARSE
if (cxsparse_factor_ != NULL) {
cxsparse_.Free(cxsparse_factor_);
cxsparse_factor_ = NULL;
}
-#endif // CERES_NO_CXSPARSE
}
// Determine the non-zero blocks in the Schur Complement matrix, and
@@ -242,40 +248,57 @@ void SparseSchurComplementSolver::InitStorage(
set_rhs(new double[lhs()->num_rows()]);
}
-bool SparseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
+LinearSolver::Summary
+SparseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
switch (options().sparse_linear_algebra_library_type) {
case SUITE_SPARSE:
return SolveReducedLinearSystemUsingSuiteSparse(solution);
case CX_SPARSE:
return SolveReducedLinearSystemUsingCXSparse(solution);
+ case EIGEN_SPARSE:
+ return SolveReducedLinearSystemUsingEigen(solution);
default:
LOG(FATAL) << "Unknown sparse linear algebra library : "
<< options().sparse_linear_algebra_library_type;
}
- LOG(FATAL) << "Unknown sparse linear algebra library : "
- << options().sparse_linear_algebra_library_type;
- return false;
+ return LinearSolver::Summary();
}
-#ifndef CERES_NO_SUITESPARSE
// Solve the system Sx = r, assuming that the matrix S is stored in a
// BlockRandomAccessSparseMatrix. The linear system is solved using
// CHOLMOD's sparse cholesky factorization routines.
-bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
+LinearSolver::Summary
+SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
double* solution) {
+#ifdef CERES_NO_SUITESPARSE
+
+ LinearSolver::Summary summary;
+ summary.num_iterations = 0;
+ summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+ summary.message = "Ceres was not built with SuiteSparse support. "
+ "Therefore, SPARSE_SCHUR cannot be used with SUITE_SPARSE";
+ return summary;
+
+#else
+
+ LinearSolver::Summary summary;
+ summary.num_iterations = 0;
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ summary.message = "Success.";
+
TripletSparseMatrix* tsm =
const_cast<TripletSparseMatrix*>(
down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix());
-
const int num_rows = tsm->num_rows();
// The case where there are no f blocks, and the system is block
// diagonal.
if (num_rows == 0) {
- return true;
+ return summary;
}
+ summary.num_iterations = 1;
cholmod_sparse* cholmod_lhs = NULL;
if (options().use_postordering) {
// If we are going to do a full symbolic analysis of the schur
@@ -288,7 +311,10 @@ bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
cholmod_lhs->stype = 1;
if (factor_ == NULL) {
- factor_ = ss_.BlockAnalyzeCholesky(cholmod_lhs, blocks_, blocks_);
+ factor_ = ss_.BlockAnalyzeCholesky(cholmod_lhs,
+ blocks_,
+ blocks_,
+ &summary.message);
}
} else {
// If we are going to use the natural ordering (i.e. rely on the
@@ -301,53 +327,83 @@ bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
cholmod_lhs->stype = -1;
if (factor_ == NULL) {
- factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(cholmod_lhs);
+ factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(cholmod_lhs,
+ &summary.message);
}
}
- cholmod_dense* cholmod_rhs =
- ss_.CreateDenseVector(const_cast<double*>(rhs()), num_rows, num_rows);
- cholmod_dense* cholmod_solution =
- ss_.SolveCholesky(cholmod_lhs, factor_, cholmod_rhs);
+ if (factor_ == NULL) {
+ ss_.Free(cholmod_lhs);
+ summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+ // No need to set message as it has already been set by the
+ // symbolic analysis routines above.
+ return summary;
+ }
+
+ summary.termination_type =
+ ss_.Cholesky(cholmod_lhs, factor_, &summary.message);
ss_.Free(cholmod_lhs);
+
+ if (summary.termination_type != LINEAR_SOLVER_SUCCESS) {
+ // No need to set message as it has already been set by the
+ // numeric factorization routine above.
+ return summary;
+ }
+
+ cholmod_dense* cholmod_rhs =
+ ss_.CreateDenseVector(const_cast<double*>(rhs()), num_rows, num_rows);
+ cholmod_dense* cholmod_solution = ss_.Solve(factor_,
+ cholmod_rhs,
+ &summary.message);
ss_.Free(cholmod_rhs);
if (cholmod_solution == NULL) {
- LOG(WARNING) << "CHOLMOD solve failed.";
- return false;
+ summary.message =
+ "SuiteSparse failure. Unable to perform triangular solve.";
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+ return summary;
}
VectorRef(solution, num_rows)
= VectorRef(static_cast<double*>(cholmod_solution->x), num_rows);
ss_.Free(cholmod_solution);
- return true;
-}
-#else
-bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
- double* solution) {
- LOG(FATAL) << "No SuiteSparse support in Ceres.";
- return false;
-}
+ return summary;
#endif // CERES_NO_SUITESPARSE
+}
-#ifndef CERES_NO_CXSPARSE
// Solve the system Sx = r, assuming that the matrix S is stored in a
// BlockRandomAccessSparseMatrix. The linear system is solved using
// CXSparse's sparse cholesky factorization routines.
-bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
+LinearSolver::Summary
+SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
double* solution) {
+#ifdef CERES_NO_CXSPARSE
+
+ LinearSolver::Summary summary;
+ summary.num_iterations = 0;
+ summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+ summary.message = "Ceres was not built with CXSparse support. "
+ "Therefore, SPARSE_SCHUR cannot be used with CX_SPARSE";
+ return summary;
+
+#else
+
+ LinearSolver::Summary summary;
+ summary.num_iterations = 0;
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ summary.message = "Success.";
+
// Extract the TripletSparseMatrix that is used for actually storing S.
TripletSparseMatrix* tsm =
const_cast<TripletSparseMatrix*>(
down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix());
-
const int num_rows = tsm->num_rows();
// The case where there are no f blocks, and the system is block
// diagonal.
if (num_rows == 0) {
- return true;
+ return summary;
}
cs_di* lhs = CHECK_NOTNULL(cxsparse_.CreateSparseMatrix(tsm));
@@ -355,24 +411,108 @@ bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
// Compute symbolic factorization if not available.
if (cxsparse_factor_ == NULL) {
- cxsparse_factor_ =
- CHECK_NOTNULL(cxsparse_.BlockAnalyzeCholesky(lhs, blocks_, blocks_));
+ cxsparse_factor_ = cxsparse_.BlockAnalyzeCholesky(lhs, blocks_, blocks_);
}
- // Solve the linear system.
- bool ok = cxsparse_.SolveCholesky(lhs, cxsparse_factor_, solution);
+ if (cxsparse_factor_ == NULL) {
+ summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+ summary.message =
+ "CXSparse failure. Unable to find symbolic factorization.";
+ } else if (!cxsparse_.SolveCholesky(lhs, cxsparse_factor_, solution)) {
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+ summary.message = "CXSparse::SolveCholesky failed.";
+ }
cxsparse_.Free(lhs);
- return ok;
+ return summary;
+#endif  // CERES_NO_CXSPARSE
}
-#else
-bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
+
+// Solve the system Sx = r, assuming that the matrix S is stored in a
+// BlockRandomAccessSparseMatrix. The linear system is solved using
+// Eigen's sparse cholesky factorization routines.
+LinearSolver::Summary
+SparseSchurComplementSolver::SolveReducedLinearSystemUsingEigen(
double* solution) {
- LOG(FATAL) << "No CXSparse support in Ceres.";
- return false;
+#ifndef CERES_USE_EIGEN_SPARSE
+
+ LinearSolver::Summary summary;
+ summary.num_iterations = 0;
+ summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+ summary.message =
+ "SPARSE_SCHUR cannot be used with EIGEN_SPARSE. "
+ "Ceres was not built with support for "
+ "Eigen's SimplicialLDLT decomposition. "
+ "This requires enabling building with -DEIGENSPARSE=ON.";
+ return summary;
+
+#else
+ EventLogger event_logger("SchurComplementSolver::EigenSolve");
+ LinearSolver::Summary summary;
+ summary.num_iterations = 0;
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ summary.message = "Success.";
+
+ // Extract the TripletSparseMatrix that is used for actually storing S.
+ TripletSparseMatrix* tsm =
+ const_cast<TripletSparseMatrix*>(
+ down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix());
+ const int num_rows = tsm->num_rows();
+
+ // The case where there are no f blocks, and the system is block
+ // diagonal.
+ if (num_rows == 0) {
+ return summary;
+ }
+
+ // This is an upper triangular matrix.
+ CompressedRowSparseMatrix crsm(*tsm);
+ // Map this to a column major, lower triangular matrix.
+ Eigen::MappedSparseMatrix<double, Eigen::ColMajor> eigen_lhs(
+ crsm.num_rows(),
+ crsm.num_rows(),
+ crsm.num_nonzeros(),
+ crsm.mutable_rows(),
+ crsm.mutable_cols(),
+ crsm.mutable_values());
+ event_logger.AddEvent("ToCompressedRowSparseMatrix");
+
+ // Compute symbolic factorization if one does not exist.
+ if (simplicial_ldlt_.get() == NULL) {
+ simplicial_ldlt_.reset(new SimplicialLDLT);
+    // The scalar ordering produced by the AMD algorithm is quite
+    // bad and can be an order of magnitude worse than the one
+    // computed using the block version of the algorithm.
+ simplicial_ldlt_->analyzePattern(eigen_lhs);
+ event_logger.AddEvent("Analysis");
+ if (simplicial_ldlt_->info() != Eigen::Success) {
+ summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+ summary.message =
+ "Eigen failure. Unable to find symbolic factorization.";
+ return summary;
+ }
+ }
+
+ simplicial_ldlt_->factorize(eigen_lhs);
+ event_logger.AddEvent("Factorize");
+ if (simplicial_ldlt_->info() != Eigen::Success) {
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+    summary.message = "Eigen failure. Unable to find numeric factorization.";
+ return summary;
+ }
+
+ VectorRef(solution, num_rows) =
+ simplicial_ldlt_->solve(ConstVectorRef(rhs(), num_rows));
+ event_logger.AddEvent("Solve");
+ if (simplicial_ldlt_->info() != Eigen::Success) {
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+ summary.message = "Eigen failure. Unable to do triangular solve.";
+ }
+
+ return summary;
+#endif // CERES_USE_EIGEN_SPARSE
}
-#endif // CERES_NO_CXPARSE
-#endif // !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARE)
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/schur_complement_solver.h b/internal/ceres/schur_complement_solver.h
index b5a1c74..723b149 100644
--- a/internal/ceres/schur_complement_solver.h
+++ b/internal/ceres/schur_complement_solver.h
@@ -35,6 +35,8 @@
#include <utility>
#include <vector>
+#include "ceres/internal/port.h"
+
#include "ceres/block_random_access_matrix.h"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
@@ -45,6 +47,10 @@
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
+#ifdef CERES_USE_EIGEN_SPARSE
+#include "Eigen/SparseCholesky"
+#endif
+
namespace ceres {
namespace internal {
@@ -126,7 +132,8 @@ class SchurComplementSolver : public BlockSparseMatrixSolver {
private:
virtual void InitStorage(const CompressedRowBlockStructure* bs) = 0;
- virtual bool SolveReducedLinearSystem(double* solution) = 0;
+ virtual LinearSolver::Summary SolveReducedLinearSystem(
+ double* solution) = 0;
LinearSolver::Options options_;
@@ -146,12 +153,12 @@ class DenseSchurComplementSolver : public SchurComplementSolver {
private:
virtual void InitStorage(const CompressedRowBlockStructure* bs);
- virtual bool SolveReducedLinearSystem(double* solution);
+ virtual LinearSolver::Summary SolveReducedLinearSystem(
+ double* solution);
CERES_DISALLOW_COPY_AND_ASSIGN(DenseSchurComplementSolver);
};
-#if !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARE)
// Sparse Cholesky factorization based solver.
class SparseSchurComplementSolver : public SchurComplementSolver {
public:
@@ -160,9 +167,14 @@ class SparseSchurComplementSolver : public SchurComplementSolver {
private:
virtual void InitStorage(const CompressedRowBlockStructure* bs);
- virtual bool SolveReducedLinearSystem(double* solution);
- bool SolveReducedLinearSystemUsingSuiteSparse(double* solution);
- bool SolveReducedLinearSystemUsingCXSparse(double* solution);
+ virtual LinearSolver::Summary SolveReducedLinearSystem(
+ double* solution);
+ LinearSolver::Summary SolveReducedLinearSystemUsingSuiteSparse(
+ double* solution);
+ LinearSolver::Summary SolveReducedLinearSystemUsingCXSparse(
+ double* solution);
+ LinearSolver::Summary SolveReducedLinearSystemUsingEigen(
+ double* solution);
// Size of the blocks in the Schur complement.
vector<int> blocks_;
@@ -175,10 +187,15 @@ class SparseSchurComplementSolver : public SchurComplementSolver {
CXSparse cxsparse_;
// Cached factorization
cs_dis* cxsparse_factor_;
+
+#ifdef CERES_USE_EIGEN_SPARSE
+ typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > SimplicialLDLT;
+ scoped_ptr<SimplicialLDLT> simplicial_ldlt_;
+#endif
+
CERES_DISALLOW_COPY_AND_ASSIGN(SparseSchurComplementSolver);
};
-#endif // !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARE)
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/schur_complement_solver_test.cc b/internal/ceres/schur_complement_solver_test.cc
index d91c162..8e71b2e 100644
--- a/internal/ceres/schur_complement_solver_test.cc
+++ b/internal/ceres/schur_complement_solver_test.cc
@@ -187,17 +187,31 @@ TEST_F(SchurComplementSolverTest,
#ifndef CERES_NO_CXSPARSE
TEST_F(SchurComplementSolverTest,
- SparseSchurWithSuiteSparseSmallProblem) {
+ SparseSchurWithCXSparseSmallProblem) {
ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
}
TEST_F(SchurComplementSolverTest,
- SparseSchurWithSuiteSparseLargeProblem) {
+ SparseSchurWithCXSparseLargeProblem) {
ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, EIGEN, CX_SPARSE, true);
}
#endif // CERES_NO_CXSPARSE
+#ifdef CERES_USE_EIGEN_SPARSE
+TEST_F(SchurComplementSolverTest,
+ SparseSchurWithEigenSparseSmallProblem) {
+ ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
+ ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
+}
+
+TEST_F(SchurComplementSolverTest,
+ SparseSchurWithEigenSparseLargeProblem) {
+ ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
+ ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, EIGEN, EIGEN_SPARSE, true);
+}
+#endif // CERES_USE_EIGEN_SPARSE
+
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/schur_eliminator.cc b/internal/ceres/schur_eliminator.cc
index 31f8354..4d9b175 100644
--- a/internal/ceres/schur_eliminator.cc
+++ b/internal/ceres/schur_eliminator.cc
@@ -37,7 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specializations.py.
+// This file is generated using generate_eliminator_specialization.py.
// Editing it manually is not recommended.
#include "ceres/linear_solver.h"
@@ -102,9 +102,24 @@ SchurEliminatorBase::Create(const LinearSolver::Options& options) {
}
if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
+ (options.f_block_size == 8)) {
+ return new SchurEliminator<2, 4, 8>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 9)) {
+ return new SchurEliminator<2, 4, 9>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
(options.f_block_size == Eigen::Dynamic)) {
return new SchurEliminator<2, 4, Eigen::Dynamic>(options);
}
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == Eigen::Dynamic) &&
+ (options.f_block_size == Eigen::Dynamic)) {
+ return new SchurEliminator<2, Eigen::Dynamic, Eigen::Dynamic>(options);
+ }
if ((options.row_block_size == 4) &&
(options.e_block_size == 4) &&
(options.f_block_size == 2)) {
diff --git a/internal/ceres/schur_eliminator_impl.h b/internal/ceres/schur_eliminator_impl.h
index c09b7fb..305d94e 100644
--- a/internal/ceres/schur_eliminator_impl.h
+++ b/internal/ceres/schur_eliminator_impl.h
@@ -45,6 +45,9 @@
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifdef CERES_USE_OPENMP
#include <omp.h>
#endif
diff --git a/internal/ceres/schur_jacobi_preconditioner.cc b/internal/ceres/schur_jacobi_preconditioner.cc
index 338df71..6dc9e89 100644
--- a/internal/ceres/schur_jacobi_preconditioner.cc
+++ b/internal/ceres/schur_jacobi_preconditioner.cc
@@ -33,10 +33,9 @@
#include <utility>
#include <vector>
#include "Eigen/Dense"
-#include "ceres/block_random_access_sparse_matrix.h"
+#include "ceres/block_random_access_diagonal_matrix.h"
#include "ceres/block_sparse_matrix.h"
#include "ceres/collections_port.h"
-#include "ceres/detect_structure.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/linear_solver.h"
#include "ceres/schur_eliminator.h"
@@ -57,16 +56,11 @@ SchurJacobiPreconditioner::SchurJacobiPreconditioner(
<< "SCHUR_JACOBI preconditioner.";
block_size_.resize(num_blocks);
- set<pair<int, int> > block_pairs;
-
- int num_block_diagonal_entries = 0;
for (int i = 0; i < num_blocks; ++i) {
block_size_[i] = bs.cols[i + options_.elimination_groups[0]].size;
- block_pairs.insert(make_pair(i, i));
- num_block_diagonal_entries += block_size_[i] * block_size_[i];
}
- m_.reset(new BlockRandomAccessSparseMatrix(block_size_, block_pairs));
+ m_.reset(new BlockRandomAccessDiagonalMatrix(block_size_));
InitEliminator(bs);
}
@@ -77,17 +71,13 @@ SchurJacobiPreconditioner::~SchurJacobiPreconditioner() {
void SchurJacobiPreconditioner::InitEliminator(
const CompressedRowBlockStructure& bs) {
LinearSolver::Options eliminator_options;
-
eliminator_options.elimination_groups = options_.elimination_groups;
eliminator_options.num_threads = options_.num_threads;
-
- DetectStructure(bs, options_.elimination_groups[0],
- &eliminator_options.row_block_size,
- &eliminator_options.e_block_size,
- &eliminator_options.f_block_size);
-
+ eliminator_options.e_block_size = options_.e_block_size;
+ eliminator_options.f_block_size = options_.f_block_size;
+ eliminator_options.row_block_size = options_.row_block_size;
eliminator_.reset(SchurEliminatorBase::Create(eliminator_options));
- eliminator_->Init(options_.elimination_groups[0], &bs);
+ eliminator_->Init(eliminator_options.elimination_groups[0], &bs);
}
// Update the values of the preconditioner matrix and factorize it.
@@ -118,7 +108,7 @@ void SchurJacobiPreconditioner::RightMultiply(const double* x,
CHECK_NOTNULL(y);
const double* lhs_values =
- down_cast<BlockRandomAccessSparseMatrix*>(m_.get())->matrix()->values();
+ down_cast<BlockRandomAccessDiagonalMatrix*>(m_.get())->matrix()->values();
// This loop can be easily multi-threaded with OpenMP if need be.
for (int i = 0; i < block_size_.size(); ++i) {
diff --git a/internal/ceres/schur_jacobi_preconditioner.h b/internal/ceres/schur_jacobi_preconditioner.h
index f6e7b0d..aecb015 100644
--- a/internal/ceres/schur_jacobi_preconditioner.h
+++ b/internal/ceres/schur_jacobi_preconditioner.h
@@ -49,7 +49,7 @@
namespace ceres {
namespace internal {
-class BlockRandomAccessSparseMatrix;
+class BlockRandomAccessDiagonalMatrix;
class BlockSparseMatrix;
struct CompressedRowBlockStructure;
class SchurEliminatorBase;
@@ -100,7 +100,7 @@ class SchurJacobiPreconditioner : public BlockSparseMatrixPreconditioner {
scoped_ptr<SchurEliminatorBase> eliminator_;
// Preconditioner matrix.
- scoped_ptr<BlockRandomAccessSparseMatrix> m_;
+ scoped_ptr<BlockRandomAccessDiagonalMatrix> m_;
CERES_DISALLOW_COPY_AND_ASSIGN(SchurJacobiPreconditioner);
};
diff --git a/internal/ceres/schur_ordering.cc b/internal/ceres/schur_ordering.cc
deleted file mode 100644
index 1cdff4e..0000000
--- a/internal/ceres/schur_ordering.cc
+++ /dev/null
@@ -1,101 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: sameeragarwal@google.com (Sameer Agarwal)
-
-#include "ceres/schur_ordering.h"
-
-#include "ceres/graph.h"
-#include "ceres/graph_algorithms.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "ceres/map_util.h"
-#include "ceres/parameter_block.h"
-#include "ceres/program.h"
-#include "ceres/residual_block.h"
-#include "glog/logging.h"
-
-namespace ceres {
-namespace internal {
-
-int ComputeSchurOrdering(const Program& program,
- vector<ParameterBlock*>* ordering) {
- CHECK_NOTNULL(ordering)->clear();
-
- scoped_ptr<Graph< ParameterBlock*> > graph(
- CHECK_NOTNULL(CreateHessianGraph(program)));
- int independent_set_size = IndependentSetOrdering(*graph, ordering);
- const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
-
- // Add the excluded blocks to back of the ordering vector.
- for (int i = 0; i < parameter_blocks.size(); ++i) {
- ParameterBlock* parameter_block = parameter_blocks[i];
- if (parameter_block->IsConstant()) {
- ordering->push_back(parameter_block);
- }
- }
-
- return independent_set_size;
-}
-
-Graph<ParameterBlock*>*
-CreateHessianGraph(const Program& program) {
- Graph<ParameterBlock*>* graph = new Graph<ParameterBlock*>;
- const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
- for (int i = 0; i < parameter_blocks.size(); ++i) {
- ParameterBlock* parameter_block = parameter_blocks[i];
- if (!parameter_block->IsConstant()) {
- graph->AddVertex(parameter_block);
- }
- }
-
- const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
- for (int i = 0; i < residual_blocks.size(); ++i) {
- const ResidualBlock* residual_block = residual_blocks[i];
- const int num_parameter_blocks = residual_block->NumParameterBlocks();
- ParameterBlock* const* parameter_blocks =
- residual_block->parameter_blocks();
- for (int j = 0; j < num_parameter_blocks; ++j) {
- if (parameter_blocks[j]->IsConstant()) {
- continue;
- }
-
- for (int k = j + 1; k < num_parameter_blocks; ++k) {
- if (parameter_blocks[k]->IsConstant()) {
- continue;
- }
-
- graph->AddEdge(parameter_blocks[j], parameter_blocks[k]);
- }
- }
- }
-
- return graph;
-}
-
-} // namespace internal
-} // namespace ceres
diff --git a/internal/ceres/schur_ordering_test.cc b/internal/ceres/schur_ordering_test.cc
deleted file mode 100644
index bd74ebb..0000000
--- a/internal/ceres/schur_ordering_test.cc
+++ /dev/null
@@ -1,177 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: sameeragarwal@google.com (Sameer Agarwal)
-
-#include "ceres/schur_ordering.h"
-
-#include <cstddef>
-#include <vector>
-#include "gtest/gtest.h"
-#include "ceres/collections_port.h"
-#include "ceres/graph.h"
-#include "ceres/problem_impl.h"
-#include "ceres/program.h"
-#include "ceres/stl_util.h"
-#include "ceres/cost_function.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "ceres/sized_cost_function.h"
-
-namespace ceres {
-namespace internal {
-
-typedef Graph<ParameterBlock*> HessianGraph;
-typedef HashSet<ParameterBlock*> VertexSet;
-
-template <int M, int N1 = 0, int N2 = 0, int N3 = 0>
-class DummyCostFunction: public SizedCostFunction<M, N1, N2, N3> {
- virtual bool Evaluate(double const* const* parameters,
- double* residuals,
- double** jacobians) const {
- return true;
- }
-};
-
-class SchurOrderingTest : public ::testing::Test {
- protected :
- virtual void SetUp() {
- // The explicit calls to AddParameterBlock are necessary because
- // the below tests depend on the specific numbering of the
- // parameter blocks.
- problem_.AddParameterBlock(x_, 3);
- problem_.AddParameterBlock(y_, 4);
- problem_.AddParameterBlock(z_, 5);
- problem_.AddParameterBlock(w_, 6);
-
- problem_.AddResidualBlock(new DummyCostFunction<2, 3>, NULL, x_);
- problem_.AddResidualBlock(new DummyCostFunction<6, 5, 4>, NULL, z_, y_);
- problem_.AddResidualBlock(new DummyCostFunction<3, 3, 5>, NULL, x_, z_);
- problem_.AddResidualBlock(new DummyCostFunction<7, 5, 3>, NULL, z_, x_);
- problem_.AddResidualBlock(new DummyCostFunction<1, 5, 3, 6>, NULL,
- z_, x_, w_);
- }
-
- ProblemImpl problem_;
- double x_[3], y_[4], z_[5], w_[6];
-};
-
-TEST_F(SchurOrderingTest, NoFixed) {
- const Program& program = problem_.program();
- const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
- scoped_ptr<HessianGraph> graph(CreateHessianGraph(program));
-
- const VertexSet& vertices = graph->vertices();
- EXPECT_EQ(vertices.size(), 4);
-
- for (int i = 0; i < 4; ++i) {
- EXPECT_TRUE(vertices.find(parameter_blocks[i]) != vertices.end());
- }
-
- {
- const VertexSet& neighbors = graph->Neighbors(parameter_blocks[0]);
- EXPECT_EQ(neighbors.size(), 2);
- EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
- EXPECT_TRUE(neighbors.find(parameter_blocks[3]) != neighbors.end());
- }
-
- {
- const VertexSet& neighbors = graph->Neighbors(parameter_blocks[1]);
- EXPECT_EQ(neighbors.size(), 1);
- EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
- }
-
- {
- const VertexSet& neighbors = graph->Neighbors(parameter_blocks[2]);
- EXPECT_EQ(neighbors.size(), 3);
- EXPECT_TRUE(neighbors.find(parameter_blocks[0]) != neighbors.end());
- EXPECT_TRUE(neighbors.find(parameter_blocks[1]) != neighbors.end());
- EXPECT_TRUE(neighbors.find(parameter_blocks[3]) != neighbors.end());
- }
-
- {
- const VertexSet& neighbors = graph->Neighbors(parameter_blocks[3]);
- EXPECT_EQ(neighbors.size(), 2);
- EXPECT_TRUE(neighbors.find(parameter_blocks[0]) != neighbors.end());
- EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
- }
-}
-
-TEST_F(SchurOrderingTest, AllFixed) {
- problem_.SetParameterBlockConstant(x_);
- problem_.SetParameterBlockConstant(y_);
- problem_.SetParameterBlockConstant(z_);
- problem_.SetParameterBlockConstant(w_);
-
- const Program& program = problem_.program();
- scoped_ptr<HessianGraph> graph(CreateHessianGraph(program));
- EXPECT_EQ(graph->vertices().size(), 0);
-}
-
-TEST_F(SchurOrderingTest, OneFixed) {
- problem_.SetParameterBlockConstant(x_);
-
- const Program& program = problem_.program();
- const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
- scoped_ptr<HessianGraph> graph(CreateHessianGraph(program));
-
- const VertexSet& vertices = graph->vertices();
-
- EXPECT_EQ(vertices.size(), 3);
- EXPECT_TRUE(vertices.find(parameter_blocks[0]) == vertices.end());
-
- for (int i = 1; i < 3; ++i) {
- EXPECT_TRUE(vertices.find(parameter_blocks[i]) != vertices.end());
- }
-
- {
- const VertexSet& neighbors = graph->Neighbors(parameter_blocks[1]);
- EXPECT_EQ(neighbors.size(), 1);
- EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
- }
-
- {
- const VertexSet& neighbors = graph->Neighbors(parameter_blocks[2]);
- EXPECT_EQ(neighbors.size(), 2);
- EXPECT_TRUE(neighbors.find(parameter_blocks[1]) != neighbors.end());
- EXPECT_TRUE(neighbors.find(parameter_blocks[3]) != neighbors.end());
- }
-
- {
- const VertexSet& neighbors = graph->Neighbors(parameter_blocks[3]);
- EXPECT_EQ(neighbors.size(), 1);
- EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
- }
-
- // The constant parameter block is at the end.
- vector<ParameterBlock*> ordering;
- ComputeSchurOrdering(program, &ordering);
- EXPECT_EQ(ordering.back(), parameter_blocks[0]);
-}
-
-} // namespace internal
-} // namespace ceres
diff --git a/internal/ceres/single_linkage_clustering.cc b/internal/ceres/single_linkage_clustering.cc
new file mode 100644
index 0000000..0a8b20c
--- /dev/null
+++ b/internal/ceres/single_linkage_clustering.cc
@@ -0,0 +1,110 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_NO_SUITESPARSE
+
+#include "ceres/single_linkage_clustering.h"
+
+#include "ceres/graph.h"
+#include "ceres/collections_port.h"
+#include "ceres/graph_algorithms.h"
+
+namespace ceres {
+namespace internal {
+
+int ComputeSingleLinkageClustering(
+ const SingleLinkageClusteringOptions& options,
+ const Graph<int>& graph,
+ HashMap<int, int>* membership) {
+ CHECK_NOTNULL(membership)->clear();
+
+ // Initially each vertex is in its own cluster.
+ const HashSet<int>& vertices = graph.vertices();
+ for (HashSet<int>::const_iterator it = vertices.begin();
+ it != vertices.end();
+ ++it) {
+ (*membership)[*it] = *it;
+ }
+
+ for (HashSet<int>::const_iterator it1 = vertices.begin();
+ it1 != vertices.end();
+ ++it1) {
+ const int vertex1 = *it1;
+ const HashSet<int>& neighbors = graph.Neighbors(vertex1);
+ for (HashSet<int>::const_iterator it2 = neighbors.begin();
+ it2 != neighbors.end();
+ ++it2) {
+ const int vertex2 = *it2;
+
+ // Since the graph is undirected, only pay attention to one side
+ // of the edge and ignore weak edges.
+ if ((vertex1 > vertex2) ||
+ (graph.EdgeWeight(vertex1, vertex2) < options.min_similarity)) {
+ continue;
+ }
+
+ // Use a union-find algorithm to keep track of the clusters.
+ const int c1 = FindConnectedComponent(vertex1, membership);
+ const int c2 = FindConnectedComponent(vertex2, membership);
+
+ if (c1 == c2) {
+ continue;
+ }
+
+ if (c1 < c2) {
+ (*membership)[c2] = c1;
+ } else {
+ (*membership)[c1] = c2;
+ }
+ }
+ }
+
+ // Make sure that every vertex is connected directly to the vertex
+ // identifying the cluster.
+ int num_clusters = 0;
+ for (HashMap<int, int>::iterator it = membership->begin();
+ it != membership->end();
+ ++it) {
+ it->second = FindConnectedComponent(it->first, membership);
+ if (it->first == it->second) {
+ ++num_clusters;
+ }
+ }
+
+ return num_clusters;
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_NO_SUITESPARSE
diff --git a/internal/ceres/schur_ordering.h b/internal/ceres/single_linkage_clustering.h
index 1f9a4ff..e6fdeab 100644
--- a/internal/ceres/schur_ordering.h
+++ b/internal/ceres/single_linkage_clustering.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2013 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
@@ -27,48 +27,48 @@
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
-//
-// Compute a parameter block ordering for use with the Schur
-// complement based algorithms.
-#ifndef CERES_INTERNAL_SCHUR_ORDERING_H_
-#define CERES_INTERNAL_SCHUR_ORDERING_H_
+#ifndef CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_
+#define CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_NO_SUITESPARSE
-#include <vector>
+#include "ceres/collections_port.h"
#include "ceres/graph.h"
-#include "ceres/types.h"
namespace ceres {
namespace internal {
-class Program;
-class ParameterBlock;
+struct SingleLinkageClusteringOptions {
+ SingleLinkageClusteringOptions()
+ : min_similarity(0.99) {
+ }
-// Uses an approximate independent set ordering to order the parameter
-// blocks of a problem so that it is suitable for use with Schur
-// complement based solvers. The output variable ordering contains an
-// ordering of the parameter blocks and the return value is size of
-// the independent set or the number of e_blocks (see
-// schur_complement_solver.h for an explanation). Constant parameters
-// are added to the end.
+ // Graph edges with edge weight less than min_similarity are ignored
+ // during the clustering process.
+ double min_similarity;
+};
+
+// Compute a partitioning of the vertices of the graph using the
+// single linkage clustering algorithm. Edges with weight less than
+// SingleLinkageClusteringOptions::min_similarity will be ignored.
//
-// The ordering vector has the structure
+// membership upon return will contain a mapping from the vertices of
+// the graph to an integer indicating the identity of the cluster that
+// it belongs to.
//
-// ordering = [independent set,
-// complement of the independent set,
-// fixed blocks]
-int ComputeSchurOrdering(const Program& program,
- vector<ParameterBlock* >* ordering);
-
-
-// Builds a graph on the parameter blocks of a Problem, whose
-// structure reflects the sparsity structure of the Hessian. Each
-// vertex corresponds to a parameter block in the Problem except for
-// parameter blocks that are marked constant. An edge connects two
-// parameter blocks, if they co-occur in a residual block.
-Graph<ParameterBlock*>* CreateHessianGraph(const Program& program);
+// The return value of this function is the number of clusters
+// identified by the algorithm.
+int ComputeSingleLinkageClustering(
+ const SingleLinkageClusteringOptions& options,
+ const Graph<int>& graph,
+ HashMap<int, int>* membership);
} // namespace internal
} // namespace ceres
-#endif // CERES_INTERNAL_SCHUR_ORDERING_H_
+#endif // CERES_NO_SUITESPARSE
+#endif // CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_
diff --git a/internal/ceres/single_linkage_clustering_test.cc b/internal/ceres/single_linkage_clustering_test.cc
new file mode 100644
index 0000000..1cbc5be
--- /dev/null
+++ b/internal/ceres/single_linkage_clustering_test.cc
@@ -0,0 +1,132 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sameer Agarwal (sameeragarwal@google.com)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_NO_SUITESPARSE
+
+#include "ceres/single_linkage_clustering.h"
+
+#include "ceres/collections_port.h"
+#include "ceres/graph.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(SingleLinkageClustering, GraphHasTwoComponents) {
+ Graph<int> graph;
+ const int kNumVertices = 6;
+ for (int i = 0; i < kNumVertices; ++i) {
+ graph.AddVertex(i);
+ }
+ // Graph structure:
+ //
+ // 0-1-2-3 4-5
+ graph.AddEdge(0, 1, 1.0);
+ graph.AddEdge(1, 2, 1.0);
+ graph.AddEdge(2, 3, 1.0);
+ graph.AddEdge(4, 5, 1.0);
+
+ SingleLinkageClusteringOptions options;
+ HashMap<int, int> membership;
+ ComputeSingleLinkageClustering(options, graph, &membership);
+ EXPECT_EQ(membership.size(), kNumVertices);
+
+ EXPECT_EQ(membership[1], membership[0]);
+ EXPECT_EQ(membership[2], membership[0]);
+ EXPECT_EQ(membership[3], membership[0]);
+ EXPECT_NE(membership[4], membership[0]);
+ EXPECT_NE(membership[5], membership[0]);
+ EXPECT_EQ(membership[4], membership[5]);
+}
+
+TEST(SingleLinkageClustering, ComponentWithWeakLink) {
+ Graph<int> graph;
+ const int kNumVertices = 6;
+ for (int i = 0; i < kNumVertices; ++i) {
+ graph.AddVertex(i);
+ }
+ // Graph structure:
+ //
+ // 0-1-2-3 4-5
+ graph.AddEdge(0, 1, 1.0);
+ graph.AddEdge(1, 2, 1.0);
+ graph.AddEdge(2, 3, 1.0);
+
+ // This component should break up into two.
+ graph.AddEdge(4, 5, 0.5);
+
+ SingleLinkageClusteringOptions options;
+ HashMap<int, int> membership;
+ ComputeSingleLinkageClustering(options, graph, &membership);
+ EXPECT_EQ(membership.size(), kNumVertices);
+
+ EXPECT_EQ(membership[1], membership[0]);
+ EXPECT_EQ(membership[2], membership[0]);
+ EXPECT_EQ(membership[3], membership[0]);
+ EXPECT_NE(membership[4], membership[0]);
+ EXPECT_NE(membership[5], membership[0]);
+ EXPECT_NE(membership[4], membership[5]);
+}
+
+TEST(SingleLinkageClustering, ComponentWithWeakLinkAndStrongLink) {
+ Graph<int> graph;
+ const int kNumVertices = 6;
+ for (int i = 0; i < kNumVertices; ++i) {
+ graph.AddVertex(i);
+ }
+  // Graph structure:
+  //
+  // 0-1-2-3-0 (cycle with one weak link)   4-5
+ graph.AddEdge(0, 1, 1.0);
+ graph.AddEdge(1, 2, 1.0);
+ graph.AddEdge(2, 3, 0.5); // Weak link
+ graph.AddEdge(0, 3, 1.0);
+
+  // This strong link should keep 4 and 5 in the same cluster.
+ graph.AddEdge(4, 5, 1.0);
+
+ SingleLinkageClusteringOptions options;
+ HashMap<int, int> membership;
+ ComputeSingleLinkageClustering(options, graph, &membership);
+ EXPECT_EQ(membership.size(), kNumVertices);
+
+ EXPECT_EQ(membership[1], membership[0]);
+ EXPECT_EQ(membership[2], membership[0]);
+ EXPECT_EQ(membership[3], membership[0]);
+ EXPECT_EQ(membership[4], membership[5]);
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_NO_SUITESPARSE
diff --git a/internal/ceres/small_blas.h b/internal/ceres/small_blas.h
index e14e664..5639664 100644
--- a/internal/ceres/small_blas.h
+++ b/internal/ceres/small_blas.h
@@ -35,36 +35,13 @@
#ifndef CERES_INTERNAL_SMALL_BLAS_H_
#define CERES_INTERNAL_SMALL_BLAS_H_
+#include "ceres/internal/port.h"
#include "ceres/internal/eigen.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
-// Remove the ".noalias()" annotation from the matrix matrix
-// mutliplies to produce a correct build with the Android NDK,
-// including versions 6, 7, 8, and 8b, when built with STLPort and the
-// non-standalone toolchain (i.e. ndk-build). This appears to be a
-// compiler bug; if the workaround is not in place, the line
-//
-// block.noalias() -= A * B;
-//
-// gets compiled to
-//
-// block.noalias() += A * B;
-//
-// which breaks schur elimination. Introducing a temporary by removing the
-// .noalias() annotation causes the issue to disappear. Tracking this
-// issue down was tricky, since the test suite doesn't run when built with
-// the non-standalone toolchain.
-//
-// TODO(keir): Make a reproduction case for this and send it upstream.
-#ifdef CERES_WORK_AROUND_ANDROID_NDK_COMPILER_BUG
-#define CERES_MAYBE_NOALIAS
-#else
-#define CERES_MAYBE_NOALIAS .noalias()
-#endif
-
// The following three macros are used to share code and reduce
// template junk across the various GEMM variants.
#define CERES_GEMM_BEGIN(name) \
@@ -167,11 +144,11 @@ CERES_GEMM_BEGIN(MatrixMatrixMultiplyEigen) {
block(Cref, start_row_c, start_col_c, num_row_a, num_col_b);
if (kOperation > 0) {
- block CERES_MAYBE_NOALIAS += Aref * Bref;
+ block.noalias() += Aref * Bref;
} else if (kOperation < 0) {
- block CERES_MAYBE_NOALIAS -= Aref * Bref;
+ block.noalias() -= Aref * Bref;
} else {
- block CERES_MAYBE_NOALIAS = Aref * Bref;
+ block.noalias() = Aref * Bref;
}
}
@@ -227,11 +204,11 @@ CERES_GEMM_BEGIN(MatrixTransposeMatrixMultiplyEigen) {
start_row_c, start_col_c,
num_col_a, num_col_b);
if (kOperation > 0) {
- block CERES_MAYBE_NOALIAS += Aref.transpose() * Bref;
+ block.noalias() += Aref.transpose() * Bref;
} else if (kOperation < 0) {
- block CERES_MAYBE_NOALIAS -= Aref.transpose() * Bref;
+ block.noalias() -= Aref.transpose() * Bref;
} else {
- block CERES_MAYBE_NOALIAS = Aref.transpose() * Bref;
+ block.noalias() = Aref.transpose() * Bref;
}
}
@@ -393,8 +370,6 @@ inline void MatrixTransposeVectorMultiply(const double* A,
#endif // CERES_NO_CUSTOM_BLAS
}
-
-#undef CERES_MAYBE_NOALIAS
#undef CERES_GEMM_BEGIN
#undef CERES_GEMM_EIGEN_HEADER
#undef CERES_GEMM_NAIVE_HEADER
diff --git a/internal/ceres/solver.cc b/internal/ceres/solver.cc
index 3b67746..3a57084 100644
--- a/internal/ceres/solver.cc
+++ b/internal/ceres/solver.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
@@ -29,19 +29,257 @@
// Author: keir@google.com (Keir Mierle)
// sameeragarwal@google.com (Sameer Agarwal)
+#include "ceres/internal/port.h"
#include "ceres/solver.h"
+#include <sstream> // NOLINT
#include <vector>
+
#include "ceres/problem.h"
#include "ceres/problem_impl.h"
#include "ceres/program.h"
#include "ceres/solver_impl.h"
#include "ceres/stringprintf.h"
+#include "ceres/types.h"
+#include "ceres/version.h"
#include "ceres/wall_time.h"
namespace ceres {
namespace {
+#define OPTION_OP(x, y, OP) \
+ if (!(options.x OP y)) { \
+ std::stringstream ss; \
+ ss << "Invalid configuration. "; \
+ ss << string("Solver::Options::" #x " = ") << options.x << ". "; \
+ ss << "Violated constraint: "; \
+ ss << string("Solver::Options::" #x " " #OP " "#y); \
+ *error = ss.str(); \
+ return false; \
+ }
+
+#define OPTION_OP_OPTION(x, y, OP) \
+ if (!(options.x OP options.y)) { \
+ std::stringstream ss; \
+ ss << "Invalid configuration. "; \
+ ss << string("Solver::Options::" #x " = ") << options.x << ". "; \
+ ss << string("Solver::Options::" #y " = ") << options.y << ". "; \
+ ss << "Violated constraint: "; \
+ ss << string("Solver::Options::" #x ); \
+ ss << string(#OP " Solver::Options::" #y "."); \
+ *error = ss.str(); \
+ return false; \
+ }
+
+#define OPTION_GE(x, y) OPTION_OP(x, y, >=);
+#define OPTION_GT(x, y) OPTION_OP(x, y, >);
+#define OPTION_LE(x, y) OPTION_OP(x, y, <=);
+#define OPTION_LT(x, y) OPTION_OP(x, y, <);
+#define OPTION_LE_OPTION(x, y) OPTION_OP_OPTION(x ,y, <=)
+#define OPTION_LT_OPTION(x, y) OPTION_OP_OPTION(x ,y, <)
+
+bool CommonOptionsAreValid(const Solver::Options& options, string* error) {
+ OPTION_GE(max_num_iterations, 0);
+ OPTION_GE(max_solver_time_in_seconds, 0.0);
+ OPTION_GE(function_tolerance, 0.0);
+ OPTION_GE(gradient_tolerance, 0.0);
+ OPTION_GE(parameter_tolerance, 0.0);
+ OPTION_GT(num_threads, 0);
+ OPTION_GT(num_linear_solver_threads, 0);
+ if (options.check_gradients) {
+ OPTION_GT(gradient_check_relative_precision, 0.0);
+ OPTION_GT(numeric_derivative_relative_step_size, 0.0);
+ }
+ return true;
+}
+
+bool TrustRegionOptionsAreValid(const Solver::Options& options, string* error) {
+ OPTION_GT(initial_trust_region_radius, 0.0);
+ OPTION_GT(min_trust_region_radius, 0.0);
+ OPTION_GT(max_trust_region_radius, 0.0);
+ OPTION_LE_OPTION(min_trust_region_radius, max_trust_region_radius);
+ OPTION_LE_OPTION(min_trust_region_radius, initial_trust_region_radius);
+ OPTION_LE_OPTION(initial_trust_region_radius, max_trust_region_radius);
+ OPTION_GE(min_relative_decrease, 0.0);
+ OPTION_GE(min_lm_diagonal, 0.0);
+ OPTION_GE(max_lm_diagonal, 0.0);
+ OPTION_LE_OPTION(min_lm_diagonal, max_lm_diagonal);
+ OPTION_GE(max_num_consecutive_invalid_steps, 0);
+ OPTION_GT(eta, 0.0);
+ OPTION_GE(min_linear_solver_iterations, 1);
+ OPTION_GE(max_linear_solver_iterations, 1);
+ OPTION_LE_OPTION(min_linear_solver_iterations, max_linear_solver_iterations);
+
+ if (options.use_inner_iterations) {
+ OPTION_GE(inner_iteration_tolerance, 0.0);
+ }
+
+ if (options.use_nonmonotonic_steps) {
+ OPTION_GT(max_consecutive_nonmonotonic_steps, 0);
+ }
+
+ if (options.preconditioner_type == CLUSTER_JACOBI &&
+ options.sparse_linear_algebra_library_type != SUITE_SPARSE) {
+ *error = "CLUSTER_JACOBI requires "
+ "Solver::Options::sparse_linear_algebra_library_type to be "
+ "SUITE_SPARSE";
+ return false;
+ }
+
+ if (options.preconditioner_type == CLUSTER_TRIDIAGONAL &&
+ options.sparse_linear_algebra_library_type != SUITE_SPARSE) {
+ *error = "CLUSTER_TRIDIAGONAL requires "
+ "Solver::Options::sparse_linear_algebra_library_type to be "
+ "SUITE_SPARSE";
+ return false;
+ }
+
+#ifdef CERES_NO_LAPACK
+ if (options.dense_linear_algebra_library_type == LAPACK) {
+ if (options.linear_solver_type == DENSE_NORMAL_CHOLESKY) {
+ *error = "Can't use DENSE_NORMAL_CHOLESKY with LAPACK because "
+ "LAPACK was not enabled when Ceres was built.";
+ return false;
+ }
+
+ if (options.linear_solver_type == DENSE_QR) {
+ *error = "Can't use DENSE_QR with LAPACK because "
+ "LAPACK was not enabled when Ceres was built.";
+ return false;
+ }
+
+ if (options.linear_solver_type == DENSE_SCHUR) {
+ *error = "Can't use DENSE_SCHUR with LAPACK because "
+ "LAPACK was not enabled when Ceres was built.";
+ return false;
+ }
+ }
+#endif
+
+#ifdef CERES_NO_SUITESPARSE
+ if (options.sparse_linear_algebra_library_type == SUITE_SPARSE) {
+ if (options.linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
+ *error = "Can't use SPARSE_NORMAL_CHOLESKY with SUITESPARSE because "
+ "SuiteSparse was not enabled when Ceres was built.";
+ return false;
+ }
+
+ if (options.linear_solver_type == SPARSE_SCHUR) {
+ *error = "Can't use SPARSE_SCHUR with SUITESPARSE because "
+ "SuiteSparse was not enabled when Ceres was built.";
+ return false;
+ }
+
+ if (options.preconditioner_type == CLUSTER_JACOBI) {
+ *error = "CLUSTER_JACOBI preconditioner not supported. "
+ "SuiteSparse was not enabled when Ceres was built.";
+ return false;
+ }
+
+ if (options.preconditioner_type == CLUSTER_TRIDIAGONAL) {
+ *error = "CLUSTER_TRIDIAGONAL preconditioner not supported. "
+ "SuiteSparse was not enabled when Ceres was built.";
+ return false;
+ }
+ }
+#endif
+
+#ifdef CERES_NO_CXSPARSE
+ if (options.sparse_linear_algebra_library_type == CX_SPARSE) {
+ if (options.linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
+ *error = "Can't use SPARSE_NORMAL_CHOLESKY with CX_SPARSE because "
+ "CXSparse was not enabled when Ceres was built.";
+ return false;
+ }
+
+ if (options.linear_solver_type == SPARSE_SCHUR) {
+ *error = "Can't use SPARSE_SCHUR with CX_SPARSE because "
+ "CXSparse was not enabled when Ceres was built.";
+ return false;
+ }
+ }
+#endif
+
+ if (options.trust_region_strategy_type == DOGLEG) {
+ if (options.linear_solver_type == ITERATIVE_SCHUR ||
+ options.linear_solver_type == CGNR) {
+ *error = "DOGLEG only supports exact factorization based linear "
+ "solvers. If you want to use an iterative solver please "
+ "use LEVENBERG_MARQUARDT as the trust_region_strategy_type";
+ return false;
+ }
+ }
+
+ if (options.trust_region_minimizer_iterations_to_dump.size() > 0 &&
+ options.trust_region_problem_dump_format_type != CONSOLE &&
+ options.trust_region_problem_dump_directory.empty()) {
+ *error = "Solver::Options::trust_region_problem_dump_directory is empty.";
+ return false;
+ }
+
+ if (options.dynamic_sparsity &&
+ options.linear_solver_type != SPARSE_NORMAL_CHOLESKY) {
+ *error = "Dynamic sparsity is only supported with SPARSE_NORMAL_CHOLESKY.";
+ return false;
+ }
+
+ return true;
+}
+
+bool LineSearchOptionsAreValid(const Solver::Options& options, string* error) {
+ OPTION_GT(max_lbfgs_rank, 0);
+ OPTION_GT(min_line_search_step_size, 0.0);
+ OPTION_GT(max_line_search_step_contraction, 0.0);
+ OPTION_LT(max_line_search_step_contraction, 1.0);
+ OPTION_LT_OPTION(max_line_search_step_contraction,
+ min_line_search_step_contraction);
+ OPTION_LE(min_line_search_step_contraction, 1.0);
+ OPTION_GT(max_num_line_search_step_size_iterations, 0);
+ OPTION_GT(line_search_sufficient_function_decrease, 0.0);
+ OPTION_LT_OPTION(line_search_sufficient_function_decrease,
+ line_search_sufficient_curvature_decrease);
+ OPTION_LT(line_search_sufficient_curvature_decrease, 1.0);
+ OPTION_GT(max_line_search_step_expansion, 1.0);
+
+ if ((options.line_search_direction_type == ceres::BFGS ||
+ options.line_search_direction_type == ceres::LBFGS) &&
+ options.line_search_type != ceres::WOLFE) {
+
+ *error =
+ string("Invalid configuration: Solver::Options::line_search_type = ")
+ + string(LineSearchTypeToString(options.line_search_type))
+ + string(". When using (L)BFGS, "
+ "Solver::Options::line_search_type must be set to WOLFE.");
+ return false;
+ }
+
+ // Warn user if they have requested BISECTION interpolation, but constraints
+ // on max/min step size change during line search prevent bisection scaling
+ // from occurring. Warn only, as this is likely a user mistake, but one which
+ // does not prevent us from continuing.
+ LOG_IF(WARNING,
+ (options.line_search_interpolation_type == ceres::BISECTION &&
+ (options.max_line_search_step_contraction > 0.5 ||
+ options.min_line_search_step_contraction < 0.5)))
+ << "Line search interpolation type is BISECTION, but specified "
+ << "max_line_search_step_contraction: "
+ << options.max_line_search_step_contraction << ", and "
+ << "min_line_search_step_contraction: "
+ << options.min_line_search_step_contraction
+ << ", prevent bisection (0.5) scaling, continuing with solve regardless.";
+
+ return true;
+}
+
+#undef OPTION_OP
+#undef OPTION_OP_OPTION
+#undef OPTION_GT
+#undef OPTION_GE
+#undef OPTION_LE
+#undef OPTION_LT
+#undef OPTION_LE_OPTION
+#undef OPTION_LT_OPTION
+
void StringifyOrdering(const vector<int>& ordering, string* report) {
if (ordering.size() == 0) {
internal::StringAppendF(report, "AUTOMATIC");
@@ -54,11 +292,19 @@ void StringifyOrdering(const vector<int>& ordering, string* report) {
internal::StringAppendF(report, "%d", ordering.back());
}
-} // namespace
+} // namespace
+
+bool Solver::Options::IsValid(string* error) const {
+ if (!CommonOptionsAreValid(*this, error)) {
+ return false;
+ }
-Solver::Options::~Options() {
- delete linear_solver_ordering;
- delete inner_iteration_ordering;
+ if (minimizer_type == TRUST_REGION) {
+ return TrustRegionOptionsAreValid(*this, error);
+ }
+
+ CHECK_EQ(minimizer_type, LINE_SEARCH);
+ return LineSearchOptionsAreValid(*this, error);
}
Solver::~Solver() {}
@@ -67,8 +313,16 @@ void Solver::Solve(const Solver::Options& options,
Problem* problem,
Solver::Summary* summary) {
double start_time_seconds = internal::WallTimeInSeconds();
- internal::ProblemImpl* problem_impl =
- CHECK_NOTNULL(problem)->problem_impl_.get();
+ CHECK_NOTNULL(problem);
+ CHECK_NOTNULL(summary);
+
+ *summary = Summary();
+ if (!options.IsValid(&summary->message)) {
+ LOG(ERROR) << "Terminating: " << summary->message;
+ return;
+ }
+
+ internal::ProblemImpl* problem_impl = problem->problem_impl_.get();
internal::SolverImpl::Solve(options, problem_impl, summary);
summary->total_time_in_seconds =
internal::WallTimeInSeconds() - start_time_seconds;
@@ -85,7 +339,8 @@ Solver::Summary::Summary()
// Invalid values for most fields, to ensure that we are not
// accidentally reporting default values.
: minimizer_type(TRUST_REGION),
- termination_type(DID_NOT_RUN),
+ termination_type(FAILURE),
+ message("ceres::Solve was not called."),
initial_cost(-1.0),
final_cost(-1.0),
fixed_cost(-1.0),
@@ -119,75 +374,51 @@ Solver::Summary::Summary()
inner_iterations_given(false),
inner_iterations_used(false),
preconditioner_type(IDENTITY),
+ visibility_clustering_type(CANONICAL_VIEWS),
trust_region_strategy_type(LEVENBERG_MARQUARDT),
dense_linear_algebra_library_type(EIGEN),
sparse_linear_algebra_library_type(SUITE_SPARSE),
line_search_direction_type(LBFGS),
- line_search_type(ARMIJO) {
+ line_search_type(ARMIJO),
+ line_search_interpolation_type(BISECTION),
+ nonlinear_conjugate_gradient_type(FLETCHER_REEVES),
+ max_lbfgs_rank(-1) {
}
-string Solver::Summary::BriefReport() const {
- string report = "Ceres Solver Report: ";
- if (termination_type == DID_NOT_RUN) {
- CHECK(!error.empty())
- << "Solver terminated with DID_NOT_RUN but the solver did not "
- << "return a reason. This is a Ceres error. Please report this "
- << "to the Ceres team";
- return report + "Termination: DID_NOT_RUN, because " + error;
- }
-
- internal::StringAppendF(&report, "Iterations: %d",
- num_successful_steps + num_unsuccessful_steps);
- internal::StringAppendF(&report, ", Initial cost: %e", initial_cost);
-
- // If the solver failed or was aborted, then the final_cost has no
- // meaning.
- if (termination_type != NUMERICAL_FAILURE &&
- termination_type != USER_ABORT) {
- internal::StringAppendF(&report, ", Final cost: %e", final_cost);
- }
-
- internal::StringAppendF(&report, ", Termination: %s.",
- SolverTerminationTypeToString(termination_type));
- return report;
-};
-
using internal::StringAppendF;
using internal::StringPrintf;
+string Solver::Summary::BriefReport() const {
+ return StringPrintf("Ceres Solver Report: "
+ "Iterations: %d, "
+ "Initial cost: %e, "
+ "Final cost: %e, "
+ "Termination: %s",
+ num_successful_steps + num_unsuccessful_steps,
+ initial_cost,
+ final_cost,
+ TerminationTypeToString(termination_type));
+};
+
string Solver::Summary::FullReport() const {
string report =
"\n"
- "Ceres Solver Report\n"
- "-------------------\n";
-
- if (termination_type == DID_NOT_RUN) {
- StringAppendF(&report, " Original\n");
- StringAppendF(&report, "Parameter blocks % 10d\n", num_parameter_blocks);
- StringAppendF(&report, "Parameters % 10d\n", num_parameters);
- if (num_effective_parameters != num_parameters) {
- StringAppendF(&report, "Effective parameters% 10d\n", num_parameters);
- }
-
- StringAppendF(&report, "Residual blocks % 10d\n",
- num_residual_blocks);
- StringAppendF(&report, "Residuals % 10d\n\n",
- num_residuals);
- } else {
- StringAppendF(&report, "%45s %21s\n", "Original", "Reduced");
- StringAppendF(&report, "Parameter blocks % 25d% 25d\n",
- num_parameter_blocks, num_parameter_blocks_reduced);
- StringAppendF(&report, "Parameters % 25d% 25d\n",
- num_parameters, num_parameters_reduced);
- if (num_effective_parameters_reduced != num_parameters_reduced) {
- StringAppendF(&report, "Effective parameters% 25d% 25d\n",
- num_effective_parameters, num_effective_parameters_reduced);
- }
- StringAppendF(&report, "Residual blocks % 25d% 25d\n",
- num_residual_blocks, num_residual_blocks_reduced);
- StringAppendF(&report, "Residual % 25d% 25d\n",
- num_residuals, num_residuals_reduced);
+ "Ceres Solver v" CERES_VERSION_STRING " Solve Report\n"
+ "----------------------------------\n";
+
+ StringAppendF(&report, "%45s %21s\n", "Original", "Reduced");
+ StringAppendF(&report, "Parameter blocks % 25d% 25d\n",
+ num_parameter_blocks, num_parameter_blocks_reduced);
+ StringAppendF(&report, "Parameters % 25d% 25d\n",
+ num_parameters, num_parameters_reduced);
+ if (num_effective_parameters_reduced != num_parameters_reduced) {
+ StringAppendF(&report, "Effective parameters% 25d% 25d\n",
+ num_effective_parameters, num_effective_parameters_reduced);
}
+ StringAppendF(&report, "Residual blocks % 25d% 25d\n",
+ num_residual_blocks, num_residual_blocks_reduced);
+ StringAppendF(&report, "Residual % 25d% 25d\n",
+ num_residuals, num_residuals_reduced);
if (minimizer_type == TRUST_REGION) {
// TRUST_SEARCH HEADER
@@ -237,6 +468,14 @@ string Solver::Summary::FullReport() const {
PreconditionerTypeToString(preconditioner_type));
}
+ if (preconditioner_type == CLUSTER_JACOBI ||
+ preconditioner_type == CLUSTER_TRIDIAGONAL) {
+ StringAppendF(&report, "Visibility clustering%24s%25s\n",
+ VisibilityClusteringTypeToString(
+ visibility_clustering_type),
+ VisibilityClusteringTypeToString(
+ visibility_clustering_type));
+ }
StringAppendF(&report, "Threads % 25d% 25d\n",
num_threads_given, num_threads_used);
StringAppendF(&report, "Linear solver threads % 23d% 25d\n",
@@ -305,21 +544,10 @@ string Solver::Summary::FullReport() const {
num_threads_given, num_threads_used);
}
- if (termination_type == DID_NOT_RUN) {
- CHECK(!error.empty())
- << "Solver terminated with DID_NOT_RUN but the solver did not "
- << "return a reason. This is a Ceres error. Please report this "
- << "to the Ceres team";
- StringAppendF(&report, "Termination: %20s\n",
- "DID_NOT_RUN");
- StringAppendF(&report, "Reason: %s\n", error.c_str());
- return report;
- }
-
StringAppendF(&report, "\nCost:\n");
StringAppendF(&report, "Initial % 30e\n", initial_cost);
- if (termination_type != NUMERICAL_FAILURE &&
- termination_type != USER_ABORT) {
+ if (termination_type != FAILURE &&
+ termination_type != USER_FAILURE) {
StringAppendF(&report, "Final % 30e\n", final_cost);
StringAppendF(&report, "Change % 30e\n",
initial_cost - final_cost);
@@ -370,9 +598,15 @@ string Solver::Summary::FullReport() const {
StringAppendF(&report, "Total %25.3f\n\n",
total_time_in_seconds);
- StringAppendF(&report, "Termination: %25s\n",
- SolverTerminationTypeToString(termination_type));
+ StringAppendF(&report, "Termination: %25s (%s)\n",
+ TerminationTypeToString(termination_type), message.c_str());
return report;
};
+bool Solver::Summary::IsSolutionUsable() const {
+ return (termination_type == CONVERGENCE ||
+ termination_type == NO_CONVERGENCE ||
+ termination_type == USER_SUCCESS);
+}
+
} // namespace ceres
diff --git a/internal/ceres/solver_impl.cc b/internal/ceres/solver_impl.cc
index 83faa05..a1cf4ca 100644
--- a/internal/ceres/solver_impl.cc
+++ b/internal/ceres/solver_impl.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
@@ -34,6 +34,8 @@
#include <iostream> // NOLINT
#include <numeric>
#include <string>
+#include "ceres/array_utils.h"
+#include "ceres/callbacks.h"
#include "ceres/coordinate_descent_minimizer.h"
#include "ceres/cxsparse.h"
#include "ceres/evaluator.h"
@@ -47,168 +49,20 @@
#include "ceres/ordered_groups.h"
#include "ceres/parameter_block.h"
#include "ceres/parameter_block_ordering.h"
+#include "ceres/preconditioner.h"
#include "ceres/problem.h"
#include "ceres/problem_impl.h"
#include "ceres/program.h"
+#include "ceres/reorder_program.h"
#include "ceres/residual_block.h"
#include "ceres/stringprintf.h"
#include "ceres/suitesparse.h"
+#include "ceres/summary_utils.h"
#include "ceres/trust_region_minimizer.h"
#include "ceres/wall_time.h"
namespace ceres {
namespace internal {
-namespace {
-
-// Callback for updating the user's parameter blocks. Updates are only
-// done if the step is successful.
-class StateUpdatingCallback : public IterationCallback {
- public:
- StateUpdatingCallback(Program* program, double* parameters)
- : program_(program), parameters_(parameters) {}
-
- CallbackReturnType operator()(const IterationSummary& summary) {
- if (summary.step_is_successful) {
- program_->StateVectorToParameterBlocks(parameters_);
- program_->CopyParameterBlockStateToUserState();
- }
- return SOLVER_CONTINUE;
- }
-
- private:
- Program* program_;
- double* parameters_;
-};
-
-void SetSummaryFinalCost(Solver::Summary* summary) {
- summary->final_cost = summary->initial_cost;
- // We need the loop here, instead of just looking at the last
- // iteration because the minimizer maybe making non-monotonic steps.
- for (int i = 0; i < summary->iterations.size(); ++i) {
- const IterationSummary& iteration_summary = summary->iterations[i];
- summary->final_cost = min(iteration_summary.cost, summary->final_cost);
- }
-}
-
-// Callback for logging the state of the minimizer to STDERR or STDOUT
-// depending on the user's preferences and logging level.
-class TrustRegionLoggingCallback : public IterationCallback {
- public:
- explicit TrustRegionLoggingCallback(bool log_to_stdout)
- : log_to_stdout_(log_to_stdout) {}
-
- ~TrustRegionLoggingCallback() {}
-
- CallbackReturnType operator()(const IterationSummary& summary) {
- const char* kReportRowFormat =
- "% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e "
- "rho:% 3.2e mu:% 3.2e li:% 3d it:% 3.2e tt:% 3.2e";
- string output = StringPrintf(kReportRowFormat,
- summary.iteration,
- summary.cost,
- summary.cost_change,
- summary.gradient_max_norm,
- summary.step_norm,
- summary.relative_decrease,
- summary.trust_region_radius,
- summary.linear_solver_iterations,
- summary.iteration_time_in_seconds,
- summary.cumulative_time_in_seconds);
- if (log_to_stdout_) {
- cout << output << endl;
- } else {
- VLOG(1) << output;
- }
- return SOLVER_CONTINUE;
- }
-
- private:
- const bool log_to_stdout_;
-};
-
-// Callback for logging the state of the minimizer to STDERR or STDOUT
-// depending on the user's preferences and logging level.
-class LineSearchLoggingCallback : public IterationCallback {
- public:
- explicit LineSearchLoggingCallback(bool log_to_stdout)
- : log_to_stdout_(log_to_stdout) {}
-
- ~LineSearchLoggingCallback() {}
-
- CallbackReturnType operator()(const IterationSummary& summary) {
- const char* kReportRowFormat =
- "% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e "
- "s:% 3.2e e:% 3d it:% 3.2e tt:% 3.2e";
- string output = StringPrintf(kReportRowFormat,
- summary.iteration,
- summary.cost,
- summary.cost_change,
- summary.gradient_max_norm,
- summary.step_norm,
- summary.step_size,
- summary.line_search_function_evaluations,
- summary.iteration_time_in_seconds,
- summary.cumulative_time_in_seconds);
- if (log_to_stdout_) {
- cout << output << endl;
- } else {
- VLOG(1) << output;
- }
- return SOLVER_CONTINUE;
- }
-
- private:
- const bool log_to_stdout_;
-};
-
-
-// Basic callback to record the execution of the solver to a file for
-// offline analysis.
-class FileLoggingCallback : public IterationCallback {
- public:
- explicit FileLoggingCallback(const string& filename)
- : fptr_(NULL) {
- fptr_ = fopen(filename.c_str(), "w");
- CHECK_NOTNULL(fptr_);
- }
-
- virtual ~FileLoggingCallback() {
- if (fptr_ != NULL) {
- fclose(fptr_);
- }
- }
-
- virtual CallbackReturnType operator()(const IterationSummary& summary) {
- fprintf(fptr_,
- "%4d %e %e\n",
- summary.iteration,
- summary.cost,
- summary.cumulative_time_in_seconds);
- return SOLVER_CONTINUE;
- }
- private:
- FILE* fptr_;
-};
-
-// Iterate over each of the groups in order of their priority and fill
-// summary with their sizes.
-void SummarizeOrdering(ParameterBlockOrdering* ordering,
- vector<int>* summary) {
- CHECK_NOTNULL(summary)->clear();
- if (ordering == NULL) {
- return;
- }
-
- const map<int, set<double*> >& group_to_elements =
- ordering->group_to_elements();
- for (map<int, set<double*> >::const_iterator it = group_to_elements.begin();
- it != group_to_elements.end();
- ++it) {
- summary->push_back(it->second.size());
- }
-}
-
-} // namespace
void SolverImpl::TrustRegionMinimize(
const Solver::Options& options,
@@ -216,27 +70,26 @@ void SolverImpl::TrustRegionMinimize(
CoordinateDescentMinimizer* inner_iteration_minimizer,
Evaluator* evaluator,
LinearSolver* linear_solver,
- double* parameters,
Solver::Summary* summary) {
Minimizer::Options minimizer_options(options);
+ minimizer_options.is_constrained = program->IsBoundsConstrained();
- // TODO(sameeragarwal): Add support for logging the configuration
- // and more detailed stats.
- scoped_ptr<IterationCallback> file_logging_callback;
- if (!options.solver_log.empty()) {
- file_logging_callback.reset(new FileLoggingCallback(options.solver_log));
- minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
- file_logging_callback.get());
- }
+ // The optimizer works on contiguous parameter vectors; allocate
+ // some.
+ Vector parameters(program->NumParameters());
+
+ // Collect the discontiguous parameters into a contiguous state
+ // vector.
+ program->ParameterBlocksToStateVector(parameters.data());
- TrustRegionLoggingCallback logging_callback(
- options.minimizer_progress_to_stdout);
+ LoggingCallback logging_callback(TRUST_REGION,
+ options.minimizer_progress_to_stdout);
if (options.logging_type != SILENT) {
minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
&logging_callback);
}
- StateUpdatingCallback updating_callback(program, parameters);
+ StateUpdatingCallback updating_callback(program, parameters.data());
if (options.update_state_every_iteration) {
// This must get pushed to the front of the callbacks so that it is run
// before any of the user callbacks.
@@ -266,37 +119,42 @@ void SolverImpl::TrustRegionMinimize(
TrustRegionMinimizer minimizer;
double minimizer_start_time = WallTimeInSeconds();
- minimizer.Minimize(minimizer_options, parameters, summary);
+ minimizer.Minimize(minimizer_options, parameters.data(), summary);
+
+ // If the user aborted mid-optimization or the optimization
+ // terminated because of a numerical failure, then do not update
+ // user state.
+ if (summary->termination_type != USER_FAILURE &&
+ summary->termination_type != FAILURE) {
+ program->StateVectorToParameterBlocks(parameters.data());
+ program->CopyParameterBlockStateToUserState();
+ }
+
summary->minimizer_time_in_seconds =
WallTimeInSeconds() - minimizer_start_time;
}
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
void SolverImpl::LineSearchMinimize(
const Solver::Options& options,
Program* program,
Evaluator* evaluator,
- double* parameters,
Solver::Summary* summary) {
Minimizer::Options minimizer_options(options);
- // TODO(sameeragarwal): Add support for logging the configuration
- // and more detailed stats.
- scoped_ptr<IterationCallback> file_logging_callback;
- if (!options.solver_log.empty()) {
- file_logging_callback.reset(new FileLoggingCallback(options.solver_log));
- minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
- file_logging_callback.get());
- }
+ // The optimizer works on contiguous parameter vectors; allocate some.
+ Vector parameters(program->NumParameters());
+
+ // Collect the discontiguous parameters into a contiguous state vector.
+ program->ParameterBlocksToStateVector(parameters.data());
- LineSearchLoggingCallback logging_callback(
- options.minimizer_progress_to_stdout);
+ LoggingCallback logging_callback(LINE_SEARCH,
+ options.minimizer_progress_to_stdout);
if (options.logging_type != SILENT) {
minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
&logging_callback);
}
- StateUpdatingCallback updating_callback(program, parameters);
+ StateUpdatingCallback updating_callback(program, parameters.data());
if (options.update_state_every_iteration) {
// This must get pushed to the front of the callbacks so that it is run
// before any of the user callbacks.
@@ -308,11 +166,20 @@ void SolverImpl::LineSearchMinimize(
LineSearchMinimizer minimizer;
double minimizer_start_time = WallTimeInSeconds();
- minimizer.Minimize(minimizer_options, parameters, summary);
+ minimizer.Minimize(minimizer_options, parameters.data(), summary);
+
+ // If the user aborted mid-optimization or the optimization
+ // terminated because of a numerical failure, then do not update
+ // user state.
+ if (summary->termination_type != USER_FAILURE &&
+ summary->termination_type != FAILURE) {
+ program->StateVectorToParameterBlocks(parameters.data());
+ program->CopyParameterBlockStateToUserState();
+ }
+
summary->minimizer_time_in_seconds =
WallTimeInSeconds() - minimizer_start_time;
}
-#endif // CERES_NO_LINE_SEARCH_MINIMIZER
void SolverImpl::Solve(const Solver::Options& options,
ProblemImpl* problem_impl,
@@ -326,15 +193,10 @@ void SolverImpl::Solve(const Solver::Options& options,
<< " residual blocks, "
<< problem_impl->NumResiduals()
<< " residuals.";
-
if (options.minimizer_type == TRUST_REGION) {
TrustRegionSolve(options, problem_impl, summary);
} else {
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
LineSearchSolve(options, problem_impl, summary);
-#else
- LOG(FATAL) << "Ceres Solver was compiled with -DLINE_SEARCH_MINIMIZER=OFF";
-#endif
}
}
@@ -347,39 +209,15 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
Program* original_program = original_problem_impl->mutable_program();
ProblemImpl* problem_impl = original_problem_impl;
- // Reset the summary object to its default values.
- *CHECK_NOTNULL(summary) = Solver::Summary();
-
summary->minimizer_type = TRUST_REGION;
- summary->num_parameter_blocks = problem_impl->NumParameterBlocks();
- summary->num_parameters = problem_impl->NumParameters();
- summary->num_effective_parameters =
- original_program->NumEffectiveParameters();
- summary->num_residual_blocks = problem_impl->NumResidualBlocks();
- summary->num_residuals = problem_impl->NumResiduals();
-
- // Empty programs are usually a user error.
- if (summary->num_parameter_blocks == 0) {
- summary->error = "Problem contains no parameter blocks.";
- LOG(ERROR) << summary->error;
- return;
- }
-
- if (summary->num_residual_blocks == 0) {
- summary->error = "Problem contains no residual blocks.";
- LOG(ERROR) << summary->error;
- return;
- }
-
- SummarizeOrdering(original_options.linear_solver_ordering,
- &(summary->linear_solver_ordering_given));
- SummarizeOrdering(original_options.inner_iteration_ordering,
- &(summary->inner_iteration_ordering_given));
+ SummarizeGivenProgram(*original_program, summary);
+ OrderingToGroupSizes(original_options.linear_solver_ordering.get(),
+ &(summary->linear_solver_ordering_given));
+ OrderingToGroupSizes(original_options.inner_iteration_ordering.get(),
+ &(summary->inner_iteration_ordering_given));
Solver::Options options(original_options);
- options.linear_solver_ordering = NULL;
- options.inner_iteration_ordering = NULL;
#ifndef CERES_USE_OPENMP
if (options.num_threads > 1) {
@@ -404,9 +242,19 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
if (options.trust_region_minimizer_iterations_to_dump.size() > 0 &&
options.trust_region_problem_dump_format_type != CONSOLE &&
options.trust_region_problem_dump_directory.empty()) {
- summary->error =
+ summary->message =
"Solver::Options::trust_region_problem_dump_directory is empty.";
- LOG(ERROR) << summary->error;
+ LOG(ERROR) << summary->message;
+ return;
+ }
+
+ if (!original_program->ParameterBlocksAreFinite(&summary->message)) {
+ LOG(ERROR) << "Terminating: " << summary->message;
+ return;
+ }
+
+ if (!original_program->IsFeasible(&summary->message)) {
+ LOG(ERROR) << "Terminating: " << summary->message;
return;
}
@@ -433,17 +281,14 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
problem_impl = gradient_checking_problem_impl.get();
}
- if (original_options.linear_solver_ordering != NULL) {
- if (!IsOrderingValid(original_options, problem_impl, &summary->error)) {
- LOG(ERROR) << summary->error;
+ if (options.linear_solver_ordering.get() != NULL) {
+ if (!IsOrderingValid(options, problem_impl, &summary->message)) {
+ LOG(ERROR) << summary->message;
return;
}
event_logger.AddEvent("CheckOrdering");
- options.linear_solver_ordering =
- new ParameterBlockOrdering(*original_options.linear_solver_ordering);
- event_logger.AddEvent("CopyOrdering");
} else {
- options.linear_solver_ordering = new ParameterBlockOrdering;
+ options.linear_solver_ordering.reset(new ParameterBlockOrdering);
const ProblemImpl::ParameterMap& parameter_map =
problem_impl->parameter_map();
for (ProblemImpl::ParameterMap::const_iterator it = parameter_map.begin();
@@ -459,41 +304,35 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
scoped_ptr<Program> reduced_program(CreateReducedProgram(&options,
problem_impl,
&summary->fixed_cost,
- &summary->error));
+ &summary->message));
event_logger.AddEvent("CreateReducedProgram");
if (reduced_program == NULL) {
return;
}
- SummarizeOrdering(options.linear_solver_ordering,
- &(summary->linear_solver_ordering_used));
-
- summary->num_parameter_blocks_reduced = reduced_program->NumParameterBlocks();
- summary->num_parameters_reduced = reduced_program->NumParameters();
- summary->num_effective_parameters_reduced =
- reduced_program->NumEffectiveParameters();
- summary->num_residual_blocks_reduced = reduced_program->NumResidualBlocks();
- summary->num_residuals_reduced = reduced_program->NumResiduals();
+ OrderingToGroupSizes(options.linear_solver_ordering.get(),
+ &(summary->linear_solver_ordering_used));
+ SummarizeReducedProgram(*reduced_program, summary);
if (summary->num_parameter_blocks_reduced == 0) {
summary->preprocessor_time_in_seconds =
WallTimeInSeconds() - solver_start_time;
double post_process_start_time = WallTimeInSeconds();
- LOG(INFO) << "Terminating: FUNCTION_TOLERANCE reached. "
- << "No non-constant parameter blocks found.";
+
+ summary->message =
+ "Function tolerance reached. "
+ "No non-constant parameter blocks found.";
+ summary->termination_type = CONVERGENCE;
+ VLOG_IF(1, options.logging_type != SILENT) << summary->message;
summary->initial_cost = summary->fixed_cost;
summary->final_cost = summary->fixed_cost;
- // FUNCTION_TOLERANCE is the right convergence here, as we know
- // that the objective function is constant and cannot be changed
- // any further.
- summary->termination_type = FUNCTION_TOLERANCE;
-
// Ensure the program state is set to the user parameters on the way out.
original_program->SetParameterBlockStatePtrsToUserStatePtrs();
+ original_program->SetParameterOffsetsAndIndex();
summary->postprocessor_time_in_seconds =
WallTimeInSeconds() - post_process_start_time;
@@ -501,7 +340,7 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
}
scoped_ptr<LinearSolver>
- linear_solver(CreateLinearSolver(&options, &summary->error));
+ linear_solver(CreateLinearSolver(&options, &summary->message));
event_logger.AddEvent("CreateLinearSolver");
if (linear_solver == NULL) {
return;
@@ -511,6 +350,7 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
summary->linear_solver_type_used = options.linear_solver_type;
summary->preconditioner_type = options.preconditioner_type;
+ summary->visibility_clustering_type = options.visibility_clustering_type;
summary->num_linear_solver_threads_given =
original_options.num_linear_solver_threads;
@@ -527,7 +367,7 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
scoped_ptr<Evaluator> evaluator(CreateEvaluator(options,
problem_impl->parameter_map(),
reduced_program.get(),
- &summary->error));
+ &summary->message));
event_logger.AddEvent("CreateEvaluator");
@@ -542,26 +382,18 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
<< "Disabling inner iterations.";
} else {
inner_iteration_minimizer.reset(
- CreateInnerIterationMinimizer(original_options,
+ CreateInnerIterationMinimizer(options,
*reduced_program,
problem_impl->parameter_map(),
summary));
if (inner_iteration_minimizer == NULL) {
- LOG(ERROR) << summary->error;
+ LOG(ERROR) << summary->message;
return;
}
}
}
event_logger.AddEvent("CreateInnerIterationMinimizer");
- // The optimizer works on contiguous parameter vectors; allocate some.
- Vector parameters(reduced_program->NumParameters());
-
- // Collect the discontiguous parameters into a contiguous state vector.
- reduced_program->ParameterBlocksToStateVector(parameters.data());
-
- Vector original_parameters = parameters;
-
double minimizer_start_time = WallTimeInSeconds();
summary->preprocessor_time_in_seconds =
minimizer_start_time - solver_start_time;
@@ -572,30 +404,17 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
inner_iteration_minimizer.get(),
evaluator.get(),
linear_solver.get(),
- parameters.data(),
summary);
event_logger.AddEvent("Minimize");
- SetSummaryFinalCost(summary);
-
- // If the user aborted mid-optimization or the optimization
- // terminated because of a numerical failure, then return without
- // updating user state.
- if (summary->termination_type == USER_ABORT ||
- summary->termination_type == NUMERICAL_FAILURE) {
- return;
- }
-
double post_process_start_time = WallTimeInSeconds();
- // Push the contiguous optimized parameters back to the user's
- // parameters.
- reduced_program->StateVectorToParameterBlocks(parameters.data());
- reduced_program->CopyParameterBlockStateToUserState();
+ SetSummaryFinalCost(summary);
// Ensure the program state is set to the user parameters on the way
// out.
original_program->SetParameterBlockStatePtrsToUserStatePtrs();
+ original_program->SetParameterOffsetsAndIndex();
const map<string, double>& linear_solver_time_statistics =
linear_solver->TimeStatistics();
@@ -618,8 +437,6 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options,
event_logger.AddEvent("PostProcess");
}
-
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
ProblemImpl* original_problem_impl,
Solver::Summary* summary) {
@@ -628,9 +445,7 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
Program* original_program = original_problem_impl->mutable_program();
ProblemImpl* problem_impl = original_problem_impl;
- // Reset the summary object to its default values.
- *CHECK_NOTNULL(summary) = Solver::Summary();
-
+ SummarizeGivenProgram(*original_program, summary);
summary->minimizer_type = LINE_SEARCH;
summary->line_search_direction_type =
original_options.line_search_direction_type;
@@ -641,104 +456,9 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
summary->nonlinear_conjugate_gradient_type =
original_options.nonlinear_conjugate_gradient_type;
- summary->num_parameter_blocks = original_program->NumParameterBlocks();
- summary->num_parameters = original_program->NumParameters();
- summary->num_residual_blocks = original_program->NumResidualBlocks();
- summary->num_residuals = original_program->NumResiduals();
- summary->num_effective_parameters =
- original_program->NumEffectiveParameters();
-
- // Validate values for configuration parameters supplied by user.
- if ((original_options.line_search_direction_type == ceres::BFGS ||
- original_options.line_search_direction_type == ceres::LBFGS) &&
- original_options.line_search_type != ceres::WOLFE) {
- summary->error =
- string("Invalid configuration: require line_search_type == "
- "ceres::WOLFE when using (L)BFGS to ensure that underlying "
- "assumptions are guaranteed to be satisfied.");
- LOG(ERROR) << summary->error;
- return;
- }
- if (original_options.max_lbfgs_rank <= 0) {
- summary->error =
- string("Invalid configuration: require max_lbfgs_rank > 0");
- LOG(ERROR) << summary->error;
- return;
- }
- if (original_options.min_line_search_step_size <= 0.0) {
- summary->error = "Invalid configuration: min_line_search_step_size <= 0.0.";
- LOG(ERROR) << summary->error;
- return;
- }
- if (original_options.line_search_sufficient_function_decrease <= 0.0) {
- summary->error =
- string("Invalid configuration: require ") +
- string("line_search_sufficient_function_decrease <= 0.0.");
- LOG(ERROR) << summary->error;
- return;
- }
- if (original_options.max_line_search_step_contraction <= 0.0 ||
- original_options.max_line_search_step_contraction >= 1.0) {
- summary->error = string("Invalid configuration: require ") +
- string("0.0 < max_line_search_step_contraction < 1.0.");
- LOG(ERROR) << summary->error;
- return;
- }
- if (original_options.min_line_search_step_contraction <=
- original_options.max_line_search_step_contraction ||
- original_options.min_line_search_step_contraction > 1.0) {
- summary->error = string("Invalid configuration: require ") +
- string("max_line_search_step_contraction < ") +
- string("min_line_search_step_contraction <= 1.0.");
- LOG(ERROR) << summary->error;
- return;
- }
- // Warn user if they have requested BISECTION interpolation, but constraints
- // on max/min step size change during line search prevent bisection scaling
- // from occurring. Warn only, as this is likely a user mistake, but one which
- // does not prevent us from continuing.
- LOG_IF(WARNING,
- (original_options.line_search_interpolation_type == ceres::BISECTION &&
- (original_options.max_line_search_step_contraction > 0.5 ||
- original_options.min_line_search_step_contraction < 0.5)))
- << "Line search interpolation type is BISECTION, but specified "
- << "max_line_search_step_contraction: "
- << original_options.max_line_search_step_contraction << ", and "
- << "min_line_search_step_contraction: "
- << original_options.min_line_search_step_contraction
- << ", prevent bisection (0.5) scaling, continuing with solve regardless.";
- if (original_options.max_num_line_search_step_size_iterations <= 0) {
- summary->error = string("Invalid configuration: require ") +
- string("max_num_line_search_step_size_iterations > 0.");
- LOG(ERROR) << summary->error;
- return;
- }
- if (original_options.line_search_sufficient_curvature_decrease <=
- original_options.line_search_sufficient_function_decrease ||
- original_options.line_search_sufficient_curvature_decrease > 1.0) {
- summary->error = string("Invalid configuration: require ") +
- string("line_search_sufficient_function_decrease < ") +
- string("line_search_sufficient_curvature_decrease < 1.0.");
- LOG(ERROR) << summary->error;
- return;
- }
- if (original_options.max_line_search_step_expansion <= 1.0) {
- summary->error = string("Invalid configuration: require ") +
- string("max_line_search_step_expansion > 1.0.");
- LOG(ERROR) << summary->error;
- return;
- }
-
- // Empty programs are usually a user error.
- if (summary->num_parameter_blocks == 0) {
- summary->error = "Problem contains no parameter blocks.";
- LOG(ERROR) << summary->error;
- return;
- }
-
- if (summary->num_residual_blocks == 0) {
- summary->error = "Problem contains no residual blocks.";
- LOG(ERROR) << summary->error;
+ if (original_program->IsBoundsConstrained()) {
+ summary->message = "LINE_SEARCH Minimizer does not support bounds.";
+ LOG(ERROR) << "Terminating: " << summary->message;
return;
}
@@ -750,8 +470,6 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
// line search.
options.linear_solver_type = CGNR;
- options.linear_solver_ordering = NULL;
- options.inner_iteration_ordering = NULL;
#ifndef CERES_USE_OPENMP
if (options.num_threads > 1) {
@@ -766,15 +484,18 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
summary->num_threads_given = original_options.num_threads;
summary->num_threads_used = options.num_threads;
- if (original_options.linear_solver_ordering != NULL) {
- if (!IsOrderingValid(original_options, problem_impl, &summary->error)) {
- LOG(ERROR) << summary->error;
+ if (!original_program->ParameterBlocksAreFinite(&summary->message)) {
+ LOG(ERROR) << "Terminating: " << summary->message;
+ return;
+ }
+
+ if (options.linear_solver_ordering.get() != NULL) {
+ if (!IsOrderingValid(options, problem_impl, &summary->message)) {
+ LOG(ERROR) << summary->message;
return;
}
- options.linear_solver_ordering =
- new ParameterBlockOrdering(*original_options.linear_solver_ordering);
} else {
- options.linear_solver_ordering = new ParameterBlockOrdering;
+ options.linear_solver_ordering.reset(new ParameterBlockOrdering);
const ProblemImpl::ParameterMap& parameter_map =
problem_impl->parameter_map();
for (ProblemImpl::ParameterMap::const_iterator it = parameter_map.begin();
@@ -784,6 +505,7 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
}
}
+
original_program->SetParameterBlockStatePtrsToUserStatePtrs();
// If the user requests gradient checking, construct a new
@@ -809,36 +531,31 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
scoped_ptr<Program> reduced_program(CreateReducedProgram(&options,
problem_impl,
&summary->fixed_cost,
- &summary->error));
+ &summary->message));
if (reduced_program == NULL) {
return;
}
- summary->num_parameter_blocks_reduced = reduced_program->NumParameterBlocks();
- summary->num_parameters_reduced = reduced_program->NumParameters();
- summary->num_residual_blocks_reduced = reduced_program->NumResidualBlocks();
- summary->num_effective_parameters_reduced =
- reduced_program->NumEffectiveParameters();
- summary->num_residuals_reduced = reduced_program->NumResiduals();
-
+ SummarizeReducedProgram(*reduced_program, summary);
if (summary->num_parameter_blocks_reduced == 0) {
summary->preprocessor_time_in_seconds =
WallTimeInSeconds() - solver_start_time;
- LOG(INFO) << "Terminating: FUNCTION_TOLERANCE reached. "
- << "No non-constant parameter blocks found.";
-
- // FUNCTION_TOLERANCE is the right convergence here, as we know
- // that the objective function is constant and cannot be changed
- // any further.
- summary->termination_type = FUNCTION_TOLERANCE;
+ summary->message =
+ "Function tolerance reached. "
+ "No non-constant parameter blocks found.";
+ summary->termination_type = CONVERGENCE;
+ VLOG_IF(1, options.logging_type != SILENT) << summary->message;
+ summary->initial_cost = summary->fixed_cost;
+ summary->final_cost = summary->fixed_cost;
const double post_process_start_time = WallTimeInSeconds();
-
SetSummaryFinalCost(summary);
// Ensure the program state is set to the user parameters on the way out.
original_program->SetParameterBlockStatePtrsToUserStatePtrs();
+ original_program->SetParameterOffsetsAndIndex();
+
summary->postprocessor_time_in_seconds =
WallTimeInSeconds() - post_process_start_time;
return;
@@ -847,48 +564,25 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
scoped_ptr<Evaluator> evaluator(CreateEvaluator(options,
problem_impl->parameter_map(),
reduced_program.get(),
- &summary->error));
+ &summary->message));
if (evaluator == NULL) {
return;
}
- // The optimizer works on contiguous parameter vectors; allocate some.
- Vector parameters(reduced_program->NumParameters());
-
- // Collect the discontiguous parameters into a contiguous state vector.
- reduced_program->ParameterBlocksToStateVector(parameters.data());
-
- Vector original_parameters = parameters;
-
const double minimizer_start_time = WallTimeInSeconds();
summary->preprocessor_time_in_seconds =
minimizer_start_time - solver_start_time;
// Run the optimization.
- LineSearchMinimize(options,
- reduced_program.get(),
- evaluator.get(),
- parameters.data(),
- summary);
-
- // If the user aborted mid-optimization or the optimization
- // terminated because of a numerical failure, then return without
- // updating user state.
- if (summary->termination_type == USER_ABORT ||
- summary->termination_type == NUMERICAL_FAILURE) {
- return;
- }
+ LineSearchMinimize(options, reduced_program.get(), evaluator.get(), summary);
const double post_process_start_time = WallTimeInSeconds();
- // Push the contiguous optimized parameters back to the user's parameters.
- reduced_program->StateVectorToParameterBlocks(parameters.data());
- reduced_program->CopyParameterBlockStateToUserState();
-
SetSummaryFinalCost(summary);
// Ensure the program state is set to the user parameters on the way out.
original_program->SetParameterBlockStatePtrsToUserStatePtrs();
+ original_program->SetParameterOffsetsAndIndex();
const map<string, double>& evaluator_time_statistics =
evaluator->TimeStatistics();
@@ -902,7 +596,6 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options,
summary->postprocessor_time_in_seconds =
WallTimeInSeconds() - post_process_start_time;
}
-#endif // CERES_NO_LINE_SEARCH_MINIMIZER
bool SolverImpl::IsOrderingValid(const Solver::Options& options,
const ProblemImpl* problem_impl,
@@ -966,133 +659,48 @@ bool SolverImpl::IsParameterBlockSetIndependent(
return true;
}
-
-// Strips varying parameters and residuals, maintaining order, and updating
-// num_eliminate_blocks.
-bool SolverImpl::RemoveFixedBlocksFromProgram(Program* program,
- ParameterBlockOrdering* ordering,
- double* fixed_cost,
- string* error) {
- vector<ParameterBlock*>* parameter_blocks =
- program->mutable_parameter_blocks();
-
- scoped_array<double> residual_block_evaluate_scratch;
- if (fixed_cost != NULL) {
- residual_block_evaluate_scratch.reset(
- new double[program->MaxScratchDoublesNeededForEvaluate()]);
- *fixed_cost = 0.0;
- }
-
- // Mark all the parameters as unused. Abuse the index member of the parameter
- // blocks for the marking.
- for (int i = 0; i < parameter_blocks->size(); ++i) {
- (*parameter_blocks)[i]->set_index(-1);
- }
-
- // Filter out residual that have all-constant parameters, and mark all the
- // parameter blocks that appear in residuals.
- {
- vector<ResidualBlock*>* residual_blocks =
- program->mutable_residual_blocks();
- int j = 0;
- for (int i = 0; i < residual_blocks->size(); ++i) {
- ResidualBlock* residual_block = (*residual_blocks)[i];
- int num_parameter_blocks = residual_block->NumParameterBlocks();
-
- // Determine if the residual block is fixed, and also mark varying
- // parameters that appear in the residual block.
- bool all_constant = true;
- for (int k = 0; k < num_parameter_blocks; k++) {
- ParameterBlock* parameter_block = residual_block->parameter_blocks()[k];
- if (!parameter_block->IsConstant()) {
- all_constant = false;
- parameter_block->set_index(1);
- }
- }
-
- if (!all_constant) {
- (*residual_blocks)[j++] = (*residual_blocks)[i];
- } else if (fixed_cost != NULL) {
- // The residual is constant and will be removed, so its cost is
- // added to the variable fixed_cost.
- double cost = 0.0;
- if (!residual_block->Evaluate(true,
- &cost,
- NULL,
- NULL,
- residual_block_evaluate_scratch.get())) {
- *error = StringPrintf("Evaluation of the residual %d failed during "
- "removal of fixed residual blocks.", i);
- return false;
- }
- *fixed_cost += cost;
- }
- }
- residual_blocks->resize(j);
- }
-
- // Filter out unused or fixed parameter blocks, and update
- // the ordering.
- {
- vector<ParameterBlock*>* parameter_blocks =
- program->mutable_parameter_blocks();
- int j = 0;
- for (int i = 0; i < parameter_blocks->size(); ++i) {
- ParameterBlock* parameter_block = (*parameter_blocks)[i];
- if (parameter_block->index() == 1) {
- (*parameter_blocks)[j++] = parameter_block;
- } else {
- ordering->Remove(parameter_block->mutable_user_state());
- }
- }
- parameter_blocks->resize(j);
- }
-
- if (!(((program->NumResidualBlocks() == 0) &&
- (program->NumParameterBlocks() == 0)) ||
- ((program->NumResidualBlocks() != 0) &&
- (program->NumParameterBlocks() != 0)))) {
- *error = "Congratulations, you found a bug in Ceres. Please report it.";
- return false;
- }
-
- return true;
-}
-
Program* SolverImpl::CreateReducedProgram(Solver::Options* options,
ProblemImpl* problem_impl,
double* fixed_cost,
string* error) {
- CHECK_NOTNULL(options->linear_solver_ordering);
+ CHECK_NOTNULL(options->linear_solver_ordering.get());
Program* original_program = problem_impl->mutable_program();
- scoped_ptr<Program> transformed_program(new Program(*original_program));
-
- ParameterBlockOrdering* linear_solver_ordering =
- options->linear_solver_ordering;
- const int min_group_id =
- linear_solver_ordering->group_to_elements().begin()->first;
- if (!RemoveFixedBlocksFromProgram(transformed_program.get(),
- linear_solver_ordering,
- fixed_cost,
- error)) {
+ vector<double*> removed_parameter_blocks;
+ scoped_ptr<Program> reduced_program(
+ original_program->CreateReducedProgram(&removed_parameter_blocks,
+ fixed_cost,
+ error));
+ if (reduced_program.get() == NULL) {
return NULL;
}
VLOG(2) << "Reduced problem: "
- << transformed_program->NumParameterBlocks()
+ << reduced_program->NumParameterBlocks()
<< " parameter blocks, "
- << transformed_program->NumParameters()
+ << reduced_program->NumParameters()
<< " parameters, "
- << transformed_program->NumResidualBlocks()
+ << reduced_program->NumResidualBlocks()
<< " residual blocks, "
- << transformed_program->NumResiduals()
+ << reduced_program->NumResiduals()
<< " residuals.";
- if (transformed_program->NumParameterBlocks() == 0) {
+ if (reduced_program->NumParameterBlocks() == 0) {
LOG(WARNING) << "No varying parameter blocks to optimize; "
<< "bailing early.";
- return transformed_program.release();
+ return reduced_program.release();
+ }
+
+ ParameterBlockOrdering* linear_solver_ordering =
+ options->linear_solver_ordering.get();
+ const int min_group_id =
+ linear_solver_ordering->MinNonZeroGroup();
+ linear_solver_ordering->Remove(removed_parameter_blocks);
+
+ ParameterBlockOrdering* inner_iteration_ordering =
+ options->inner_iteration_ordering.get();
+ if (inner_iteration_ordering != NULL) {
+ inner_iteration_ordering->Remove(removed_parameter_blocks);
}
if (IsSchurType(options->linear_solver_type) &&
@@ -1108,7 +716,15 @@ Program* SolverImpl::CreateReducedProgram(Solver::Options* options,
// as they assume there is at least one e_block. Thus, we
// automatically switch to the closest solver to the one indicated
// by the user.
- AlternateLinearSolverForSchurTypeLinearSolver(options);
+ if (options->linear_solver_type == ITERATIVE_SCHUR) {
+ options->preconditioner_type =
+ Preconditioner::PreconditionerForZeroEBlocks(
+ options->preconditioner_type);
+ }
+
+ options->linear_solver_type =
+ LinearSolver::LinearSolverForZeroEBlocks(
+ options->linear_solver_type);
}
if (IsSchurType(options->linear_solver_type)) {
@@ -1117,33 +733,34 @@ Program* SolverImpl::CreateReducedProgram(Solver::Options* options,
options->sparse_linear_algebra_library_type,
problem_impl->parameter_map(),
linear_solver_ordering,
- transformed_program.get(),
+ reduced_program.get(),
error)) {
return NULL;
}
- return transformed_program.release();
+ return reduced_program.release();
}
- if (options->linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
+ if (options->linear_solver_type == SPARSE_NORMAL_CHOLESKY &&
+ !options->dynamic_sparsity) {
if (!ReorderProgramForSparseNormalCholesky(
options->sparse_linear_algebra_library_type,
- linear_solver_ordering,
- transformed_program.get(),
+ *linear_solver_ordering,
+ reduced_program.get(),
error)) {
return NULL;
}
- return transformed_program.release();
+ return reduced_program.release();
}
- transformed_program->SetParameterOffsetsAndIndex();
- return transformed_program.release();
+ reduced_program->SetParameterOffsetsAndIndex();
+ return reduced_program.release();
}
LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options,
string* error) {
CHECK_NOTNULL(options);
- CHECK_NOTNULL(options->linear_solver_ordering);
+ CHECK_NOTNULL(options->linear_solver_ordering.get());
CHECK_NOTNULL(error);
if (options->trust_region_strategy_type == DOGLEG) {
@@ -1209,14 +826,6 @@ LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options,
}
#endif
-#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
- if (options->linear_solver_type == SPARSE_SCHUR) {
- *error = "Can't use SPARSE_SCHUR because neither SuiteSparse nor"
- "CXSparse was enabled when Ceres was compiled.";
- return NULL;
- }
-#endif
-
if (options->max_linear_solver_iterations <= 0) {
*error = "Solver::Options::max_linear_solver_iterations is not positive.";
return NULL;
@@ -1239,11 +848,14 @@ LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options,
options->max_linear_solver_iterations;
linear_solver_options.type = options->linear_solver_type;
linear_solver_options.preconditioner_type = options->preconditioner_type;
+ linear_solver_options.visibility_clustering_type =
+ options->visibility_clustering_type;
linear_solver_options.sparse_linear_algebra_library_type =
options->sparse_linear_algebra_library_type;
linear_solver_options.dense_linear_algebra_library_type =
options->dense_linear_algebra_library_type;
linear_solver_options.use_postordering = options->use_postordering;
+ linear_solver_options.dynamic_sparsity = options->dynamic_sparsity;
// Ignore user's postordering preferences and force it to be true if
// cholmod_camd is not available. This ensures that the linear
@@ -1259,13 +871,8 @@ LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options,
linear_solver_options.num_threads = options->num_linear_solver_threads;
options->num_linear_solver_threads = linear_solver_options.num_threads;
- const map<int, set<double*> >& groups =
- options->linear_solver_ordering->group_to_elements();
- for (map<int, set<double*> >::const_iterator it = groups.begin();
- it != groups.end();
- ++it) {
- linear_solver_options.elimination_groups.push_back(it->second.size());
- }
+ OrderingToGroupSizes(options->linear_solver_ordering.get(),
+ &linear_solver_options.elimination_groups);
// Schur type solvers, expect at least two elimination groups. If
// there is only one elimination group, then CreateReducedProgram
// guarantees that this group only contains e_blocks. Thus we add a
@@ -1278,109 +885,6 @@ LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options,
return LinearSolver::Create(linear_solver_options);
}
-
-// Find the minimum index of any parameter block to the given residual.
-// Parameter blocks that have indices greater than num_eliminate_blocks are
-// considered to have an index equal to num_eliminate_blocks.
-static int MinParameterBlock(const ResidualBlock* residual_block,
- int num_eliminate_blocks) {
- int min_parameter_block_position = num_eliminate_blocks;
- for (int i = 0; i < residual_block->NumParameterBlocks(); ++i) {
- ParameterBlock* parameter_block = residual_block->parameter_blocks()[i];
- if (!parameter_block->IsConstant()) {
- CHECK_NE(parameter_block->index(), -1)
- << "Did you forget to call Program::SetParameterOffsetsAndIndex()? "
- << "This is a Ceres bug; please contact the developers!";
- min_parameter_block_position = std::min(parameter_block->index(),
- min_parameter_block_position);
- }
- }
- return min_parameter_block_position;
-}
-
-// Reorder the residuals for program, if necessary, so that the residuals
-// involving each E block occur together. This is a necessary condition for the
-// Schur eliminator, which works on these "row blocks" in the jacobian.
-bool SolverImpl::LexicographicallyOrderResidualBlocks(
- const int num_eliminate_blocks,
- Program* program,
- string* error) {
- CHECK_GE(num_eliminate_blocks, 1)
- << "Congratulations, you found a Ceres bug! Please report this error "
- << "to the developers.";
-
- // Create a histogram of the number of residuals for each E block. There is an
- // extra bucket at the end to catch all non-eliminated F blocks.
- vector<int> residual_blocks_per_e_block(num_eliminate_blocks + 1);
- vector<ResidualBlock*>* residual_blocks = program->mutable_residual_blocks();
- vector<int> min_position_per_residual(residual_blocks->size());
- for (int i = 0; i < residual_blocks->size(); ++i) {
- ResidualBlock* residual_block = (*residual_blocks)[i];
- int position = MinParameterBlock(residual_block, num_eliminate_blocks);
- min_position_per_residual[i] = position;
- DCHECK_LE(position, num_eliminate_blocks);
- residual_blocks_per_e_block[position]++;
- }
-
- // Run a cumulative sum on the histogram, to obtain offsets to the start of
- // each histogram bucket (where each bucket is for the residuals for that
- // E-block).
- vector<int> offsets(num_eliminate_blocks + 1);
- std::partial_sum(residual_blocks_per_e_block.begin(),
- residual_blocks_per_e_block.end(),
- offsets.begin());
- CHECK_EQ(offsets.back(), residual_blocks->size())
- << "Congratulations, you found a Ceres bug! Please report this error "
- << "to the developers.";
-
- CHECK(find(residual_blocks_per_e_block.begin(),
- residual_blocks_per_e_block.end() - 1, 0) !=
- residual_blocks_per_e_block.end())
- << "Congratulations, you found a Ceres bug! Please report this error "
- << "to the developers.";
-
- // Fill in each bucket with the residual blocks for its corresponding E block.
- // Each bucket is individually filled from the back of the bucket to the front
- // of the bucket. The filling order among the buckets is dictated by the
- // residual blocks. This loop uses the offsets as counters; subtracting one
- // from each offset as a residual block is placed in the bucket. When the
- // filling is finished, the offset pointerts should have shifted down one
- // entry (this is verified below).
- vector<ResidualBlock*> reordered_residual_blocks(
- (*residual_blocks).size(), static_cast<ResidualBlock*>(NULL));
- for (int i = 0; i < residual_blocks->size(); ++i) {
- int bucket = min_position_per_residual[i];
-
- // Decrement the cursor, which should now point at the next empty position.
- offsets[bucket]--;
-
- // Sanity.
- CHECK(reordered_residual_blocks[offsets[bucket]] == NULL)
- << "Congratulations, you found a Ceres bug! Please report this error "
- << "to the developers.";
-
- reordered_residual_blocks[offsets[bucket]] = (*residual_blocks)[i];
- }
-
- // Sanity check #1: The difference in bucket offsets should match the
- // histogram sizes.
- for (int i = 0; i < num_eliminate_blocks; ++i) {
- CHECK_EQ(residual_blocks_per_e_block[i], offsets[i + 1] - offsets[i])
- << "Congratulations, you found a Ceres bug! Please report this error "
- << "to the developers.";
- }
- // Sanity check #2: No NULL's left behind.
- for (int i = 0; i < reordered_residual_blocks.size(); ++i) {
- CHECK(reordered_residual_blocks[i] != NULL)
- << "Congratulations, you found a Ceres bug! Please report this error "
- << "to the developers.";
- }
-
- // Now that the residuals are collected by E block, swap them in place.
- swap(*program->mutable_residual_blocks(), reordered_residual_blocks);
- return true;
-}
-
Evaluator* SolverImpl::CreateEvaluator(
const Solver::Options& options,
const ProblemImpl::ParameterMap& parameter_map,
@@ -1396,6 +900,7 @@ Evaluator* SolverImpl::CreateEvaluator(
->second.size())
: 0;
evaluator_options.num_threads = options.num_threads;
+ evaluator_options.dynamic_sparsity = options.dynamic_sparsity;
return Evaluator::Create(evaluator_options, program, error);
}
@@ -1411,374 +916,32 @@ CoordinateDescentMinimizer* SolverImpl::CreateInnerIterationMinimizer(
scoped_ptr<ParameterBlockOrdering> inner_iteration_ordering;
ParameterBlockOrdering* ordering_ptr = NULL;
- if (options.inner_iteration_ordering == NULL) {
- // Find a recursive decomposition of the Hessian matrix as a set
- // of independent sets of decreasing size and invert it. This
- // seems to work better in practice, i.e., Cameras before
- // points.
- inner_iteration_ordering.reset(new ParameterBlockOrdering);
- ComputeRecursiveIndependentSetOrdering(program,
- inner_iteration_ordering.get());
- inner_iteration_ordering->Reverse();
+ if (options.inner_iteration_ordering.get() == NULL) {
+ inner_iteration_ordering.reset(
+ CoordinateDescentMinimizer::CreateOrdering(program));
ordering_ptr = inner_iteration_ordering.get();
} else {
- const map<int, set<double*> >& group_to_elements =
- options.inner_iteration_ordering->group_to_elements();
-
- // Iterate over each group and verify that it is an independent
- // set.
- map<int, set<double*> >::const_iterator it = group_to_elements.begin();
- for ( ; it != group_to_elements.end(); ++it) {
- if (!IsParameterBlockSetIndependent(it->second,
- program.residual_blocks())) {
- summary->error =
- StringPrintf("The user-provided "
- "parameter_blocks_for_inner_iterations does not "
- "form an independent set. Group Id: %d", it->first);
- return NULL;
- }
+ ordering_ptr = options.inner_iteration_ordering.get();
+ if (!CoordinateDescentMinimizer::IsOrderingValid(program,
+ *ordering_ptr,
+ &summary->message)) {
+ return NULL;
}
- ordering_ptr = options.inner_iteration_ordering;
}
if (!inner_iteration_minimizer->Init(program,
parameter_map,
*ordering_ptr,
- &summary->error)) {
+ &summary->message)) {
return NULL;
}
summary->inner_iterations_used = true;
summary->inner_iteration_time_in_seconds = 0.0;
- SummarizeOrdering(ordering_ptr, &(summary->inner_iteration_ordering_used));
+ OrderingToGroupSizes(ordering_ptr,
+ &(summary->inner_iteration_ordering_used));
return inner_iteration_minimizer.release();
}
-void SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(
- Solver::Options* options) {
- if (!IsSchurType(options->linear_solver_type)) {
- return;
- }
-
- string msg = "No e_blocks remaining. Switching from ";
- if (options->linear_solver_type == SPARSE_SCHUR) {
- options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
- msg += "SPARSE_SCHUR to SPARSE_NORMAL_CHOLESKY.";
- } else if (options->linear_solver_type == DENSE_SCHUR) {
- // TODO(sameeragarwal): This is probably not a great choice.
- // Ideally, we should have a DENSE_NORMAL_CHOLESKY, that can
- // take a BlockSparseMatrix as input.
- options->linear_solver_type = DENSE_QR;
- msg += "DENSE_SCHUR to DENSE_QR.";
- } else if (options->linear_solver_type == ITERATIVE_SCHUR) {
- options->linear_solver_type = CGNR;
- if (options->preconditioner_type != IDENTITY) {
- msg += StringPrintf("ITERATIVE_SCHUR with %s preconditioner "
- "to CGNR with JACOBI preconditioner.",
- PreconditionerTypeToString(
- options->preconditioner_type));
- // CGNR currently only supports the JACOBI preconditioner.
- options->preconditioner_type = JACOBI;
- } else {
- msg += "ITERATIVE_SCHUR with IDENTITY preconditioner"
- "to CGNR with IDENTITY preconditioner.";
- }
- }
- LOG(WARNING) << msg;
-}
-
-bool SolverImpl::ApplyUserOrdering(
- const ProblemImpl::ParameterMap& parameter_map,
- const ParameterBlockOrdering* parameter_block_ordering,
- Program* program,
- string* error) {
- const int num_parameter_blocks = program->NumParameterBlocks();
- if (parameter_block_ordering->NumElements() != num_parameter_blocks) {
- *error = StringPrintf("User specified ordering does not have the same "
- "number of parameters as the problem. The problem"
- "has %d blocks while the ordering has %d blocks.",
- num_parameter_blocks,
- parameter_block_ordering->NumElements());
- return false;
- }
-
- vector<ParameterBlock*>* parameter_blocks =
- program->mutable_parameter_blocks();
- parameter_blocks->clear();
-
- const map<int, set<double*> >& groups =
- parameter_block_ordering->group_to_elements();
-
- for (map<int, set<double*> >::const_iterator group_it = groups.begin();
- group_it != groups.end();
- ++group_it) {
- const set<double*>& group = group_it->second;
- for (set<double*>::const_iterator parameter_block_ptr_it = group.begin();
- parameter_block_ptr_it != group.end();
- ++parameter_block_ptr_it) {
- ProblemImpl::ParameterMap::const_iterator parameter_block_it =
- parameter_map.find(*parameter_block_ptr_it);
- if (parameter_block_it == parameter_map.end()) {
- *error = StringPrintf("User specified ordering contains a pointer "
- "to a double that is not a parameter block in "
- "the problem. The invalid double is in group: %d",
- group_it->first);
- return false;
- }
- parameter_blocks->push_back(parameter_block_it->second);
- }
- }
- return true;
-}
-
-
-TripletSparseMatrix* SolverImpl::CreateJacobianBlockSparsityTranspose(
- const Program* program) {
-
- // Matrix to store the block sparsity structure of the Jacobian.
- TripletSparseMatrix* tsm =
- new TripletSparseMatrix(program->NumParameterBlocks(),
- program->NumResidualBlocks(),
- 10 * program->NumResidualBlocks());
- int num_nonzeros = 0;
- int* rows = tsm->mutable_rows();
- int* cols = tsm->mutable_cols();
- double* values = tsm->mutable_values();
-
- const vector<ResidualBlock*>& residual_blocks = program->residual_blocks();
- for (int c = 0; c < residual_blocks.size(); ++c) {
- const ResidualBlock* residual_block = residual_blocks[c];
- const int num_parameter_blocks = residual_block->NumParameterBlocks();
- ParameterBlock* const* parameter_blocks =
- residual_block->parameter_blocks();
-
- for (int j = 0; j < num_parameter_blocks; ++j) {
- if (parameter_blocks[j]->IsConstant()) {
- continue;
- }
-
- // Re-size the matrix if needed.
- if (num_nonzeros >= tsm->max_num_nonzeros()) {
- tsm->set_num_nonzeros(num_nonzeros);
- tsm->Reserve(2 * num_nonzeros);
- rows = tsm->mutable_rows();
- cols = tsm->mutable_cols();
- values = tsm->mutable_values();
- }
- CHECK_LT(num_nonzeros, tsm->max_num_nonzeros());
-
- const int r = parameter_blocks[j]->index();
- rows[num_nonzeros] = r;
- cols[num_nonzeros] = c;
- values[num_nonzeros] = 1.0;
- ++num_nonzeros;
- }
- }
-
- tsm->set_num_nonzeros(num_nonzeros);
- return tsm;
-}
-
-bool SolverImpl::ReorderProgramForSchurTypeLinearSolver(
- const LinearSolverType linear_solver_type,
- const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
- const ProblemImpl::ParameterMap& parameter_map,
- ParameterBlockOrdering* parameter_block_ordering,
- Program* program,
- string* error) {
- if (parameter_block_ordering->NumGroups() == 1) {
- // If the user supplied an parameter_block_ordering with just one
- // group, it is equivalent to the user supplying NULL as an
- // parameter_block_ordering. Ceres is completely free to choose the
- // parameter block ordering as it sees fit. For Schur type solvers,
- // this means that the user wishes for Ceres to identify the
- // e_blocks, which we do by computing a maximal independent set.
- vector<ParameterBlock*> schur_ordering;
- const int num_eliminate_blocks =
- ComputeStableSchurOrdering(*program, &schur_ordering);
-
- CHECK_EQ(schur_ordering.size(), program->NumParameterBlocks())
- << "Congratulations, you found a Ceres bug! Please report this error "
- << "to the developers.";
-
- // Update the parameter_block_ordering object.
- for (int i = 0; i < schur_ordering.size(); ++i) {
- double* parameter_block = schur_ordering[i]->mutable_user_state();
- const int group_id = (i < num_eliminate_blocks) ? 0 : 1;
- parameter_block_ordering->AddElementToGroup(parameter_block, group_id);
- }
-
- // We could call ApplyUserOrdering but this is cheaper and
- // simpler.
- swap(*program->mutable_parameter_blocks(), schur_ordering);
- } else {
- // The user provided an ordering with more than one elimination
- // group. Trust the user and apply the ordering.
- if (!ApplyUserOrdering(parameter_map,
- parameter_block_ordering,
- program,
- error)) {
- return false;
- }
- }
-
- // Pre-order the columns corresponding to the schur complement if
- // possible.
-#if !defined(CERES_NO_SUITESPARSE) && !defined(CERES_NO_CAMD)
- if (linear_solver_type == SPARSE_SCHUR &&
- sparse_linear_algebra_library_type == SUITE_SPARSE) {
- vector<int> constraints;
- vector<ParameterBlock*>& parameter_blocks =
- *(program->mutable_parameter_blocks());
-
- for (int i = 0; i < parameter_blocks.size(); ++i) {
- constraints.push_back(
- parameter_block_ordering->GroupId(
- parameter_blocks[i]->mutable_user_state()));
- }
-
- // Renumber the entries of constraints to be contiguous integers
- // as camd requires that the group ids be in the range [0,
- // parameter_blocks.size() - 1].
- SolverImpl::CompactifyArray(&constraints);
-
- // Set the offsets and index for CreateJacobianSparsityTranspose.
- program->SetParameterOffsetsAndIndex();
- // Compute a block sparse presentation of J'.
- scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
- SolverImpl::CreateJacobianBlockSparsityTranspose(program));
-
- SuiteSparse ss;
- cholmod_sparse* block_jacobian_transpose =
- ss.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
-
- vector<int> ordering(parameter_blocks.size(), 0);
- ss.ConstrainedApproximateMinimumDegreeOrdering(block_jacobian_transpose,
- &constraints[0],
- &ordering[0]);
- ss.Free(block_jacobian_transpose);
-
- const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
- for (int i = 0; i < program->NumParameterBlocks(); ++i) {
- parameter_blocks[i] = parameter_blocks_copy[ordering[i]];
- }
- }
-#endif
-
- program->SetParameterOffsetsAndIndex();
- // Schur type solvers also require that their residual blocks be
- // lexicographically ordered.
- const int num_eliminate_blocks =
- parameter_block_ordering->group_to_elements().begin()->second.size();
- return LexicographicallyOrderResidualBlocks(num_eliminate_blocks,
- program,
- error);
-}
-
-bool SolverImpl::ReorderProgramForSparseNormalCholesky(
- const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
- const ParameterBlockOrdering* parameter_block_ordering,
- Program* program,
- string* error) {
- // Set the offsets and index for CreateJacobianSparsityTranspose.
- program->SetParameterOffsetsAndIndex();
- // Compute a block sparse presentation of J'.
- scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
- SolverImpl::CreateJacobianBlockSparsityTranspose(program));
-
- vector<int> ordering(program->NumParameterBlocks(), 0);
- vector<ParameterBlock*>& parameter_blocks =
- *(program->mutable_parameter_blocks());
-
- if (sparse_linear_algebra_library_type == SUITE_SPARSE) {
-#ifdef CERES_NO_SUITESPARSE
- *error = "Can't use SPARSE_NORMAL_CHOLESKY with SUITE_SPARSE because "
- "SuiteSparse was not enabled when Ceres was built.";
- return false;
-#else
- SuiteSparse ss;
- cholmod_sparse* block_jacobian_transpose =
- ss.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
-
-# ifdef CERES_NO_CAMD
- // No cholmod_camd, so ignore user's parameter_block_ordering and
- // use plain old AMD.
- ss.ApproximateMinimumDegreeOrdering(block_jacobian_transpose, &ordering[0]);
-# else
- if (parameter_block_ordering->NumGroups() > 1) {
- // If the user specified more than one elimination groups use them
- // to constrain the ordering.
- vector<int> constraints;
- for (int i = 0; i < parameter_blocks.size(); ++i) {
- constraints.push_back(
- parameter_block_ordering->GroupId(
- parameter_blocks[i]->mutable_user_state()));
- }
- ss.ConstrainedApproximateMinimumDegreeOrdering(
- block_jacobian_transpose,
- &constraints[0],
- &ordering[0]);
- } else {
- ss.ApproximateMinimumDegreeOrdering(block_jacobian_transpose,
- &ordering[0]);
- }
-# endif // CERES_NO_CAMD
-
- ss.Free(block_jacobian_transpose);
-#endif // CERES_NO_SUITESPARSE
-
- } else if (sparse_linear_algebra_library_type == CX_SPARSE) {
-#ifndef CERES_NO_CXSPARSE
-
- // CXSparse works with J'J instead of J'. So compute the block
- // sparsity for J'J and compute an approximate minimum degree
- // ordering.
- CXSparse cxsparse;
- cs_di* block_jacobian_transpose;
- block_jacobian_transpose =
- cxsparse.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
- cs_di* block_jacobian = cxsparse.TransposeMatrix(block_jacobian_transpose);
- cs_di* block_hessian =
- cxsparse.MatrixMatrixMultiply(block_jacobian_transpose, block_jacobian);
- cxsparse.Free(block_jacobian);
- cxsparse.Free(block_jacobian_transpose);
-
- cxsparse.ApproximateMinimumDegreeOrdering(block_hessian, &ordering[0]);
- cxsparse.Free(block_hessian);
-#else // CERES_NO_CXSPARSE
- *error = "Can't use SPARSE_NORMAL_CHOLESKY with CX_SPARSE because "
- "CXSparse was not enabled when Ceres was built.";
- return false;
-#endif // CERES_NO_CXSPARSE
- } else {
- *error = "Unknown sparse linear algebra library.";
- return false;
- }
-
- // Apply ordering.
- const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
- for (int i = 0; i < program->NumParameterBlocks(); ++i) {
- parameter_blocks[i] = parameter_blocks_copy[ordering[i]];
- }
-
- program->SetParameterOffsetsAndIndex();
- return true;
-}
-
-void SolverImpl::CompactifyArray(vector<int>* array_ptr) {
- vector<int>& array = *array_ptr;
- const set<int> unique_group_ids(array.begin(), array.end());
- map<int, int> group_id_map;
- for (set<int>::const_iterator it = unique_group_ids.begin();
- it != unique_group_ids.end();
- ++it) {
- InsertOrDie(&group_id_map, *it, group_id_map.size());
- }
-
- for (int i = 0; i < array.size(); ++i) {
- array[i] = group_id_map[array[i]];
- }
-}
-
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/solver_impl.h b/internal/ceres/solver_impl.h
index 2b7ca3e..c42c32a 100644
--- a/internal/ceres/solver_impl.h
+++ b/internal/ceres/solver_impl.h
@@ -67,10 +67,8 @@ class SolverImpl {
CoordinateDescentMinimizer* inner_iteration_minimizer,
Evaluator* evaluator,
LinearSolver* linear_solver,
- double* parameters,
Solver::Summary* summary);
-#ifndef CERES_NO_LINE_SEARCH_MINIMIZER
static void LineSearchSolve(const Solver::Options& options,
ProblemImpl* problem_impl,
Solver::Summary* summary);
@@ -79,9 +77,7 @@ class SolverImpl {
static void LineSearchMinimize(const Solver::Options &options,
Program* program,
Evaluator* evaluator,
- double* parameters,
Solver::Summary* summary);
-#endif // CERES_NO_LINE_SEARCH_MINIMIZER
// Create the transformed Program, which has all the fixed blocks
// and residuals eliminated, and in the case of automatic schur
@@ -93,7 +89,7 @@ class SolverImpl {
static Program* CreateReducedProgram(Solver::Options* options,
ProblemImpl* problem_impl,
double* fixed_cost,
- string* error);
+ string* message);
// Create the appropriate linear solver, taking into account any
// config changes decided by CreateTransformedProgram(). The
@@ -101,38 +97,18 @@ class SolverImpl {
// selected; consider the case that the remaining elimininated
// blocks is zero after removing fixed blocks.
static LinearSolver* CreateLinearSolver(Solver::Options* options,
- string* error);
-
- // Reorder the residuals for program, if necessary, so that the
- // residuals involving e block (i.e., the first num_eliminate_block
- // parameter blocks) occur together. This is a necessary condition
- // for the Schur eliminator.
- static bool LexicographicallyOrderResidualBlocks(
- const int num_eliminate_blocks,
- Program* program,
- string* error);
+ string* message);
// Create the appropriate evaluator for the transformed program.
static Evaluator* CreateEvaluator(
const Solver::Options& options,
const ProblemImpl::ParameterMap& parameter_map,
Program* program,
- string* error);
-
- // Remove the fixed or unused parameter blocks and residuals
- // depending only on fixed parameters from the problem. Also updates
- // num_eliminate_blocks, since removed parameters changes the point
- // at which the eliminated blocks is valid. If fixed_cost is not
- // NULL, the residual blocks that are removed are evaluated and the
- // sum of their cost is returned in fixed_cost.
- static bool RemoveFixedBlocksFromProgram(Program* program,
- ParameterBlockOrdering* ordering,
- double* fixed_cost,
- string* error);
+ string* message);
static bool IsOrderingValid(const Solver::Options& options,
const ProblemImpl* problem_impl,
- string* error);
+ string* message);
static bool IsParameterBlockSetIndependent(
const set<double*>& parameter_block_ptrs,
@@ -143,78 +119,6 @@ class SolverImpl {
const Program& program,
const ProblemImpl::ParameterMap& parameter_map,
Solver::Summary* summary);
-
- // If the linear solver is of Schur type, then replace it with the
- // closest equivalent linear solver. This is done when the user
- // requested a Schur type solver but the problem structure makes it
- // impossible to use one.
- //
- // If the linear solver is not of Schur type, the function is a
- // no-op.
- static void AlternateLinearSolverForSchurTypeLinearSolver(
- Solver::Options* options);
-
- // Create a TripletSparseMatrix which contains the zero-one
- // structure corresponding to the block sparsity of the transpose of
- // the Jacobian matrix.
- //
- // Caller owns the result.
- static TripletSparseMatrix* CreateJacobianBlockSparsityTranspose(
- const Program* program);
-
- // Reorder the parameter blocks in program using the ordering
- static bool ApplyUserOrdering(
- const ProblemImpl::ParameterMap& parameter_map,
- const ParameterBlockOrdering* parameter_block_ordering,
- Program* program,
- string* error);
-
- // Sparse cholesky factorization routines when doing the sparse
- // cholesky factorization of the Jacobian matrix, reorders its
- // columns to reduce the fill-in. Compute this permutation and
- // re-order the parameter blocks.
- //
- // If the parameter_block_ordering contains more than one
- // elimination group and support for constrained fill-reducing
- // ordering is available in the sparse linear algebra library
- // (SuiteSparse version >= 4.2.0) then the fill reducing
- // ordering will take it into account, otherwise it will be ignored.
- static bool ReorderProgramForSparseNormalCholesky(
- const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
- const ParameterBlockOrdering* parameter_block_ordering,
- Program* program,
- string* error);
-
- // Schur type solvers require that all parameter blocks eliminated
- // by the Schur eliminator occur before others and the residuals be
- // sorted in lexicographic order of their parameter blocks.
- //
- // If the parameter_block_ordering only contains one elimination
- // group then a maximal independent set is computed and used as the
- // first elimination group, otherwise the user's ordering is used.
- //
- // If the linear solver type is SPARSE_SCHUR and support for
- // constrained fill-reducing ordering is available in the sparse
- // linear algebra library (SuiteSparse version >= 4.2.0) then
- // columns of the schur complement matrix are ordered to reduce the
- // fill-in the Cholesky factorization.
- //
- // Upon return, ordering contains the parameter block ordering that
- // was used to order the program.
- static bool ReorderProgramForSchurTypeLinearSolver(
- const LinearSolverType linear_solver_type,
- const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
- const ProblemImpl::ParameterMap& parameter_map,
- ParameterBlockOrdering* parameter_block_ordering,
- Program* program,
- string* error);
-
- // array contains a list of (possibly repeating) non-negative
- // integers. Let us assume that we have constructed another array
- // `p` by sorting and uniqueing the entries of array.
- // CompactifyArray replaces each entry in "array" with its position
- // in `p`.
- static void CompactifyArray(vector<int>* array);
};
} // namespace internal
diff --git a/internal/ceres/solver_impl_test.cc b/internal/ceres/solver_impl_test.cc
index 583ef4e..2d517c6 100644
--- a/internal/ceres/solver_impl_test.cc
+++ b/internal/ceres/solver_impl_test.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
@@ -42,660 +42,6 @@
namespace ceres {
namespace internal {
-// A cost function that sipmply returns its argument.
-class UnaryIdentityCostFunction : public SizedCostFunction<1, 1> {
- public:
- virtual bool Evaluate(double const* const* parameters,
- double* residuals,
- double** jacobians) const {
- residuals[0] = parameters[0][0];
- if (jacobians != NULL && jacobians[0] != NULL) {
- jacobians[0][0] = 1.0;
- }
- return true;
- }
-};
-
-// Templated base class for the CostFunction signatures.
-template <int kNumResiduals, int N0, int N1, int N2>
-class MockCostFunctionBase : public
-SizedCostFunction<kNumResiduals, N0, N1, N2> {
- public:
- virtual bool Evaluate(double const* const* parameters,
- double* residuals,
- double** jacobians) const {
- // Do nothing. This is never called.
- return true;
- }
-};
-
-class UnaryCostFunction : public MockCostFunctionBase<2, 1, 0, 0> {};
-class BinaryCostFunction : public MockCostFunctionBase<2, 1, 1, 0> {};
-class TernaryCostFunction : public MockCostFunctionBase<2, 1, 1, 1> {};
-
-TEST(SolverImpl, RemoveFixedBlocksNothingConstant) {
- ProblemImpl problem;
- double x;
- double y;
- double z;
-
- problem.AddParameterBlock(&x, 1);
- problem.AddParameterBlock(&y, 1);
- problem.AddParameterBlock(&z, 1);
- problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
- problem.AddResidualBlock(new TernaryCostFunction(), NULL, &x, &y, &z);
-
- string error;
- {
- ParameterBlockOrdering ordering;
- ordering.AddElementToGroup(&x, 0);
- ordering.AddElementToGroup(&y, 0);
- ordering.AddElementToGroup(&z, 0);
-
- Program program(*problem.mutable_program());
- EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
- &ordering,
- NULL,
- &error));
- EXPECT_EQ(program.NumParameterBlocks(), 3);
- EXPECT_EQ(program.NumResidualBlocks(), 3);
- EXPECT_EQ(ordering.NumElements(), 3);
- }
-}
-
-TEST(SolverImpl, RemoveFixedBlocksAllParameterBlocksConstant) {
- ProblemImpl problem;
- double x;
-
- problem.AddParameterBlock(&x, 1);
- problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
- problem.SetParameterBlockConstant(&x);
-
- ParameterBlockOrdering ordering;
- ordering.AddElementToGroup(&x, 0);
-
- Program program(problem.program());
- string error;
- EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
- &ordering,
- NULL,
- &error));
- EXPECT_EQ(program.NumParameterBlocks(), 0);
- EXPECT_EQ(program.NumResidualBlocks(), 0);
- EXPECT_EQ(ordering.NumElements(), 0);
-}
-
-TEST(SolverImpl, RemoveFixedBlocksNoResidualBlocks) {
- ProblemImpl problem;
- double x;
- double y;
- double z;
-
- problem.AddParameterBlock(&x, 1);
- problem.AddParameterBlock(&y, 1);
- problem.AddParameterBlock(&z, 1);
-
- ParameterBlockOrdering ordering;
- ordering.AddElementToGroup(&x, 0);
- ordering.AddElementToGroup(&y, 0);
- ordering.AddElementToGroup(&z, 0);
-
-
- Program program(problem.program());
- string error;
- EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
- &ordering,
- NULL,
- &error));
- EXPECT_EQ(program.NumParameterBlocks(), 0);
- EXPECT_EQ(program.NumResidualBlocks(), 0);
- EXPECT_EQ(ordering.NumElements(), 0);
-}
-
-TEST(SolverImpl, RemoveFixedBlocksOneParameterBlockConstant) {
- ProblemImpl problem;
- double x;
- double y;
- double z;
-
- problem.AddParameterBlock(&x, 1);
- problem.AddParameterBlock(&y, 1);
- problem.AddParameterBlock(&z, 1);
-
- ParameterBlockOrdering ordering;
- ordering.AddElementToGroup(&x, 0);
- ordering.AddElementToGroup(&y, 0);
- ordering.AddElementToGroup(&z, 0);
-
- problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
- problem.SetParameterBlockConstant(&x);
-
-
- Program program(problem.program());
- string error;
- EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
- &ordering,
- NULL,
- &error));
- EXPECT_EQ(program.NumParameterBlocks(), 1);
- EXPECT_EQ(program.NumResidualBlocks(), 1);
- EXPECT_EQ(ordering.NumElements(), 1);
-}
-
-TEST(SolverImpl, RemoveFixedBlocksNumEliminateBlocks) {
- ProblemImpl problem;
- double x;
- double y;
- double z;
-
- problem.AddParameterBlock(&x, 1);
- problem.AddParameterBlock(&y, 1);
- problem.AddParameterBlock(&z, 1);
- problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
- problem.AddResidualBlock(new TernaryCostFunction(), NULL, &x, &y, &z);
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
- problem.SetParameterBlockConstant(&x);
-
- ParameterBlockOrdering ordering;
- ordering.AddElementToGroup(&x, 0);
- ordering.AddElementToGroup(&y, 0);
- ordering.AddElementToGroup(&z, 1);
-
- Program program(problem.program());
- string error;
- EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
- &ordering,
- NULL,
- &error));
- EXPECT_EQ(program.NumParameterBlocks(), 2);
- EXPECT_EQ(program.NumResidualBlocks(), 2);
- EXPECT_EQ(ordering.NumElements(), 2);
- EXPECT_EQ(ordering.GroupId(&y), 0);
- EXPECT_EQ(ordering.GroupId(&z), 1);
-}
-
-TEST(SolverImpl, RemoveFixedBlocksFixedCost) {
- ProblemImpl problem;
- double x = 1.23;
- double y = 4.56;
- double z = 7.89;
-
- problem.AddParameterBlock(&x, 1);
- problem.AddParameterBlock(&y, 1);
- problem.AddParameterBlock(&z, 1);
- problem.AddResidualBlock(new UnaryIdentityCostFunction(), NULL, &x);
- problem.AddResidualBlock(new TernaryCostFunction(), NULL, &x, &y, &z);
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
- problem.SetParameterBlockConstant(&x);
-
- ParameterBlockOrdering ordering;
- ordering.AddElementToGroup(&x, 0);
- ordering.AddElementToGroup(&y, 0);
- ordering.AddElementToGroup(&z, 1);
-
- double fixed_cost = 0.0;
- Program program(problem.program());
-
- double expected_fixed_cost;
- ResidualBlock *expected_removed_block = program.residual_blocks()[0];
- scoped_array<double> scratch(
- new double[expected_removed_block->NumScratchDoublesForEvaluate()]);
- expected_removed_block->Evaluate(true,
- &expected_fixed_cost,
- NULL,
- NULL,
- scratch.get());
-
- string error;
- EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
- &ordering,
- &fixed_cost,
- &error));
- EXPECT_EQ(program.NumParameterBlocks(), 2);
- EXPECT_EQ(program.NumResidualBlocks(), 2);
- EXPECT_EQ(ordering.NumElements(), 2);
- EXPECT_EQ(ordering.GroupId(&y), 0);
- EXPECT_EQ(ordering.GroupId(&z), 1);
- EXPECT_DOUBLE_EQ(fixed_cost, expected_fixed_cost);
-}
-
-TEST(SolverImpl, ReorderResidualBlockNormalFunction) {
- ProblemImpl problem;
- double x;
- double y;
- double z;
-
- problem.AddParameterBlock(&x, 1);
- problem.AddParameterBlock(&y, 1);
- problem.AddParameterBlock(&z, 1);
-
- problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &x);
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);
- problem.AddResidualBlock(new UnaryCostFunction(), NULL, &z);
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
- problem.AddResidualBlock(new UnaryCostFunction(), NULL, &y);
-
- ParameterBlockOrdering* ordering = new ParameterBlockOrdering;
- ordering->AddElementToGroup(&x, 0);
- ordering->AddElementToGroup(&y, 0);
- ordering->AddElementToGroup(&z, 1);
-
- Solver::Options options;
- options.linear_solver_type = DENSE_SCHUR;
- options.linear_solver_ordering = ordering;
-
- const vector<ResidualBlock*>& residual_blocks =
- problem.program().residual_blocks();
-
- vector<ResidualBlock*> expected_residual_blocks;
-
- // This is a bit fragile, but it serves the purpose. We know the
- // bucketing algorithm that the reordering function uses, so we
- // expect the order for residual blocks for each e_block to be
- // filled in reverse.
- expected_residual_blocks.push_back(residual_blocks[4]);
- expected_residual_blocks.push_back(residual_blocks[1]);
- expected_residual_blocks.push_back(residual_blocks[0]);
- expected_residual_blocks.push_back(residual_blocks[5]);
- expected_residual_blocks.push_back(residual_blocks[2]);
- expected_residual_blocks.push_back(residual_blocks[3]);
-
- Program* program = problem.mutable_program();
- program->SetParameterOffsetsAndIndex();
-
- string error;
- EXPECT_TRUE(SolverImpl::LexicographicallyOrderResidualBlocks(
- 2,
- problem.mutable_program(),
- &error));
- EXPECT_EQ(residual_blocks.size(), expected_residual_blocks.size());
- for (int i = 0; i < expected_residual_blocks.size(); ++i) {
- EXPECT_EQ(residual_blocks[i], expected_residual_blocks[i]);
- }
-}
-
-TEST(SolverImpl, ReorderResidualBlockNormalFunctionWithFixedBlocks) {
- ProblemImpl problem;
- double x;
- double y;
- double z;
-
- problem.AddParameterBlock(&x, 1);
- problem.AddParameterBlock(&y, 1);
- problem.AddParameterBlock(&z, 1);
-
- // Set one parameter block constant.
- problem.SetParameterBlockConstant(&z);
-
- // Mark residuals for x's row block with "x" for readability.
- problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x); // 0 x
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &x); // 1 x
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y); // 2
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y); // 3
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &z); // 4 x
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y); // 5
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &z); // 6 x
- problem.AddResidualBlock(new UnaryCostFunction(), NULL, &y); // 7
-
- ParameterBlockOrdering* ordering = new ParameterBlockOrdering;
- ordering->AddElementToGroup(&x, 0);
- ordering->AddElementToGroup(&z, 0);
- ordering->AddElementToGroup(&y, 1);
-
- Solver::Options options;
- options.linear_solver_type = DENSE_SCHUR;
- options.linear_solver_ordering = ordering;
-
- // Create the reduced program. This should remove the fixed block "z",
- // marking the index to -1 at the same time. x and y also get indices.
- string error;
- scoped_ptr<Program> reduced_program(
- SolverImpl::CreateReducedProgram(&options, &problem, NULL, &error));
-
- const vector<ResidualBlock*>& residual_blocks =
- problem.program().residual_blocks();
-
- // This is a bit fragile, but it serves the purpose. We know the
- // bucketing algorithm that the reordering function uses, so we
- // expect the order for residual blocks for each e_block to be
- // filled in reverse.
-
- vector<ResidualBlock*> expected_residual_blocks;
-
- // Row block for residuals involving "x". These are marked "x" in the block
- // of code calling AddResidual() above.
- expected_residual_blocks.push_back(residual_blocks[6]);
- expected_residual_blocks.push_back(residual_blocks[4]);
- expected_residual_blocks.push_back(residual_blocks[1]);
- expected_residual_blocks.push_back(residual_blocks[0]);
-
- // Row block for residuals involving "y".
- expected_residual_blocks.push_back(residual_blocks[7]);
- expected_residual_blocks.push_back(residual_blocks[5]);
- expected_residual_blocks.push_back(residual_blocks[3]);
- expected_residual_blocks.push_back(residual_blocks[2]);
-
- EXPECT_EQ(reduced_program->residual_blocks().size(),
- expected_residual_blocks.size());
- for (int i = 0; i < expected_residual_blocks.size(); ++i) {
- EXPECT_EQ(reduced_program->residual_blocks()[i],
- expected_residual_blocks[i]);
- }
-}
-
-TEST(SolverImpl, AutomaticSchurReorderingRespectsConstantBlocks) {
- ProblemImpl problem;
- double x;
- double y;
- double z;
-
- problem.AddParameterBlock(&x, 1);
- problem.AddParameterBlock(&y, 1);
- problem.AddParameterBlock(&z, 1);
-
- // Set one parameter block constant.
- problem.SetParameterBlockConstant(&z);
-
- problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &x);
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &z);
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);
- problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &z);
- problem.AddResidualBlock(new UnaryCostFunction(), NULL, &y);
- problem.AddResidualBlock(new UnaryCostFunction(), NULL, &z);
-
- ParameterBlockOrdering* ordering = new ParameterBlockOrdering;
- ordering->AddElementToGroup(&x, 0);
- ordering->AddElementToGroup(&z, 0);
- ordering->AddElementToGroup(&y, 0);
-
- Solver::Options options;
- options.linear_solver_type = DENSE_SCHUR;
- options.linear_solver_ordering = ordering;
-
- string error;
- scoped_ptr<Program> reduced_program(
- SolverImpl::CreateReducedProgram(&options, &problem, NULL, &error));
-
- const vector<ResidualBlock*>& residual_blocks =
- reduced_program->residual_blocks();
- const vector<ParameterBlock*>& parameter_blocks =
- reduced_program->parameter_blocks();
-
- const vector<ResidualBlock*>& original_residual_blocks =
- problem.program().residual_blocks();
-
- EXPECT_EQ(residual_blocks.size(), 8);
- EXPECT_EQ(reduced_program->parameter_blocks().size(), 2);
-
- // Verify that right parmeter block and the residual blocks have
- // been removed.
- for (int i = 0; i < 8; ++i) {
- EXPECT_NE(residual_blocks[i], original_residual_blocks.back());
- }
- for (int i = 0; i < 2; ++i) {
- EXPECT_NE(parameter_blocks[i]->mutable_user_state(), &z);
- }
-}
-
-TEST(SolverImpl, ApplyUserOrderingOrderingTooSmall) {
- ProblemImpl problem;
- double x;
- double y;
- double z;
-
- problem.AddParameterBlock(&x, 1);
- problem.AddParameterBlock(&y, 1);
- problem.AddParameterBlock(&z, 1);
-
- ParameterBlockOrdering ordering;
- ordering.AddElementToGroup(&x, 0);
- ordering.AddElementToGroup(&y, 1);
-
- Program program(problem.program());
- string error;
- EXPECT_FALSE(SolverImpl::ApplyUserOrdering(problem.parameter_map(),
- &ordering,
- &program,
- &error));
-}
-
-TEST(SolverImpl, ApplyUserOrderingNormal) {
- ProblemImpl problem;
- double x;
- double y;
- double z;
-
- problem.AddParameterBlock(&x, 1);
- problem.AddParameterBlock(&y, 1);
- problem.AddParameterBlock(&z, 1);
-
- ParameterBlockOrdering ordering;
- ordering.AddElementToGroup(&x, 0);
- ordering.AddElementToGroup(&y, 2);
- ordering.AddElementToGroup(&z, 1);
-
- Program* program = problem.mutable_program();
- string error;
-
- EXPECT_TRUE(SolverImpl::ApplyUserOrdering(problem.parameter_map(),
- &ordering,
- program,
- &error));
- const vector<ParameterBlock*>& parameter_blocks = program->parameter_blocks();
-
- EXPECT_EQ(parameter_blocks.size(), 3);
- EXPECT_EQ(parameter_blocks[0]->user_state(), &x);
- EXPECT_EQ(parameter_blocks[1]->user_state(), &z);
- EXPECT_EQ(parameter_blocks[2]->user_state(), &y);
-}
-
-#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
-TEST(SolverImpl, CreateLinearSolverNoSuiteSparse) {
- Solver::Options options;
- options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
- // CreateLinearSolver assumes a non-empty ordering.
- options.linear_solver_ordering = new ParameterBlockOrdering;
- string error;
- EXPECT_FALSE(SolverImpl::CreateLinearSolver(&options, &error));
-}
-#endif
-
-TEST(SolverImpl, CreateLinearSolverNegativeMaxNumIterations) {
- Solver::Options options;
- options.linear_solver_type = DENSE_QR;
- options.max_linear_solver_iterations = -1;
- // CreateLinearSolver assumes a non-empty ordering.
- options.linear_solver_ordering = new ParameterBlockOrdering;
- string error;
- EXPECT_EQ(SolverImpl::CreateLinearSolver(&options, &error),
- static_cast<LinearSolver*>(NULL));
-}
-
-TEST(SolverImpl, CreateLinearSolverNegativeMinNumIterations) {
- Solver::Options options;
- options.linear_solver_type = DENSE_QR;
- options.min_linear_solver_iterations = -1;
- // CreateLinearSolver assumes a non-empty ordering.
- options.linear_solver_ordering = new ParameterBlockOrdering;
- string error;
- EXPECT_EQ(SolverImpl::CreateLinearSolver(&options, &error),
- static_cast<LinearSolver*>(NULL));
-}
-
-TEST(SolverImpl, CreateLinearSolverMaxLessThanMinIterations) {
- Solver::Options options;
- options.linear_solver_type = DENSE_QR;
- options.min_linear_solver_iterations = 10;
- options.max_linear_solver_iterations = 5;
- options.linear_solver_ordering = new ParameterBlockOrdering;
- string error;
- EXPECT_EQ(SolverImpl::CreateLinearSolver(&options, &error),
- static_cast<LinearSolver*>(NULL));
-}
-
-TEST(SolverImpl, CreateLinearSolverDenseSchurMultipleThreads) {
- Solver::Options options;
- options.linear_solver_type = DENSE_SCHUR;
- options.num_linear_solver_threads = 2;
- // The Schur type solvers can only be created with the Ordering
- // contains at least one elimination group.
- options.linear_solver_ordering = new ParameterBlockOrdering;
- double x;
- double y;
- options.linear_solver_ordering->AddElementToGroup(&x, 0);
- options.linear_solver_ordering->AddElementToGroup(&y, 0);
-
- string error;
- scoped_ptr<LinearSolver> solver(
- SolverImpl::CreateLinearSolver(&options, &error));
- EXPECT_TRUE(solver != NULL);
- EXPECT_EQ(options.linear_solver_type, DENSE_SCHUR);
- EXPECT_EQ(options.num_linear_solver_threads, 2);
-}
-
-TEST(SolverImpl, CreateIterativeLinearSolverForDogleg) {
- Solver::Options options;
- options.trust_region_strategy_type = DOGLEG;
- // CreateLinearSolver assumes a non-empty ordering.
- options.linear_solver_ordering = new ParameterBlockOrdering;
- string error;
- options.linear_solver_type = ITERATIVE_SCHUR;
- EXPECT_EQ(SolverImpl::CreateLinearSolver(&options, &error),
- static_cast<LinearSolver*>(NULL));
-
- options.linear_solver_type = CGNR;
- EXPECT_EQ(SolverImpl::CreateLinearSolver(&options, &error),
- static_cast<LinearSolver*>(NULL));
-}
-
-TEST(SolverImpl, CreateLinearSolverNormalOperation) {
- Solver::Options options;
- scoped_ptr<LinearSolver> solver;
- options.linear_solver_type = DENSE_QR;
- // CreateLinearSolver assumes a non-empty ordering.
- options.linear_solver_ordering = new ParameterBlockOrdering;
- string error;
- solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
- EXPECT_EQ(options.linear_solver_type, DENSE_QR);
- EXPECT_TRUE(solver.get() != NULL);
-
- options.linear_solver_type = DENSE_NORMAL_CHOLESKY;
- solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
- EXPECT_EQ(options.linear_solver_type, DENSE_NORMAL_CHOLESKY);
- EXPECT_TRUE(solver.get() != NULL);
-
-#ifndef CERES_NO_SUITESPARSE
- options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
- options.sparse_linear_algebra_library_type = SUITE_SPARSE;
- solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
- EXPECT_EQ(options.linear_solver_type, SPARSE_NORMAL_CHOLESKY);
- EXPECT_TRUE(solver.get() != NULL);
-#endif
-
-#ifndef CERES_NO_CXSPARSE
- options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
- options.sparse_linear_algebra_library_type = CX_SPARSE;
- solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
- EXPECT_EQ(options.linear_solver_type, SPARSE_NORMAL_CHOLESKY);
- EXPECT_TRUE(solver.get() != NULL);
-#endif
-
- double x;
- double y;
- options.linear_solver_ordering->AddElementToGroup(&x, 0);
- options.linear_solver_ordering->AddElementToGroup(&y, 0);
-
- options.linear_solver_type = DENSE_SCHUR;
- solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
- EXPECT_EQ(options.linear_solver_type, DENSE_SCHUR);
- EXPECT_TRUE(solver.get() != NULL);
-
- options.linear_solver_type = SPARSE_SCHUR;
- solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
-
-#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
- EXPECT_TRUE(SolverImpl::CreateLinearSolver(&options, &error) == NULL);
-#else
- EXPECT_TRUE(solver.get() != NULL);
- EXPECT_EQ(options.linear_solver_type, SPARSE_SCHUR);
-#endif
-
- options.linear_solver_type = ITERATIVE_SCHUR;
- solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
- EXPECT_EQ(options.linear_solver_type, ITERATIVE_SCHUR);
- EXPECT_TRUE(solver.get() != NULL);
-}
-
-struct QuadraticCostFunction {
- template <typename T> bool operator()(const T* const x,
- T* residual) const {
- residual[0] = T(5.0) - *x;
- return true;
- }
-};
-
-struct RememberingCallback : public IterationCallback {
- explicit RememberingCallback(double *x) : calls(0), x(x) {}
- virtual ~RememberingCallback() {}
- virtual CallbackReturnType operator()(const IterationSummary& summary) {
- x_values.push_back(*x);
- return SOLVER_CONTINUE;
- }
- int calls;
- double *x;
- vector<double> x_values;
-};
-
-TEST(SolverImpl, UpdateStateEveryIterationOption) {
- double x = 50.0;
- const double original_x = x;
-
- scoped_ptr<CostFunction> cost_function(
- new AutoDiffCostFunction<QuadraticCostFunction, 1, 1>(
- new QuadraticCostFunction));
-
- Problem::Options problem_options;
- problem_options.cost_function_ownership = DO_NOT_TAKE_OWNERSHIP;
- ProblemImpl problem(problem_options);
- problem.AddResidualBlock(cost_function.get(), NULL, &x);
-
- Solver::Options options;
- options.linear_solver_type = DENSE_QR;
-
- RememberingCallback callback(&x);
- options.callbacks.push_back(&callback);
-
- Solver::Summary summary;
-
- int num_iterations;
-
- // First try: no updating.
- SolverImpl::Solve(options, &problem, &summary);
- num_iterations = summary.num_successful_steps +
- summary.num_unsuccessful_steps;
- EXPECT_GT(num_iterations, 1);
- for (int i = 0; i < callback.x_values.size(); ++i) {
- EXPECT_EQ(50.0, callback.x_values[i]);
- }
-
- // Second try: with updating
- x = 50.0;
- options.update_state_every_iteration = true;
- callback.x_values.clear();
- SolverImpl::Solve(options, &problem, &summary);
- num_iterations = summary.num_successful_steps +
- summary.num_unsuccessful_steps;
- EXPECT_GT(num_iterations, 1);
- EXPECT_EQ(original_x, callback.x_values[0]);
- EXPECT_NE(original_x, callback.x_values[1]);
-}
-
// The parameters must be in separate blocks so that they can be individually
// set constant or not.
struct Quadratic4DCostFunction {
@@ -753,289 +99,8 @@ TEST(SolverImpl, ConstantParameterBlocksDoNotChangeAndStateInvariantKept) {
EXPECT_EQ(&y, problem.program().parameter_blocks()[1]->state());
EXPECT_EQ(&z, problem.program().parameter_blocks()[2]->state());
EXPECT_EQ(&w, problem.program().parameter_blocks()[3]->state());
-}
-
-TEST(SolverImpl, NoParameterBlocks) {
- ProblemImpl problem_impl;
- Solver::Options options;
- Solver::Summary summary;
- SolverImpl::Solve(options, &problem_impl, &summary);
- EXPECT_EQ(summary.termination_type, DID_NOT_RUN);
- EXPECT_EQ(summary.error, "Problem contains no parameter blocks.");
-}
-
-TEST(SolverImpl, NoResiduals) {
- ProblemImpl problem_impl;
- Solver::Options options;
- Solver::Summary summary;
- double x = 1;
- problem_impl.AddParameterBlock(&x, 1);
- SolverImpl::Solve(options, &problem_impl, &summary);
- EXPECT_EQ(summary.termination_type, DID_NOT_RUN);
- EXPECT_EQ(summary.error, "Problem contains no residual blocks.");
-}
-
-
-TEST(SolverImpl, ProblemIsConstant) {
- ProblemImpl problem_impl;
- Solver::Options options;
- Solver::Summary summary;
- double x = 1;
- problem_impl.AddResidualBlock(new UnaryIdentityCostFunction, NULL, &x);
- problem_impl.SetParameterBlockConstant(&x);
- SolverImpl::Solve(options, &problem_impl, &summary);
- EXPECT_EQ(summary.termination_type, FUNCTION_TOLERANCE);
- EXPECT_EQ(summary.initial_cost, 1.0 / 2.0);
- EXPECT_EQ(summary.final_cost, 1.0 / 2.0);
-}
-
-TEST(SolverImpl, AlternateLinearSolverForSchurTypeLinearSolver) {
- Solver::Options options;
-
- options.linear_solver_type = DENSE_QR;
- SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
- EXPECT_EQ(options.linear_solver_type, DENSE_QR);
-
- options.linear_solver_type = DENSE_NORMAL_CHOLESKY;
- SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
- EXPECT_EQ(options.linear_solver_type, DENSE_NORMAL_CHOLESKY);
-
- options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
- SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
- EXPECT_EQ(options.linear_solver_type, SPARSE_NORMAL_CHOLESKY);
-
- options.linear_solver_type = CGNR;
- SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
- EXPECT_EQ(options.linear_solver_type, CGNR);
-
- options.linear_solver_type = DENSE_SCHUR;
- SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
- EXPECT_EQ(options.linear_solver_type, DENSE_QR);
-
- options.linear_solver_type = SPARSE_SCHUR;
- SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
- EXPECT_EQ(options.linear_solver_type, SPARSE_NORMAL_CHOLESKY);
-
- options.linear_solver_type = ITERATIVE_SCHUR;
- options.preconditioner_type = IDENTITY;
- SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
- EXPECT_EQ(options.linear_solver_type, CGNR);
- EXPECT_EQ(options.preconditioner_type, IDENTITY);
-
- options.linear_solver_type = ITERATIVE_SCHUR;
- options.preconditioner_type = JACOBI;
- SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
- EXPECT_EQ(options.linear_solver_type, CGNR);
- EXPECT_EQ(options.preconditioner_type, JACOBI);
-
- options.linear_solver_type = ITERATIVE_SCHUR;
- options.preconditioner_type = SCHUR_JACOBI;
- SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
- EXPECT_EQ(options.linear_solver_type, CGNR);
- EXPECT_EQ(options.preconditioner_type, JACOBI);
-
- options.linear_solver_type = ITERATIVE_SCHUR;
- options.preconditioner_type = CLUSTER_JACOBI;
- SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
- EXPECT_EQ(options.linear_solver_type, CGNR);
- EXPECT_EQ(options.preconditioner_type, JACOBI);
-
- options.linear_solver_type = ITERATIVE_SCHUR;
- options.preconditioner_type = CLUSTER_TRIDIAGONAL;
- SolverImpl::AlternateLinearSolverForSchurTypeLinearSolver(&options);
- EXPECT_EQ(options.linear_solver_type, CGNR);
- EXPECT_EQ(options.preconditioner_type, JACOBI);
-}
-
-TEST(SolverImpl, CreateJacobianBlockSparsityTranspose) {
- ProblemImpl problem;
- double x[2];
- double y[3];
- double z;
-
- problem.AddParameterBlock(x, 2);
- problem.AddParameterBlock(y, 3);
- problem.AddParameterBlock(&z, 1);
-
- problem.AddResidualBlock(new MockCostFunctionBase<2, 2, 0, 0>(), NULL, x);
- problem.AddResidualBlock(new MockCostFunctionBase<3, 1, 2, 0>(), NULL, &z, x);
- problem.AddResidualBlock(new MockCostFunctionBase<4, 1, 3, 0>(), NULL, &z, y);
- problem.AddResidualBlock(new MockCostFunctionBase<5, 1, 3, 0>(), NULL, &z, y);
- problem.AddResidualBlock(new MockCostFunctionBase<1, 2, 1, 0>(), NULL, x, &z);
- problem.AddResidualBlock(new MockCostFunctionBase<2, 1, 3, 0>(), NULL, &z, y);
- problem.AddResidualBlock(new MockCostFunctionBase<2, 2, 1, 0>(), NULL, x, &z);
- problem.AddResidualBlock(new MockCostFunctionBase<1, 3, 0, 0>(), NULL, y);
-
- TripletSparseMatrix expected_block_sparse_jacobian(3, 8, 14);
- {
- int* rows = expected_block_sparse_jacobian.mutable_rows();
- int* cols = expected_block_sparse_jacobian.mutable_cols();
- double* values = expected_block_sparse_jacobian.mutable_values();
- rows[0] = 0;
- cols[0] = 0;
-
- rows[1] = 2;
- cols[1] = 1;
- rows[2] = 0;
- cols[2] = 1;
-
- rows[3] = 2;
- cols[3] = 2;
- rows[4] = 1;
- cols[4] = 2;
-
- rows[5] = 2;
- cols[5] = 3;
- rows[6] = 1;
- cols[6] = 3;
-
- rows[7] = 0;
- cols[7] = 4;
- rows[8] = 2;
- cols[8] = 4;
-
- rows[9] = 2;
- cols[9] = 5;
- rows[10] = 1;
- cols[10] = 5;
-
- rows[11] = 0;
- cols[11] = 6;
- rows[12] = 2;
- cols[12] = 6;
-
- rows[13] = 1;
- cols[13] = 7;
- fill(values, values + 14, 1.0);
- expected_block_sparse_jacobian.set_num_nonzeros(14);
- }
-
- Program* program = problem.mutable_program();
- program->SetParameterOffsetsAndIndex();
-
- scoped_ptr<TripletSparseMatrix> actual_block_sparse_jacobian(
- SolverImpl::CreateJacobianBlockSparsityTranspose(program));
-
- Matrix expected_dense_jacobian;
- expected_block_sparse_jacobian.ToDenseMatrix(&expected_dense_jacobian);
-
- Matrix actual_dense_jacobian;
- actual_block_sparse_jacobian->ToDenseMatrix(&actual_dense_jacobian);
- EXPECT_EQ((expected_dense_jacobian - actual_dense_jacobian).norm(), 0.0);
-}
-
-template <int kNumResiduals, int kNumParameterBlocks>
-class NumParameterBlocksCostFunction : public CostFunction {
- public:
- NumParameterBlocksCostFunction() {
- set_num_residuals(kNumResiduals);
- for (int i = 0; i < kNumParameterBlocks; ++i) {
- mutable_parameter_block_sizes()->push_back(1);
- }
- }
-
- virtual ~NumParameterBlocksCostFunction() {
- }
-
- virtual bool Evaluate(double const* const* parameters,
- double* residuals,
- double** jacobians) const {
- return true;
- }
-};
-
-TEST(SolverImpl, ReallocationInCreateJacobianBlockSparsityTranspose) {
- // CreateJacobianBlockSparsityTranspose starts with a conservative
- // estimate of the size of the sparsity pattern. This test ensures
- // that when those estimates are violated, the reallocation/resizing
- // logic works correctly.
-
- ProblemImpl problem;
- double x[20];
-
- vector<double*> parameter_blocks;
- for (int i = 0; i < 20; ++i) {
- problem.AddParameterBlock(x + i, 1);
- parameter_blocks.push_back(x + i);
- }
-
- problem.AddResidualBlock(new NumParameterBlocksCostFunction<1, 20>(),
- NULL,
- parameter_blocks);
-
- TripletSparseMatrix expected_block_sparse_jacobian(20, 1, 20);
- {
- int* rows = expected_block_sparse_jacobian.mutable_rows();
- int* cols = expected_block_sparse_jacobian.mutable_cols();
- for (int i = 0; i < 20; ++i) {
- rows[i] = i;
- cols[i] = 0;
- }
-
- double* values = expected_block_sparse_jacobian.mutable_values();
- fill(values, values + 20, 1.0);
- expected_block_sparse_jacobian.set_num_nonzeros(20);
- }
-
- Program* program = problem.mutable_program();
- program->SetParameterOffsetsAndIndex();
-
- scoped_ptr<TripletSparseMatrix> actual_block_sparse_jacobian(
- SolverImpl::CreateJacobianBlockSparsityTranspose(program));
-
- Matrix expected_dense_jacobian;
- expected_block_sparse_jacobian.ToDenseMatrix(&expected_dense_jacobian);
-
- Matrix actual_dense_jacobian;
- actual_block_sparse_jacobian->ToDenseMatrix(&actual_dense_jacobian);
- EXPECT_EQ((expected_dense_jacobian - actual_dense_jacobian).norm(), 0.0);
-}
-
-TEST(CompactifyArray, ContiguousEntries) {
- vector<int> array;
- array.push_back(0);
- array.push_back(1);
- vector<int> expected = array;
- SolverImpl::CompactifyArray(&array);
- EXPECT_EQ(array, expected);
- array.clear();
-
- array.push_back(1);
- array.push_back(0);
- expected = array;
- SolverImpl::CompactifyArray(&array);
- EXPECT_EQ(array, expected);
-}
-
-TEST(CompactifyArray, NonContiguousEntries) {
- vector<int> array;
- array.push_back(0);
- array.push_back(2);
- vector<int> expected;
- expected.push_back(0);
- expected.push_back(1);
- SolverImpl::CompactifyArray(&array);
- EXPECT_EQ(array, expected);
-}
-
-TEST(CompactifyArray, NonContiguousRepeatingEntries) {
- vector<int> array;
- array.push_back(3);
- array.push_back(1);
- array.push_back(0);
- array.push_back(0);
- array.push_back(0);
- array.push_back(5);
- vector<int> expected;
- expected.push_back(2);
- expected.push_back(1);
- expected.push_back(0);
- expected.push_back(0);
- expected.push_back(0);
- expected.push_back(3);
- SolverImpl::CompactifyArray(&array);
- EXPECT_EQ(array, expected);
+ EXPECT_TRUE(problem.program().IsValid());
}
} // namespace internal
diff --git a/internal/ceres/solver_test.cc b/internal/ceres/solver_test.cc
new file mode 100644
index 0000000..2a136f7
--- /dev/null
+++ b/internal/ceres/solver_test.cc
@@ -0,0 +1,298 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/solver.h"
+
+#include <cmath>
+#include <limits>
+#include <vector>
+#include "gtest/gtest.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/autodiff_cost_function.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/problem.h"
+#include "ceres/problem_impl.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(SolverOptions, DefaultTrustRegionOptionsAreValid) {
+ Solver::Options options;
+ options.minimizer_type = TRUST_REGION;
+ string error;
+ EXPECT_TRUE(options.IsValid(&error)) << error;
+}
+
+TEST(SolverOptions, DefaultLineSearchOptionsAreValid) {
+ Solver::Options options;
+ options.minimizer_type = LINE_SEARCH;
+ string error;
+ EXPECT_TRUE(options.IsValid(&error)) << error;
+}
+
+struct QuadraticCostFunctor {
+ template <typename T> bool operator()(const T* const x,
+ T* residual) const {
+ residual[0] = T(5.0) - *x;
+ return true;
+ }
+
+ static CostFunction* Create() {
+ return new AutoDiffCostFunction<QuadraticCostFunctor, 1, 1>(
+ new QuadraticCostFunctor);
+ }
+};
+
+struct RememberingCallback : public IterationCallback {
+ explicit RememberingCallback(double *x) : calls(0), x(x) {}
+ virtual ~RememberingCallback() {}
+ virtual CallbackReturnType operator()(const IterationSummary& summary) {
+ x_values.push_back(*x);
+ return SOLVER_CONTINUE;
+ }
+ int calls;
+ double *x;
+ vector<double> x_values;
+};
+
+TEST(Solver, UpdateStateEveryIterationOption) {
+ double x = 50.0;
+ const double original_x = x;
+
+ scoped_ptr<CostFunction> cost_function(QuadraticCostFunctor::Create());
+ Problem::Options problem_options;
+ problem_options.cost_function_ownership = DO_NOT_TAKE_OWNERSHIP;
+ Problem problem(problem_options);
+ problem.AddResidualBlock(cost_function.get(), NULL, &x);
+
+ Solver::Options options;
+ options.linear_solver_type = DENSE_QR;
+
+ RememberingCallback callback(&x);
+ options.callbacks.push_back(&callback);
+
+ Solver::Summary summary;
+
+ int num_iterations;
+
+ // First try: no updating.
+ Solve(options, &problem, &summary);
+ num_iterations = summary.num_successful_steps +
+ summary.num_unsuccessful_steps;
+ EXPECT_GT(num_iterations, 1);
+ for (int i = 0; i < callback.x_values.size(); ++i) {
+ EXPECT_EQ(50.0, callback.x_values[i]);
+ }
+
+ // Second try: with updating
+ x = 50.0;
+ options.update_state_every_iteration = true;
+ callback.x_values.clear();
+ Solve(options, &problem, &summary);
+ num_iterations = summary.num_successful_steps +
+ summary.num_unsuccessful_steps;
+ EXPECT_GT(num_iterations, 1);
+ EXPECT_EQ(original_x, callback.x_values[0]);
+ EXPECT_NE(original_x, callback.x_values[1]);
+}
+
+// The parameters must be in separate blocks so that they can be individually
+// set constant or not.
+struct Quadratic4DCostFunction {
+ template <typename T> bool operator()(const T* const x,
+ const T* const y,
+ const T* const z,
+ const T* const w,
+ T* residual) const {
+    // A 4-dimensional axis-aligned quadratic.
+ residual[0] = T(10.0) - *x +
+ T(20.0) - *y +
+ T(30.0) - *z +
+ T(40.0) - *w;
+ return true;
+ }
+
+ static CostFunction* Create() {
+ return new AutoDiffCostFunction<Quadratic4DCostFunction, 1, 1, 1, 1, 1>(
+ new Quadratic4DCostFunction);
+ }
+};
+
+// A cost function that simply returns its argument.
+class UnaryIdentityCostFunction : public SizedCostFunction<1, 1> {
+ public:
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ residuals[0] = parameters[0][0];
+ if (jacobians != NULL && jacobians[0] != NULL) {
+ jacobians[0][0] = 1.0;
+ }
+ return true;
+ }
+};
+
+TEST(Solver, TrustRegionProblemHasNoParameterBlocks) {
+ Problem problem;
+ Solver::Options options;
+ options.minimizer_type = TRUST_REGION;
+ Solver::Summary summary;
+ Solve(options, &problem, &summary);
+ EXPECT_EQ(summary.termination_type, CONVERGENCE);
+ EXPECT_EQ(summary.message,
+ "Function tolerance reached. "
+ "No non-constant parameter blocks found.");
+}
+
+TEST(Solver, LineSearchProblemHasNoParameterBlocks) {
+ Problem problem;
+ Solver::Options options;
+ options.minimizer_type = LINE_SEARCH;
+ Solver::Summary summary;
+ Solve(options, &problem, &summary);
+ EXPECT_EQ(summary.termination_type, CONVERGENCE);
+ EXPECT_EQ(summary.message,
+ "Function tolerance reached. "
+ "No non-constant parameter blocks found.");
+}
+
+TEST(Solver, TrustRegionProblemHasZeroResiduals) {
+ Problem problem;
+ double x = 1;
+ problem.AddParameterBlock(&x, 1);
+ Solver::Options options;
+ options.minimizer_type = TRUST_REGION;
+ Solver::Summary summary;
+ Solve(options, &problem, &summary);
+ EXPECT_EQ(summary.termination_type, CONVERGENCE);
+ EXPECT_EQ(summary.message,
+ "Function tolerance reached. "
+ "No non-constant parameter blocks found.");
+}
+
+TEST(Solver, LineSearchProblemHasZeroResiduals) {
+ Problem problem;
+ double x = 1;
+ problem.AddParameterBlock(&x, 1);
+ Solver::Options options;
+ options.minimizer_type = LINE_SEARCH;
+ Solver::Summary summary;
+ Solve(options, &problem, &summary);
+ EXPECT_EQ(summary.termination_type, CONVERGENCE);
+ EXPECT_EQ(summary.message,
+ "Function tolerance reached. "
+ "No non-constant parameter blocks found.");
+}
+
+TEST(Solver, TrustRegionProblemIsConstant) {
+ Problem problem;
+ double x = 1;
+ problem.AddResidualBlock(new UnaryIdentityCostFunction, NULL, &x);
+ problem.SetParameterBlockConstant(&x);
+ Solver::Options options;
+ options.minimizer_type = TRUST_REGION;
+ Solver::Summary summary;
+ Solve(options, &problem, &summary);
+ EXPECT_EQ(summary.termination_type, CONVERGENCE);
+ EXPECT_EQ(summary.initial_cost, 1.0 / 2.0);
+ EXPECT_EQ(summary.final_cost, 1.0 / 2.0);
+}
+
+TEST(Solver, LineSearchProblemIsConstant) {
+ Problem problem;
+ double x = 1;
+ problem.AddResidualBlock(new UnaryIdentityCostFunction, NULL, &x);
+ problem.SetParameterBlockConstant(&x);
+ Solver::Options options;
+ options.minimizer_type = LINE_SEARCH;
+ Solver::Summary summary;
+ Solve(options, &problem, &summary);
+ EXPECT_EQ(summary.termination_type, CONVERGENCE);
+ EXPECT_EQ(summary.initial_cost, 1.0 / 2.0);
+ EXPECT_EQ(summary.final_cost, 1.0 / 2.0);
+}
+
+#if defined(CERES_NO_SUITESPARSE)
+TEST(Solver, SparseNormalCholeskyNoSuiteSparse) {
+ Solver::Options options;
+ options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+ options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+ string message;
+ EXPECT_FALSE(options.IsValid(&message));
+}
+#endif
+
+#if defined(CERES_NO_CXSPARSE)
+TEST(Solver, SparseNormalCholeskyNoCXSparse) {
+ Solver::Options options;
+ options.sparse_linear_algebra_library_type = CX_SPARSE;
+ options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+ string message;
+ EXPECT_FALSE(options.IsValid(&message));
+}
+#endif
+
+TEST(Solver, IterativeLinearSolverForDogleg) {
+ Solver::Options options;
+ options.trust_region_strategy_type = DOGLEG;
+ string message;
+ options.linear_solver_type = ITERATIVE_SCHUR;
+ EXPECT_FALSE(options.IsValid(&message));
+
+ options.linear_solver_type = CGNR;
+ EXPECT_FALSE(options.IsValid(&message));
+}
+
+TEST(Solver, LinearSolverTypeNormalOperation) {
+ Solver::Options options;
+ options.linear_solver_type = DENSE_QR;
+
+ string message;
+ EXPECT_TRUE(options.IsValid(&message));
+
+ options.linear_solver_type = DENSE_NORMAL_CHOLESKY;
+ EXPECT_TRUE(options.IsValid(&message));
+
+ options.linear_solver_type = DENSE_SCHUR;
+ EXPECT_TRUE(options.IsValid(&message));
+
+ options.linear_solver_type = SPARSE_SCHUR;
+#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
+ EXPECT_FALSE(options.IsValid(&message));
+#else
+ EXPECT_TRUE(options.IsValid(&message));
+#endif
+
+ options.linear_solver_type = ITERATIVE_SCHUR;
+ EXPECT_TRUE(options.IsValid(&message));
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/sparse_normal_cholesky_solver.cc b/internal/ceres/sparse_normal_cholesky_solver.cc
index f1a5237..0940815 100644
--- a/internal/ceres/sparse_normal_cholesky_solver.cc
+++ b/internal/ceres/sparse_normal_cholesky_solver.cc
@@ -28,7 +28,8 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
-#if !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARSE)
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
#include "ceres/sparse_normal_cholesky_solver.h"
@@ -45,6 +46,8 @@
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
#include "ceres/wall_time.h"
+#include "Eigen/SparseCore"
+
namespace ceres {
namespace internal {
@@ -53,23 +56,23 @@ SparseNormalCholeskySolver::SparseNormalCholeskySolver(
const LinearSolver::Options& options)
: factor_(NULL),
cxsparse_factor_(NULL),
- options_(options) {
+ options_(options) {
}
-SparseNormalCholeskySolver::~SparseNormalCholeskySolver() {
-#ifndef CERES_NO_SUITESPARSE
+void SparseNormalCholeskySolver::FreeFactorization() {
if (factor_ != NULL) {
ss_.Free(factor_);
factor_ = NULL;
}
-#endif
-#ifndef CERES_NO_CXSPARSE
if (cxsparse_factor_ != NULL) {
cxsparse_.Free(cxsparse_factor_);
cxsparse_factor_ = NULL;
}
-#endif // CERES_NO_CXSPARSE
+}
+
+SparseNormalCholeskySolver::~SparseNormalCholeskySolver() {
+ FreeFactorization();
}
LinearSolver::Summary SparseNormalCholeskySolver::SolveImpl(
@@ -77,177 +80,303 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImpl(
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
double * x) {
+
+ const int num_cols = A->num_cols();
+ VectorRef(x, num_cols).setZero();
+ A->LeftMultiply(b, x);
+
+ if (per_solve_options.D != NULL) {
+ // Temporarily append a diagonal block to the A matrix, but undo
+ // it before returning the matrix to the user.
+ scoped_ptr<CompressedRowSparseMatrix> regularizer;
+ if (A->col_blocks().size() > 0) {
+ regularizer.reset(CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
+ per_solve_options.D, A->col_blocks()));
+ } else {
+ regularizer.reset(new CompressedRowSparseMatrix(
+ per_solve_options.D, num_cols));
+ }
+ A->AppendRows(*regularizer);
+ }
+
+ LinearSolver::Summary summary;
switch (options_.sparse_linear_algebra_library_type) {
case SUITE_SPARSE:
- return SolveImplUsingSuiteSparse(A, b, per_solve_options, x);
+ summary = SolveImplUsingSuiteSparse(A, per_solve_options, x);
+ break;
case CX_SPARSE:
- return SolveImplUsingCXSparse(A, b, per_solve_options, x);
+ summary = SolveImplUsingCXSparse(A, per_solve_options, x);
+ break;
+ case EIGEN_SPARSE:
+ summary = SolveImplUsingEigen(A, per_solve_options, x);
+ break;
default:
LOG(FATAL) << "Unknown sparse linear algebra library : "
<< options_.sparse_linear_algebra_library_type;
}
- LOG(FATAL) << "Unknown sparse linear algebra library : "
- << options_.sparse_linear_algebra_library_type;
- return LinearSolver::Summary();
+ if (per_solve_options.D != NULL) {
+ A->DeleteRows(num_cols);
+ }
+
+ return summary;
}
-#ifndef CERES_NO_CXSPARSE
-LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
+LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingEigen(
CompressedRowSparseMatrix* A,
- const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
- double * x) {
- EventLogger event_logger("SparseNormalCholeskySolver::CXSparse::Solve");
+ double * rhs_and_solution) {
+#ifndef CERES_USE_EIGEN_SPARSE
+
+ LinearSolver::Summary summary;
+ summary.num_iterations = 0;
+ summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+ summary.message =
+ "SPARSE_NORMAL_CHOLESKY cannot be used with EIGEN_SPARSE "
+ "because Ceres was not built with support for "
+ "Eigen's SimplicialLDLT decomposition. "
+ "This requires enabling building with -DEIGENSPARSE=ON.";
+ return summary;
+
+#else
+
+ EventLogger event_logger("SparseNormalCholeskySolver::Eigen::Solve");
LinearSolver::Summary summary;
summary.num_iterations = 1;
- const int num_cols = A->num_cols();
- Vector Atb = Vector::Zero(num_cols);
- A->LeftMultiply(b, Atb.data());
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ summary.message = "Success.";
- if (per_solve_options.D != NULL) {
- // Temporarily append a diagonal block to the A matrix, but undo
- // it before returning the matrix to the user.
- CompressedRowSparseMatrix D(per_solve_options.D, num_cols);
- A->AppendRows(D);
+ // Compute the normal equations. J'J delta = J'f and solve them
+ // using a sparse Cholesky factorization. Notice that when compared
+ // to SuiteSparse we have to explicitly compute the normal equations
+ // before they can be factorized. CHOLMOD/SuiteSparse on the other
+ // hand can just work off of Jt to compute the Cholesky
+ // factorization of the normal equations.
+ //
+ // TODO(sameeragarwal): See note about how this maybe a bad idea for
+ // dynamic sparsity.
+ if (outer_product_.get() == NULL) {
+ outer_product_.reset(
+ CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
+ *A, &pattern_));
}
- VectorRef(x, num_cols).setZero();
+ CompressedRowSparseMatrix::ComputeOuterProduct(
+ *A, pattern_, outer_product_.get());
+
+ // Map to an upper triangular column major matrix.
+ //
+ // outer_product_ is a compressed row sparse matrix and in lower
+ // triangular form, when mapped to a compressed column sparse
+ // matrix, it becomes an upper triangular matrix.
+ Eigen::MappedSparseMatrix<double, Eigen::ColMajor> AtA(
+ outer_product_->num_rows(),
+ outer_product_->num_rows(),
+ outer_product_->num_nonzeros(),
+ outer_product_->mutable_rows(),
+ outer_product_->mutable_cols(),
+ outer_product_->mutable_values());
+
+ const Vector b = VectorRef(rhs_and_solution, outer_product_->num_rows());
+ if (simplicial_ldlt_.get() == NULL || options_.dynamic_sparsity) {
+ simplicial_ldlt_.reset(new SimplicialLDLT);
+ // This is a crappy way to be doing this. But right now Eigen does
+ // not expose a way to do symbolic analysis with a given
+ // permutation pattern, so we cannot use a block analysis of the
+ // Jacobian.
+ simplicial_ldlt_->analyzePattern(AtA);
+ if (simplicial_ldlt_->info() != Eigen::Success) {
+ summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+ summary.message =
+ "Eigen failure. Unable to find symbolic factorization.";
+ return summary;
+ }
+ }
+ event_logger.AddEvent("Analysis");
+
+ simplicial_ldlt_->factorize(AtA);
+ if (simplicial_ldlt_->info() != Eigen::Success) {
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+ summary.message =
+ "Eigen failure. Unable to find numeric factorization.";
+ return summary;
+ }
+
+ VectorRef(rhs_and_solution, outer_product_->num_rows()) =
+ simplicial_ldlt_->solve(b);
+ if (simplicial_ldlt_->info() != Eigen::Success) {
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+ summary.message =
+ "Eigen failure. Unable to do triangular solve.";
+ return summary;
+ }
+
+ event_logger.AddEvent("Solve");
+ return summary;
+#endif // CERES_USE_EIGEN_SPARSE
+}
+
+
+
+LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
+ CompressedRowSparseMatrix* A,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double * rhs_and_solution) {
+#ifdef CERES_NO_CXSPARSE
+
+ LinearSolver::Summary summary;
+ summary.num_iterations = 0;
+ summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+ summary.message =
+ "SPARSE_NORMAL_CHOLESKY cannot be used with CX_SPARSE "
+ "because Ceres was not built with support for CXSparse. "
+ "This requires enabling building with -DCXSPARSE=ON.";
+
+ return summary;
+
+#else
+
+ EventLogger event_logger("SparseNormalCholeskySolver::CXSparse::Solve");
- // Wrap the augmented Jacobian in a compressed sparse column matrix.
- cs_di At = cxsparse_.CreateSparseMatrixTransposeView(A);
+ LinearSolver::Summary summary;
+ summary.num_iterations = 1;
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ summary.message = "Success.";
// Compute the normal equations. J'J delta = J'f and solve them
// using a sparse Cholesky factorization. Notice that when compared
- // to SuiteSparse we have to explicitly compute the transpose of Jt,
- // and then the normal equations before they can be
- // factorized. CHOLMOD/SuiteSparse on the other hand can just work
- // off of Jt to compute the Cholesky factorization of the normal
- // equations.
- cs_di* A2 = cxsparse_.TransposeMatrix(&At);
- cs_di* AtA = cxsparse_.MatrixMatrixMultiply(&At, A2);
-
- cxsparse_.Free(A2);
- if (per_solve_options.D != NULL) {
- A->DeleteRows(num_cols);
+ // to SuiteSparse we have to explicitly compute the normal equations
+ // before they can be factorized. CHOLMOD/SuiteSparse on the other
+ // hand can just work off of Jt to compute the Cholesky
+ // factorization of the normal equations.
+ //
+ // TODO(sameeragarwal): If dynamic sparsity is enabled, then this is
+ // not a good idea performance wise, since the jacobian has far too
+ // many entries and the program will go crazy with memory.
+ if (outer_product_.get() == NULL) {
+ outer_product_.reset(
+ CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
+ *A, &pattern_));
}
+
+ CompressedRowSparseMatrix::ComputeOuterProduct(
+ *A, pattern_, outer_product_.get());
+ cs_di AtA_view =
+ cxsparse_.CreateSparseMatrixTransposeView(outer_product_.get());
+ cs_di* AtA = &AtA_view;
+
event_logger.AddEvent("Setup");
// Compute symbolic factorization if not available.
+ if (options_.dynamic_sparsity) {
+ FreeFactorization();
+ }
if (cxsparse_factor_ == NULL) {
if (options_.use_postordering) {
- cxsparse_factor_ =
- CHECK_NOTNULL(cxsparse_.BlockAnalyzeCholesky(AtA,
- A->col_blocks(),
- A->col_blocks()));
+ cxsparse_factor_ = cxsparse_.BlockAnalyzeCholesky(AtA,
+ A->col_blocks(),
+ A->col_blocks());
} else {
- cxsparse_factor_ =
- CHECK_NOTNULL(cxsparse_.AnalyzeCholeskyWithNaturalOrdering(AtA));
+ if (options_.dynamic_sparsity) {
+ cxsparse_factor_ = cxsparse_.AnalyzeCholesky(AtA);
+ } else {
+ cxsparse_factor_ = cxsparse_.AnalyzeCholeskyWithNaturalOrdering(AtA);
+ }
}
}
event_logger.AddEvent("Analysis");
- // Solve the linear system.
- if (cxsparse_.SolveCholesky(AtA, cxsparse_factor_, Atb.data())) {
- VectorRef(x, Atb.rows()) = Atb;
- summary.termination_type = TOLERANCE;
+ if (cxsparse_factor_ == NULL) {
+ summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+ summary.message =
+ "CXSparse failure. Unable to find symbolic factorization.";
+ } else if (!cxsparse_.SolveCholesky(AtA, cxsparse_factor_, rhs_and_solution)) {
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+ summary.message = "CXSparse::SolveCholesky failed.";
}
event_logger.AddEvent("Solve");
- cxsparse_.Free(AtA);
- event_logger.AddEvent("Teardown");
return summary;
-}
-#else
-LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
- CompressedRowSparseMatrix* A,
- const double* b,
- const LinearSolver::PerSolveOptions& per_solve_options,
- double * x) {
- LOG(FATAL) << "No CXSparse support in Ceres.";
-
- // Unreachable but MSVC does not know this.
- return LinearSolver::Summary();
-}
#endif
+}
-#ifndef CERES_NO_SUITESPARSE
LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
CompressedRowSparseMatrix* A,
- const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
- double * x) {
- EventLogger event_logger("SparseNormalCholeskySolver::SuiteSparse::Solve");
+ double * rhs_and_solution) {
+#ifdef CERES_NO_SUITESPARSE
- const int num_cols = A->num_cols();
LinearSolver::Summary summary;
- Vector Atb = Vector::Zero(num_cols);
- A->LeftMultiply(b, Atb.data());
+ summary.num_iterations = 0;
+ summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+ summary.message =
+ "SPARSE_NORMAL_CHOLESKY cannot be used with SUITE_SPARSE "
+ "because Ceres was not built with support for SuiteSparse. "
+ "This requires enabling building with -DSUITESPARSE=ON.";
+ return summary;
- if (per_solve_options.D != NULL) {
- // Temporarily append a diagonal block to the A matrix, but undo it before
- // returning the matrix to the user.
- CompressedRowSparseMatrix D(per_solve_options.D, num_cols);
- A->AppendRows(D);
- }
+#else
- VectorRef(x, num_cols).setZero();
+ EventLogger event_logger("SparseNormalCholeskySolver::SuiteSparse::Solve");
+ LinearSolver::Summary summary;
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ summary.num_iterations = 1;
+ summary.message = "Success.";
+ const int num_cols = A->num_cols();
cholmod_sparse lhs = ss_.CreateSparseMatrixTransposeView(A);
- cholmod_dense* rhs = ss_.CreateDenseVector(Atb.data(), num_cols, num_cols);
event_logger.AddEvent("Setup");
+ if (options_.dynamic_sparsity) {
+ FreeFactorization();
+ }
if (factor_ == NULL) {
if (options_.use_postordering) {
- factor_ =
- CHECK_NOTNULL(ss_.BlockAnalyzeCholesky(&lhs,
- A->col_blocks(),
- A->row_blocks()));
+ factor_ = ss_.BlockAnalyzeCholesky(&lhs,
+ A->col_blocks(),
+ A->row_blocks(),
+ &summary.message);
} else {
- factor_ =
- CHECK_NOTNULL(ss_.AnalyzeCholeskyWithNaturalOrdering(&lhs));
+ if (options_.dynamic_sparsity) {
+ factor_ = ss_.AnalyzeCholesky(&lhs, &summary.message);
+ } else {
+ factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(&lhs, &summary.message);
+ }
}
}
-
event_logger.AddEvent("Analysis");
- cholmod_dense* sol = ss_.SolveCholesky(&lhs, factor_, rhs);
- event_logger.AddEvent("Solve");
-
- ss_.Free(rhs);
- rhs = NULL;
+ if (factor_ == NULL) {
+ summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+ // No need to set message as it has already been set by the
+ // symbolic analysis routines above.
+ return summary;
+ }
- if (per_solve_options.D != NULL) {
- A->DeleteRows(num_cols);
+ summary.termination_type = ss_.Cholesky(&lhs, factor_, &summary.message);
+ if (summary.termination_type != LINEAR_SOLVER_SUCCESS) {
+ return summary;
}
- summary.num_iterations = 1;
- if (sol != NULL) {
- memcpy(x, sol->x, num_cols * sizeof(*x));
+ cholmod_dense* rhs = ss_.CreateDenseVector(rhs_and_solution, num_cols, num_cols);
+ cholmod_dense* solution = ss_.Solve(factor_, rhs, &summary.message);
+ event_logger.AddEvent("Solve");
- ss_.Free(sol);
- sol = NULL;
- summary.termination_type = TOLERANCE;
+ ss_.Free(rhs);
+ if (solution != NULL) {
+ memcpy(rhs_and_solution, solution->x, num_cols * sizeof(*rhs_and_solution));
+ ss_.Free(solution);
+ } else {
+ // No need to set message as it has already been set by the
+ // numeric factorization routine above.
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
}
event_logger.AddEvent("Teardown");
return summary;
-}
-#else
-LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
- CompressedRowSparseMatrix* A,
- const double* b,
- const LinearSolver::PerSolveOptions& per_solve_options,
- double * x) {
- LOG(FATAL) << "No SuiteSparse support in Ceres.";
-
- // Unreachable but MSVC does not know this.
- return LinearSolver::Summary();
-}
#endif
+}
} // namespace internal
} // namespace ceres
-
-#endif // !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARSE)
diff --git a/internal/ceres/sparse_normal_cholesky_solver.h b/internal/ceres/sparse_normal_cholesky_solver.h
index 61111b4..6572835 100644
--- a/internal/ceres/sparse_normal_cholesky_solver.h
+++ b/internal/ceres/sparse_normal_cholesky_solver.h
@@ -34,12 +34,17 @@
#ifndef CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
#define CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
-#if !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARSE)
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
-#include "ceres/cxsparse.h"
#include "ceres/internal/macros.h"
#include "ceres/linear_solver.h"
#include "ceres/suitesparse.h"
+#include "ceres/cxsparse.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+#include "Eigen/SparseCholesky"
+#endif
namespace ceres {
namespace internal {
@@ -62,16 +67,22 @@ class SparseNormalCholeskySolver : public CompressedRowSparseMatrixSolver {
LinearSolver::Summary SolveImplUsingSuiteSparse(
CompressedRowSparseMatrix* A,
- const double* b,
const LinearSolver::PerSolveOptions& options,
- double* x);
+ double* rhs_and_solution);
// Crashes if CSparse is not installed.
LinearSolver::Summary SolveImplUsingCXSparse(
CompressedRowSparseMatrix* A,
- const double* b,
const LinearSolver::PerSolveOptions& options,
- double* x);
+ double* rhs_and_solution);
+
+ // Crashes if CERES_USE_EIGEN_SPARSE is not defined.
+ LinearSolver::Summary SolveImplUsingEigen(
+ CompressedRowSparseMatrix* A,
+ const LinearSolver::PerSolveOptions& options,
+ double* rhs_and_solution);
+
+ void FreeFactorization();
SuiteSparse ss_;
// Cached factorization
@@ -81,6 +92,14 @@ class SparseNormalCholeskySolver : public CompressedRowSparseMatrixSolver {
// Cached factorization
cs_dis* cxsparse_factor_;
+#ifdef CERES_USE_EIGEN_SPARSE
+ typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>,
+ Eigen::Upper> SimplicialLDLT;
+ scoped_ptr<SimplicialLDLT> simplicial_ldlt_;
+#endif
+
+ scoped_ptr<CompressedRowSparseMatrix> outer_product_;
+ vector<int> pattern_;
const LinearSolver::Options options_;
CERES_DISALLOW_COPY_AND_ASSIGN(SparseNormalCholeskySolver);
};
@@ -88,5 +107,4 @@ class SparseNormalCholeskySolver : public CompressedRowSparseMatrixSolver {
} // namespace internal
} // namespace ceres
-#endif // !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARSE)
#endif // CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
diff --git a/internal/ceres/stringprintf.cc b/internal/ceres/stringprintf.cc
index ce20467..0f85f05 100644
--- a/internal/ceres/stringprintf.cc
+++ b/internal/ceres/stringprintf.cc
@@ -43,7 +43,9 @@ namespace internal {
#ifdef _MSC_VER
enum { IS_COMPILER_MSVC = 1 };
-#define va_copy(d,s) ((d) = (s))
+#if _MSC_VER < 1800
+#define va_copy(d, s) ((d) = (s))
+#endif
#else
enum { IS_COMPILER_MSVC = 0 };
#endif
diff --git a/internal/ceres/suitesparse.cc b/internal/ceres/suitesparse.cc
index 9de32fd..1df7566 100644
--- a/internal/ceres/suitesparse.cc
+++ b/internal/ceres/suitesparse.cc
@@ -28,6 +28,9 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_NO_SUITESPARSE
#include "ceres/suitesparse.h"
@@ -35,6 +38,7 @@
#include "cholmod.h"
#include "ceres/compressed_col_sparse_matrix_utils.h"
#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/linear_solver.h"
#include "ceres/triplet_sparse_matrix.h"
namespace ceres {
@@ -120,7 +124,8 @@ cholmod_dense* SuiteSparse::CreateDenseVector(const double* x,
return v;
}
-cholmod_factor* SuiteSparse::AnalyzeCholesky(cholmod_sparse* A) {
+cholmod_factor* SuiteSparse::AnalyzeCholesky(cholmod_sparse* A,
+ string* message) {
// Cholmod can try multiple re-ordering strategies to find a fill
// reducing ordering. Here we just tell it use AMD with automatic
// matrix dependence choice of supernodal versus simplicial
@@ -130,31 +135,35 @@ cholmod_factor* SuiteSparse::AnalyzeCholesky(cholmod_sparse* A) {
cc_.supernodal = CHOLMOD_AUTO;
cholmod_factor* factor = cholmod_analyze(A, &cc_);
- CHECK_EQ(cc_.status, CHOLMOD_OK)
- << "Cholmod symbolic analysis failed " << cc_.status;
- CHECK_NOTNULL(factor);
-
if (VLOG_IS_ON(2)) {
cholmod_print_common(const_cast<char*>("Symbolic Analysis"), &cc_);
}
- return factor;
+ if (cc_.status != CHOLMOD_OK) {
+ *message = StringPrintf("cholmod_analyze failed. error code: %d",
+ cc_.status);
+ return NULL;
+ }
+
+ return CHECK_NOTNULL(factor);
}
cholmod_factor* SuiteSparse::BlockAnalyzeCholesky(
cholmod_sparse* A,
const vector<int>& row_blocks,
- const vector<int>& col_blocks) {
+ const vector<int>& col_blocks,
+ string* message) {
vector<int> ordering;
if (!BlockAMDOrdering(A, row_blocks, col_blocks, &ordering)) {
return NULL;
}
- return AnalyzeCholeskyWithUserOrdering(A, ordering);
+ return AnalyzeCholeskyWithUserOrdering(A, ordering, message);
}
cholmod_factor* SuiteSparse::AnalyzeCholeskyWithUserOrdering(
cholmod_sparse* A,
- const vector<int>& ordering) {
+ const vector<int>& ordering,
+ string* message) {
CHECK_EQ(ordering.size(), A->nrow);
cc_.nmethods = 1;
@@ -162,33 +171,36 @@ cholmod_factor* SuiteSparse::AnalyzeCholeskyWithUserOrdering(
cholmod_factor* factor =
cholmod_analyze_p(A, const_cast<int*>(&ordering[0]), NULL, 0, &cc_);
- CHECK_EQ(cc_.status, CHOLMOD_OK)
- << "Cholmod symbolic analysis failed " << cc_.status;
- CHECK_NOTNULL(factor);
-
if (VLOG_IS_ON(2)) {
cholmod_print_common(const_cast<char*>("Symbolic Analysis"), &cc_);
}
+ if (cc_.status != CHOLMOD_OK) {
+ *message = StringPrintf("cholmod_analyze failed. error code: %d",
+ cc_.status);
+ return NULL;
+ }
- return factor;
+ return CHECK_NOTNULL(factor);
}
cholmod_factor* SuiteSparse::AnalyzeCholeskyWithNaturalOrdering(
- cholmod_sparse* A) {
+ cholmod_sparse* A,
+ string* message) {
cc_.nmethods = 1;
cc_.method[0].ordering = CHOLMOD_NATURAL;
cc_.postorder = 0;
cholmod_factor* factor = cholmod_analyze(A, &cc_);
- CHECK_EQ(cc_.status, CHOLMOD_OK)
- << "Cholmod symbolic analysis failed " << cc_.status;
- CHECK_NOTNULL(factor);
-
if (VLOG_IS_ON(2)) {
cholmod_print_common(const_cast<char*>("Symbolic Analysis"), &cc_);
}
+ if (cc_.status != CHOLMOD_OK) {
+ *message = StringPrintf("cholmod_analyze failed. error code: %d",
+ cc_.status);
+ return NULL;
+ }
- return factor;
+ return CHECK_NOTNULL(factor);
}
bool SuiteSparse::BlockAMDOrdering(const cholmod_sparse* A,
@@ -233,7 +245,9 @@ bool SuiteSparse::BlockAMDOrdering(const cholmod_sparse* A,
return true;
}
-bool SuiteSparse::Cholesky(cholmod_sparse* A, cholmod_factor* L) {
+LinearSolverTerminationType SuiteSparse::Cholesky(cholmod_sparse* A,
+ cholmod_factor* L,
+ string* message) {
CHECK_NOTNULL(A);
CHECK_NOTNULL(L);
@@ -245,7 +259,7 @@ bool SuiteSparse::Cholesky(cholmod_sparse* A, cholmod_factor* L) {
cc_.print = 0;
cc_.quick_return_if_not_posdef = 1;
- int status = cholmod_factorize(A, L, &cc_);
+ int cholmod_status = cholmod_factorize(A, L, &cc_);
cc_.print = old_print_level;
// TODO(sameeragarwal): This switch statement is not consistent. It
@@ -257,84 +271,73 @@ bool SuiteSparse::Cholesky(cholmod_sparse* A, cholmod_factor* L) {
// (e.g. out of memory).
switch (cc_.status) {
case CHOLMOD_NOT_INSTALLED:
- LOG(WARNING) << "CHOLMOD failure: Method not installed.";
- return false;
+ *message = "CHOLMOD failure: Method not installed.";
+ return LINEAR_SOLVER_FATAL_ERROR;
case CHOLMOD_OUT_OF_MEMORY:
- LOG(WARNING) << "CHOLMOD failure: Out of memory.";
- return false;
+ *message = "CHOLMOD failure: Out of memory.";
+ return LINEAR_SOLVER_FATAL_ERROR;
case CHOLMOD_TOO_LARGE:
- LOG(WARNING) << "CHOLMOD failure: Integer overflow occured.";
- return false;
+ *message = "CHOLMOD failure: Integer overflow occured.";
+ return LINEAR_SOLVER_FATAL_ERROR;
case CHOLMOD_INVALID:
- LOG(WARNING) << "CHOLMOD failure: Invalid input.";
- return false;
+ *message = "CHOLMOD failure: Invalid input.";
+ return LINEAR_SOLVER_FATAL_ERROR;
case CHOLMOD_NOT_POSDEF:
- // TODO(sameeragarwal): These two warnings require more
- // sophisticated handling going forward. For now we will be
- // strict and treat them as failures.
- LOG(WARNING) << "CHOLMOD warning: Matrix not positive definite.";
- return false;
+ *message = "CHOLMOD warning: Matrix not positive definite.";
+ return LINEAR_SOLVER_FAILURE;
case CHOLMOD_DSMALL:
- LOG(WARNING) << "CHOLMOD warning: D for LDL' or diag(L) or "
- << "LL' has tiny absolute value.";
- return false;
+ *message = "CHOLMOD warning: D for LDL' or diag(L) or "
+ "LL' has tiny absolute value.";
+ return LINEAR_SOLVER_FAILURE;
case CHOLMOD_OK:
- if (status != 0) {
- return true;
+ if (cholmod_status != 0) {
+ return LINEAR_SOLVER_SUCCESS;
}
- LOG(WARNING) << "CHOLMOD failure: cholmod_factorize returned zero "
- << "but cholmod_common::status is CHOLMOD_OK."
- << "Please report this to ceres-solver@googlegroups.com.";
- return false;
+
+ *message = "CHOLMOD failure: cholmod_factorize returned false "
+ "but cholmod_common::status is CHOLMOD_OK."
+ "Please report this to ceres-solver@googlegroups.com.";
+ return LINEAR_SOLVER_FATAL_ERROR;
default:
- LOG(WARNING) << "Unknown cholmod return code. "
- << "Please report this to ceres-solver@googlegroups.com.";
- return false;
+ *message =
+ StringPrintf("Unknown cholmod return code: %d. "
+ "Please report this to ceres-solver@googlegroups.com.",
+ cc_.status);
+ return LINEAR_SOLVER_FATAL_ERROR;
}
- return false;
+
+ return LINEAR_SOLVER_FATAL_ERROR;
}
cholmod_dense* SuiteSparse::Solve(cholmod_factor* L,
- cholmod_dense* b) {
+ cholmod_dense* b,
+ string* message) {
if (cc_.status != CHOLMOD_OK) {
- LOG(WARNING) << "CHOLMOD status NOT OK";
+ *message = "cholmod_solve failed. CHOLMOD status is not CHOLMOD_OK";
return NULL;
}
return cholmod_solve(CHOLMOD_A, L, b, &cc_);
}
-cholmod_dense* SuiteSparse::SolveCholesky(cholmod_sparse* A,
- cholmod_factor* L,
- cholmod_dense* b) {
- CHECK_NOTNULL(A);
- CHECK_NOTNULL(L);
- CHECK_NOTNULL(b);
-
- if (Cholesky(A, L)) {
- return Solve(L, b);
- }
-
- return NULL;
-}
-
-void SuiteSparse::ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix,
+bool SuiteSparse::ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix,
int* ordering) {
- cholmod_amd(matrix, NULL, 0, ordering, &cc_);
+ return cholmod_amd(matrix, NULL, 0, ordering, &cc_);
}
-void SuiteSparse::ConstrainedApproximateMinimumDegreeOrdering(
+bool SuiteSparse::ConstrainedApproximateMinimumDegreeOrdering(
cholmod_sparse* matrix,
int* constraints,
int* ordering) {
#ifndef CERES_NO_CAMD
- cholmod_camd(matrix, NULL, 0, constraints, ordering, &cc_);
+ return cholmod_camd(matrix, NULL, 0, constraints, ordering, &cc_);
#else
LOG(FATAL) << "Congratulations you have found a bug in Ceres."
<< "Ceres Solver was compiled with SuiteSparse "
<< "version 4.1.0 or less. Calling this function "
<< "in that case is a bug. Please contact the"
<< "the Ceres Solver developers.";
+ return false;
#endif
}
diff --git a/internal/ceres/suitesparse.h b/internal/ceres/suitesparse.h
index 16f298e..baab899 100644
--- a/internal/ceres/suitesparse.h
+++ b/internal/ceres/suitesparse.h
@@ -33,6 +33,8 @@
#ifndef CERES_INTERNAL_SUITESPARSE_H_
#define CERES_INTERNAL_SUITESPARSE_H_
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
#ifndef CERES_NO_SUITESPARSE
@@ -41,6 +43,7 @@
#include <vector>
#include "ceres/internal/port.h"
+#include "ceres/linear_solver.h"
#include "cholmod.h"
#include "glog/logging.h"
#include "SuiteSparseQR.hpp"
@@ -138,12 +141,15 @@ class SuiteSparse {
// A is not modified, only the pattern of non-zeros of A is used,
// the actual numerical values in A are of no consequence.
//
+ // message contains an explanation of the failures if any.
+ //
// Caller owns the result.
- cholmod_factor* AnalyzeCholesky(cholmod_sparse* A);
+ cholmod_factor* AnalyzeCholesky(cholmod_sparse* A, string* message);
cholmod_factor* BlockAnalyzeCholesky(cholmod_sparse* A,
const vector<int>& row_blocks,
- const vector<int>& col_blocks);
+ const vector<int>& col_blocks,
+ string* message);
// If A is symmetric, then compute the symbolic Cholesky
// factorization of A(ordering, ordering). If A is unsymmetric, then
@@ -153,33 +159,38 @@ class SuiteSparse {
// A is not modified, only the pattern of non-zeros of A is used,
// the actual numerical values in A are of no consequence.
//
+ // message contains an explanation of the failures if any.
+ //
// Caller owns the result.
cholmod_factor* AnalyzeCholeskyWithUserOrdering(cholmod_sparse* A,
- const vector<int>& ordering);
+ const vector<int>& ordering,
+ string* message);
// Perform a symbolic factorization of A without re-ordering A. No
// postordering of the elimination tree is performed. This ensures
// that the symbolic factor does not introduce an extra permutation
// on the matrix. See the documentation for CHOLMOD for more details.
- cholmod_factor* AnalyzeCholeskyWithNaturalOrdering(cholmod_sparse* A);
+ //
+ // message contains an explanation of the failures if any.
+ cholmod_factor* AnalyzeCholeskyWithNaturalOrdering(cholmod_sparse* A,
+ string* message);
// Use the symbolic factorization in L, to find the numerical
// factorization for the matrix A or AA^T. Return true if
// successful, false otherwise. L contains the numeric factorization
// on return.
- bool Cholesky(cholmod_sparse* A, cholmod_factor* L);
+ //
+ // message contains an explanation of the failures if any.
+ LinearSolverTerminationType Cholesky(cholmod_sparse* A,
+ cholmod_factor* L,
+ string* message);
// Given a Cholesky factorization of a matrix A = LL^T, solve the
// linear system Ax = b, and return the result. If the Solve fails
// NULL is returned. Caller owns the result.
- cholmod_dense* Solve(cholmod_factor* L, cholmod_dense* b);
-
- // Combine the calls to Cholesky and Solve into a single call. If
- // the cholesky factorization or the solve fails, return
- // NULL. Caller owns the result.
- cholmod_dense* SolveCholesky(cholmod_sparse* A,
- cholmod_factor* L,
- cholmod_dense* b);
+ //
+ // message contains an explanation of the failures if any.
+ cholmod_dense* Solve(cholmod_factor* L, cholmod_dense* b, string* message);
// By virtue of the modeling layer in Ceres being block oriented,
// all the matrices used by Ceres are also block oriented. When
@@ -211,7 +222,7 @@ class SuiteSparse {
// Find a fill reducing approximate minimum degree
// ordering. ordering is expected to be large enough to hold the
// ordering.
- void ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix, int* ordering);
+ bool ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix, int* ordering);
// Before SuiteSparse version 4.2.0, cholmod_camd was only enabled
@@ -241,7 +252,7 @@ class SuiteSparse {
//
// If CERES_NO_CAMD is defined then calling this function will
// result in a crash.
- void ConstrainedApproximateMinimumDegreeOrdering(cholmod_sparse* matrix,
+ bool ConstrainedApproximateMinimumDegreeOrdering(cholmod_sparse* matrix,
int* constraints,
int* ordering);
@@ -272,9 +283,24 @@ class SuiteSparse {
#else // CERES_NO_SUITESPARSE
-class SuiteSparse {};
typedef void cholmod_factor;
+class SuiteSparse {
+ public:
+ // Defining this static function even when SuiteSparse is not
+ // available, allows client code to check for the presence of CAMD
+ // without checking for the absence of the CERES_NO_CAMD symbol.
+ //
+ // This is safer because the symbol maybe missing due to a user
+ // accidentally not including suitesparse.h in their code when
+ // checking for the symbol.
+ static bool IsConstrainedApproximateMinimumDegreeOrderingAvailable() {
+ return false;
+ }
+
+ void Free(void*) {};
+};
+
#endif // CERES_NO_SUITESPARSE
#endif // CERES_INTERNAL_SUITESPARSE_H_
diff --git a/internal/ceres/summary_utils.cc b/internal/ceres/summary_utils.cc
new file mode 100644
index 0000000..243030c
--- /dev/null
+++ b/internal/ceres/summary_utils.cc
@@ -0,0 +1,66 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <algorithm>
+#include "ceres/summary_utils.h"
+#include "ceres/program.h"
+#include "ceres/solver.h"
+
+namespace ceres {
+namespace internal {
+
+// Sets summary->final_cost to the lowest cost encountered during the
+// solve: the smaller of initial_cost and the cost of every recorded
+// iteration.
+void SetSummaryFinalCost(Solver::Summary* summary) {
+  summary->final_cost = summary->initial_cost;
+  // We need the loop here, instead of just looking at the last
+  // iteration, because the minimizer may be making non-monotonic steps.
+  for (int i = 0; i < summary->iterations.size(); ++i) {
+    const IterationSummary& iteration_summary = summary->iterations[i];
+    // Qualify std::min explicitly; unqualified min on doubles is not
+    // found via ADL and depends on a using-declaration elsewhere.
+    summary->final_cost = std::min(iteration_summary.cost,
+                                   summary->final_cost);
+  }
+}
+
+// Copies the size statistics of the given (original) program --
+// residual block/residual counts and parameter block/parameter counts
+// -- into the corresponding fields of the summary.
+void SummarizeGivenProgram(const Program& program, Solver::Summary* summary) {
+  summary->num_residual_blocks = program.NumResidualBlocks();
+  summary->num_residuals = program.NumResiduals();
+  summary->num_parameter_blocks = program.NumParameterBlocks();
+  summary->num_parameters = program.NumParameters();
+  summary->num_effective_parameters = program.NumEffectiveParameters();
+}
+
+// Copies the size statistics of the reduced (preprocessed) program
+// into the *_reduced fields of the summary, mirroring
+// SummarizeGivenProgram.
+void SummarizeReducedProgram(const Program& program, Solver::Summary* summary) {
+  summary->num_residual_blocks_reduced = program.NumResidualBlocks();
+  summary->num_residuals_reduced = program.NumResiduals();
+  summary->num_parameter_blocks_reduced = program.NumParameterBlocks();
+  summary->num_parameters_reduced = program.NumParameters();
+  summary->num_effective_parameters_reduced = program.NumEffectiveParameters();
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/summary_utils.h b/internal/ceres/summary_utils.h
new file mode 100644
index 0000000..9b07987
--- /dev/null
+++ b/internal/ceres/summary_utils.h
@@ -0,0 +1,49 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2014 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_SUMMARY_UTILS_H_
+#define CERES_INTERNAL_SUMMARY_UTILS_H_
+
+#include <vector>
+#include "ceres/solver.h"
+
+namespace ceres {
+namespace internal {
+
+class Program;
+
+void SummarizeGivenProgram(const Program& program, Solver::Summary* summary);
+void SummarizeReducedProgram(const Program& program, Solver::Summary* summary);
+void SetSummaryFinalCost(Solver::Summary* summary);
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_SUMMARY_UTILS_H_
diff --git a/internal/ceres/symmetric_linear_solver_test.cc b/internal/ceres/symmetric_linear_solver_test.cc
index f33adb4..ac5a774 100644
--- a/internal/ceres/symmetric_linear_solver_test.cc
+++ b/internal/ceres/symmetric_linear_solver_test.cc
@@ -71,7 +71,7 @@ TEST(ConjugateGradientTest, Solves3x3IdentitySystem) {
LinearSolver::Summary summary =
solver.Solve(A.get(), b.data(), per_solve_options, x.data());
- EXPECT_EQ(summary.termination_type, TOLERANCE);
+ EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
ASSERT_EQ(summary.num_iterations, 1);
ASSERT_DOUBLE_EQ(1, x(0));
@@ -128,7 +128,7 @@ TEST(ConjuateGradientTest, Solves3x3SymmetricSystem) {
LinearSolver::Summary summary =
solver.Solve(A.get(), b.data(), per_solve_options, x.data());
- EXPECT_EQ(summary.termination_type, TOLERANCE);
+ EXPECT_EQ(summary.termination_type, LINEAR_SOLVER_SUCCESS);
ASSERT_DOUBLE_EQ(0, x(0));
ASSERT_DOUBLE_EQ(1, x(1));
diff --git a/internal/ceres/system_test.cc b/internal/ceres/system_test.cc
index 7b0e02d..be56f20 100644
--- a/internal/ceres/system_test.cc
+++ b/internal/ceres/system_test.cc
@@ -43,6 +43,8 @@
#include <cstdlib>
#include <string>
+#include "ceres/internal/port.h"
+
#include "ceres/autodiff_cost_function.h"
#include "ceres/ordered_groups.h"
#include "ceres/problem.h"
@@ -63,9 +65,10 @@ const bool kUserOrdering = false;
// Struct used for configuring the solver.
struct SolverConfig {
- SolverConfig(LinearSolverType linear_solver_type,
- SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
- bool use_automatic_ordering)
+ SolverConfig(
+ LinearSolverType linear_solver_type,
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+ bool use_automatic_ordering)
: linear_solver_type(linear_solver_type),
sparse_linear_algebra_library_type(sparse_linear_algebra_library_type),
use_automatic_ordering(use_automatic_ordering),
@@ -73,10 +76,11 @@ struct SolverConfig {
num_threads(1) {
}
- SolverConfig(LinearSolverType linear_solver_type,
- SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
- bool use_automatic_ordering,
- PreconditionerType preconditioner_type)
+ SolverConfig(
+ LinearSolverType linear_solver_type,
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
+ bool use_automatic_ordering,
+ PreconditionerType preconditioner_type)
: linear_solver_type(linear_solver_type),
sparse_linear_algebra_library_type(sparse_linear_algebra_library_type),
use_automatic_ordering(use_automatic_ordering),
@@ -88,7 +92,8 @@ struct SolverConfig {
return StringPrintf(
"(%s, %s, %s, %s, %d)",
LinearSolverTypeToString(linear_solver_type),
- SparseLinearAlgebraLibraryTypeToString(sparse_linear_algebra_library_type),
+ SparseLinearAlgebraLibraryTypeToString(
+ sparse_linear_algebra_library_type),
use_automatic_ordering ? "AUTOMATIC" : "USER",
PreconditionerTypeToString(preconditioner_type),
num_threads);
@@ -137,8 +142,7 @@ void RunSolversAndCheckTheyMatch(const vector<SolverConfig>& configurations,
options.num_linear_solver_threads = config.num_threads;
if (config.use_automatic_ordering) {
- delete options.linear_solver_ordering;
- options.linear_solver_ordering = NULL;
+ options.linear_solver_ordering.reset();
}
LOG(INFO) << "Running solver configuration: "
@@ -157,7 +161,7 @@ void RunSolversAndCheckTheyMatch(const vector<SolverConfig>& configurations,
NULL,
NULL);
- CHECK_NE(summary.termination_type, ceres::NUMERICAL_FAILURE)
+ CHECK_NE(summary.termination_type, ceres::FAILURE)
<< "Solver configuration " << i << " failed.";
problems.push_back(system_test_problem);
@@ -395,7 +399,7 @@ class BundleAdjustmentProblem {
problem_.AddResidualBlock(cost_function, NULL, camera, point);
}
- options_.linear_solver_ordering = new ParameterBlockOrdering;
+ options_.linear_solver_ordering.reset(new ParameterBlockOrdering);
// The points come before the cameras.
for (int i = 0; i < num_points_; ++i) {
@@ -491,40 +495,45 @@ TEST(SystemTest, BundleAdjustmentProblem) {
ordering, \
preconditioner))
-#ifndef CERES_NO_SUITESPARSE
- CONFIGURE(SPARSE_NORMAL_CHOLESKY, SUITE_SPARSE, kAutomaticOrdering, IDENTITY);
- CONFIGURE(SPARSE_NORMAL_CHOLESKY, SUITE_SPARSE, kUserOrdering, IDENTITY);
-
- CONFIGURE(SPARSE_SCHUR, SUITE_SPARSE, kAutomaticOrdering, IDENTITY);
- CONFIGURE(SPARSE_SCHUR, SUITE_SPARSE, kUserOrdering, IDENTITY);
-#endif // CERES_NO_SUITESPARSE
-
-#ifndef CERES_NO_CXSPARSE
- CONFIGURE(SPARSE_SCHUR, CX_SPARSE, kAutomaticOrdering, IDENTITY);
- CONFIGURE(SPARSE_SCHUR, CX_SPARSE, kUserOrdering, IDENTITY);
-#endif // CERES_NO_CXSPARSE
-
CONFIGURE(DENSE_SCHUR, SUITE_SPARSE, kAutomaticOrdering, IDENTITY);
CONFIGURE(DENSE_SCHUR, SUITE_SPARSE, kUserOrdering, IDENTITY);
CONFIGURE(CGNR, SUITE_SPARSE, kAutomaticOrdering, JACOBI);
+
CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kUserOrdering, JACOBI);
+ CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kAutomaticOrdering, JACOBI);
+
CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kUserOrdering, SCHUR_JACOBI);
+ CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kAutomaticOrdering, SCHUR_JACOBI);
#ifndef CERES_NO_SUITESPARSE
+ CONFIGURE(SPARSE_NORMAL_CHOLESKY, SUITE_SPARSE, kAutomaticOrdering, IDENTITY);
+ CONFIGURE(SPARSE_NORMAL_CHOLESKY, SUITE_SPARSE, kUserOrdering, IDENTITY);
+
+ CONFIGURE(SPARSE_SCHUR, SUITE_SPARSE, kAutomaticOrdering, IDENTITY);
+ CONFIGURE(SPARSE_SCHUR, SUITE_SPARSE, kUserOrdering, IDENTITY);
+ CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kAutomaticOrdering, CLUSTER_JACOBI);
CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kUserOrdering, CLUSTER_JACOBI);
+
+ CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kAutomaticOrdering, CLUSTER_TRIDIAGONAL);
CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kUserOrdering, CLUSTER_TRIDIAGONAL);
#endif // CERES_NO_SUITESPARSE
- CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kAutomaticOrdering, JACOBI);
- CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kAutomaticOrdering, SCHUR_JACOBI);
+#ifndef CERES_NO_CXSPARSE
+ CONFIGURE(SPARSE_NORMAL_CHOLESKY, CX_SPARSE, kAutomaticOrdering, IDENTITY);
+ CONFIGURE(SPARSE_NORMAL_CHOLESKY, CX_SPARSE, kUserOrdering, IDENTITY);
-#ifndef CERES_NO_SUITESPARSE
+ CONFIGURE(SPARSE_SCHUR, CX_SPARSE, kAutomaticOrdering, IDENTITY);
+ CONFIGURE(SPARSE_SCHUR, CX_SPARSE, kUserOrdering, IDENTITY);
+#endif // CERES_NO_CXSPARSE
- CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kAutomaticOrdering, CLUSTER_JACOBI);
- CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kAutomaticOrdering, CLUSTER_TRIDIAGONAL);
-#endif // CERES_NO_SUITESPARSE
+#ifdef CERES_USE_EIGEN_SPARSE
+ CONFIGURE(SPARSE_SCHUR, EIGEN_SPARSE, kAutomaticOrdering, IDENTITY);
+ CONFIGURE(SPARSE_SCHUR, EIGEN_SPARSE, kUserOrdering, IDENTITY);
+ CONFIGURE(SPARSE_NORMAL_CHOLESKY, EIGEN_SPARSE, kAutomaticOrdering, IDENTITY);
+ CONFIGURE(SPARSE_NORMAL_CHOLESKY, EIGEN_SPARSE, kUserOrdering, IDENTITY);
+#endif // CERES_USE_EIGEN_SPARSE
#undef CONFIGURE
diff --git a/internal/ceres/test_util.cc b/internal/ceres/test_util.cc
index a3f67bd..8af48ab 100644
--- a/internal/ceres/test_util.cc
+++ b/internal/ceres/test_util.cc
@@ -30,6 +30,7 @@
//
// Utility functions useful for testing.
+#include <algorithm>
#include <cmath>
#include "ceres/file.h"
#include "ceres/stringprintf.h"
diff --git a/internal/ceres/trust_region_minimizer.cc b/internal/ceres/trust_region_minimizer.cc
index 03d6c8e..4be5619 100644
--- a/internal/ceres/trust_region_minimizer.cc
+++ b/internal/ceres/trust_region_minimizer.cc
@@ -44,6 +44,7 @@
#include "ceres/file.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
+#include "ceres/line_search.h"
#include "ceres/linear_least_squares_problems.h"
#include "ceres/sparse_matrix.h"
#include "ceres/stringprintf.h"
@@ -55,8 +56,53 @@
namespace ceres {
namespace internal {
namespace {
-// Small constant for various floating point issues.
-const double kEpsilon = 1e-12;
+
+// Runs an ARMIJO backtracking line search from x along the direction
+// delta, given the current cost and gradient. If the search along
+// delta fails, a second search along the negative gradient direction
+// is attempted. The returned summary reports success/failure and the
+// step size found.
+LineSearch::Summary DoLineSearch(const Minimizer::Options& options,
+                                 const Vector& x,
+                                 const Vector& gradient,
+                                 const double cost,
+                                 const Vector& delta,
+                                 Evaluator* evaluator) {
+  LineSearchFunction line_search_function(evaluator);
+
+  // Mirror the minimizer's line search configuration; run silently
+  // because this search is an internal refinement of the trust
+  // region step, not a user-visible minimization.
+  LineSearch::Options line_search_options;
+  line_search_options.is_silent = true;
+  line_search_options.interpolation_type =
+      options.line_search_interpolation_type;
+  line_search_options.min_step_size = options.min_line_search_step_size;
+  line_search_options.sufficient_decrease =
+      options.line_search_sufficient_function_decrease;
+  line_search_options.max_step_contraction =
+      options.max_line_search_step_contraction;
+  line_search_options.min_step_contraction =
+      options.min_line_search_step_contraction;
+  line_search_options.max_num_iterations =
+      options.max_num_line_search_step_size_iterations;
+  line_search_options.sufficient_curvature_decrease =
+      options.line_search_sufficient_curvature_decrease;
+  line_search_options.max_step_expansion =
+      options.max_line_search_step_expansion;
+  line_search_options.function = &line_search_function;
+
+  string message;
+  // ARMIJO creation is expected to always succeed; CHECK_NOTNULL
+  // aborts (with message) if it does not.
+  scoped_ptr<LineSearch>
+      line_search(CHECK_NOTNULL(
+          LineSearch::Create(ceres::ARMIJO,
+                             line_search_options,
+                             &message)));
+  LineSearch::Summary summary;
+  line_search_function.Init(x, delta);
+  // Try the trust region step.
+  line_search->Search(1.0, cost, gradient.dot(delta), &summary);
+  if (!summary.success) {
+    // If that was not successful, try the negative gradient as a
+    // search direction.
+    line_search_function.Init(x, -gradient);
+    line_search->Search(1.0, cost, -gradient.squaredNorm(), &summary);
+  }
+  return summary;
+}
+
} // namespace
// Compute a scaling vector that is used to improve the conditioning
@@ -82,22 +128,29 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
double iteration_start_time = start_time;
Init(options);
- summary->termination_type = NO_CONVERGENCE;
- summary->num_successful_steps = 0;
- summary->num_unsuccessful_steps = 0;
-
Evaluator* evaluator = CHECK_NOTNULL(options_.evaluator);
SparseMatrix* jacobian = CHECK_NOTNULL(options_.jacobian);
TrustRegionStrategy* strategy = CHECK_NOTNULL(options_.trust_region_strategy);
+ const bool is_not_silent = !options.is_silent;
+
+ // If the problem is bounds constrained, then enable the use of a
+ // line search after the trust region step has been computed. This
+ // line search will automatically use a projected test point onto
+  // the feasible set, thereby guaranteeing the feasibility of the
+ // final output.
+ //
+ // TODO(sameeragarwal): Make line search available more generally.
+ const bool use_line_search = options.is_constrained;
+
+ summary->termination_type = NO_CONVERGENCE;
+ summary->num_successful_steps = 0;
+ summary->num_unsuccessful_steps = 0;
+
const int num_parameters = evaluator->NumParameters();
const int num_effective_parameters = evaluator->NumEffectiveParameters();
const int num_residuals = evaluator->NumResiduals();
- VectorRef x_min(parameters, num_parameters);
- Vector x = x_min;
- double x_norm = x.norm();
-
Vector residuals(num_residuals);
Vector trust_region_step(num_effective_parameters);
Vector delta(num_effective_parameters);
@@ -105,6 +158,8 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
Vector gradient(num_effective_parameters);
Vector model_residuals(num_residuals);
Vector scale(num_effective_parameters);
+ Vector negative_gradient(num_effective_parameters);
+ Vector projected_gradient_step(num_parameters);
IterationSummary iteration_summary;
iteration_summary.iteration = 0;
@@ -112,15 +167,32 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
iteration_summary.step_is_successful = false;
iteration_summary.cost_change = 0.0;
iteration_summary.gradient_max_norm = 0.0;
+ iteration_summary.gradient_norm = 0.0;
iteration_summary.step_norm = 0.0;
iteration_summary.relative_decrease = 0.0;
iteration_summary.trust_region_radius = strategy->Radius();
- // TODO(sameeragarwal): Rename eta to linear_solver_accuracy or
- // something similar across the board.
iteration_summary.eta = options_.eta;
iteration_summary.linear_solver_iterations = 0;
iteration_summary.step_solver_time_in_seconds = 0;
+ VectorRef x_min(parameters, num_parameters);
+ Vector x = x_min;
+ // Project onto the feasible set.
+ if (options.is_constrained) {
+ delta.setZero();
+ if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
+ summary->message =
+ "Unable to project initial point onto the feasible set.";
+ summary->termination_type = FAILURE;
+ LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+ return;
+ }
+ x_min = x_plus_delta;
+ x = x_plus_delta;
+ }
+
+ double x_norm = x.norm();
+
// Do initial cost and Jacobian evaluation.
double cost = 0.0;
if (!evaluator->Evaluate(x.data(),
@@ -128,45 +200,38 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
residuals.data(),
gradient.data(),
jacobian)) {
- LOG(WARNING) << "Terminating: Residual and Jacobian evaluation failed.";
- summary->termination_type = NUMERICAL_FAILURE;
+ summary->message = "Residual and Jacobian evaluation failed.";
+ summary->termination_type = FAILURE;
+ LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
return;
}
- int num_consecutive_nonmonotonic_steps = 0;
- double minimum_cost = cost;
- double reference_cost = cost;
- double accumulated_reference_model_cost_change = 0.0;
- double candidate_cost = cost;
- double accumulated_candidate_model_cost_change = 0.0;
+ negative_gradient = -gradient;
+ if (!evaluator->Plus(x.data(),
+ negative_gradient.data(),
+ projected_gradient_step.data())) {
+ summary->message = "Unable to compute gradient step.";
+ summary->termination_type = FAILURE;
+ LOG(ERROR) << "Terminating: " << summary->message;
+ return;
+ }
summary->initial_cost = cost + summary->fixed_cost;
iteration_summary.cost = cost + summary->fixed_cost;
- iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();
-
- // The initial gradient max_norm is bounded from below so that we do
- // not divide by zero.
- const double initial_gradient_max_norm =
- max(iteration_summary.gradient_max_norm, kEpsilon);
- const double absolute_gradient_tolerance =
- options_.gradient_tolerance * initial_gradient_max_norm;
-
- if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
- summary->termination_type = GRADIENT_TOLERANCE;
- VLOG(1) << "Terminating: Gradient tolerance reached."
- << "Relative gradient max norm: "
- << iteration_summary.gradient_max_norm / initial_gradient_max_norm
- << " <= " << options_.gradient_tolerance;
+ iteration_summary.gradient_max_norm =
+ (x - projected_gradient_step).lpNorm<Eigen::Infinity>();
+ iteration_summary.gradient_norm = (x - projected_gradient_step).norm();
+
+ if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
+ summary->message = StringPrintf("Gradient tolerance reached. "
+ "Gradient max norm: %e <= %e",
+ iteration_summary.gradient_max_norm,
+ options_.gradient_tolerance);
+ summary->termination_type = CONVERGENCE;
+ VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
return;
}
- iteration_summary.iteration_time_in_seconds =
- WallTimeInSeconds() - iteration_start_time;
- iteration_summary.cumulative_time_in_seconds =
- WallTimeInSeconds() - start_time
- + summary->preprocessor_time_in_seconds;
- summary->iterations.push_back(iteration_summary);
-
if (options_.jacobi_scaling) {
EstimateScale(*jacobian, scale.data());
jacobian->ScaleColumns(scale.data());
@@ -174,27 +239,42 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
scale.setOnes();
}
+ iteration_summary.iteration_time_in_seconds =
+ WallTimeInSeconds() - iteration_start_time;
+ iteration_summary.cumulative_time_in_seconds =
+ WallTimeInSeconds() - start_time
+ + summary->preprocessor_time_in_seconds;
+ summary->iterations.push_back(iteration_summary);
+
+ int num_consecutive_nonmonotonic_steps = 0;
+ double minimum_cost = cost;
+ double reference_cost = cost;
+ double accumulated_reference_model_cost_change = 0.0;
+ double candidate_cost = cost;
+ double accumulated_candidate_model_cost_change = 0.0;
int num_consecutive_invalid_steps = 0;
bool inner_iterations_are_enabled = options.inner_iteration_minimizer != NULL;
while (true) {
bool inner_iterations_were_useful = false;
- if (!RunCallbacks(options.callbacks, iteration_summary, summary)) {
+ if (!RunCallbacks(options, iteration_summary, summary)) {
return;
}
iteration_start_time = WallTimeInSeconds();
if (iteration_summary.iteration >= options_.max_num_iterations) {
+ summary->message = "Maximum number of iterations reached.";
summary->termination_type = NO_CONVERGENCE;
- VLOG(1) << "Terminating: Maximum number of iterations reached.";
- break;
+ VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+ return;
}
const double total_solver_time = iteration_start_time - start_time +
summary->preprocessor_time_in_seconds;
if (total_solver_time >= options_.max_solver_time_in_seconds) {
+ summary->message = "Maximum solver time reached.";
summary->termination_type = NO_CONVERGENCE;
- VLOG(1) << "Terminating: Maximum solver time reached.";
- break;
+ VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+ return;
}
const double strategy_start_time = WallTimeInSeconds();
@@ -221,6 +301,15 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
residuals.data(),
trust_region_step.data());
+ if (strategy_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
+ summary->message =
+ "Linear solver failed due to unrecoverable "
+ "non-numeric causes. Please see the error log for clues. ";
+ summary->termination_type = FAILURE;
+ LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+ return;
+ }
+
iteration_summary = IterationSummary();
iteration_summary.iteration = summary->iterations.back().iteration + 1;
iteration_summary.step_solver_time_in_seconds =
@@ -231,7 +320,7 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
iteration_summary.step_is_successful = false;
double model_cost_change = 0.0;
- if (strategy_summary.termination_type != FAILURE) {
+ if (strategy_summary.termination_type != LINEAR_SOLVER_FAILURE) {
// new_model_cost
// = 1/2 [f + J * step]^2
// = 1/2 [ f'f + 2f'J * step + step' * J' * J * step ]
@@ -245,9 +334,10 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
- model_residuals.dot(residuals + model_residuals / 2.0);
if (model_cost_change < 0.0) {
- VLOG(1) << "Invalid step: current_cost: " << cost
- << " absolute difference " << model_cost_change
- << " relative difference " << (model_cost_change / cost);
+ VLOG_IF(1, is_not_silent)
+ << "Invalid step: current_cost: " << cost
+ << " absolute difference " << model_cost_change
+ << " relative difference " << (model_cost_change / cost);
} else {
iteration_summary.step_is_valid = true;
}
@@ -256,16 +346,15 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
if (!iteration_summary.step_is_valid) {
// Invalid steps can happen due to a number of reasons, and we
// allow a limited number of successive failures, and return with
- // NUMERICAL_FAILURE if this limit is exceeded.
+ // FAILURE if this limit is exceeded.
if (++num_consecutive_invalid_steps >=
options_.max_num_consecutive_invalid_steps) {
- summary->termination_type = NUMERICAL_FAILURE;
- summary->error = StringPrintf(
- "Terminating. Number of successive invalid steps more "
+ summary->message = StringPrintf(
+ "Number of successive invalid steps more "
"than Solver::Options::max_num_consecutive_invalid_steps: %d",
options_.max_num_consecutive_invalid_steps);
-
- LOG(WARNING) << summary->error;
+ summary->termination_type = FAILURE;
+ LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
return;
}
@@ -278,6 +367,8 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
iteration_summary.cost_change = 0.0;
iteration_summary.gradient_max_norm =
summary->iterations.back().gradient_max_norm;
+ iteration_summary.gradient_norm =
+ summary->iterations.back().gradient_norm;
iteration_summary.step_norm = 0.0;
iteration_summary.relative_decrease = 0.0;
iteration_summary.eta = options_.eta;
@@ -287,26 +378,37 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
// Undo the Jacobian column scaling.
delta = (trust_region_step.array() * scale.array()).matrix();
- if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
- summary->termination_type = NUMERICAL_FAILURE;
- summary->error =
- "Terminating. Failed to compute Plus(x, delta, x_plus_delta).";
- LOG(WARNING) << summary->error;
- return;
+ // Try improving the step further by using an ARMIJO line
+ // search.
+ //
+ // TODO(sameeragarwal): What happens to trust region sizing as
+ // it interacts with the line search ?
+ if (use_line_search) {
+ const LineSearch::Summary line_search_summary =
+ DoLineSearch(options, x, gradient, cost, delta, evaluator);
+ if (line_search_summary.success) {
+ delta *= line_search_summary.optimal_step_size;
+ }
}
- // Try this step.
- double new_cost = numeric_limits<double>::max();
- if (!evaluator->Evaluate(x_plus_delta.data(),
- &new_cost,
- NULL, NULL, NULL)) {
- // If the evaluation of the new cost fails, treat it as a step
- // with high cost.
- LOG(WARNING) << "Step failed to evaluate. "
- << "Treating it as step with infinite cost";
- new_cost = numeric_limits<double>::max();
+ double new_cost = std::numeric_limits<double>::max();
+ if (evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
+ if (!evaluator->Evaluate(x_plus_delta.data(),
+ &new_cost,
+ NULL,
+ NULL,
+ NULL)) {
+ LOG(WARNING) << "Step failed to evaluate. "
+ << "Treating it as a step with infinite cost";
+ new_cost = numeric_limits<double>::max();
+ }
} else {
+ LOG(WARNING) << "x_plus_delta = Plus(x, delta) failed. "
+ << "Treating it as a step with infinite cost";
+ }
+
+ if (new_cost < std::numeric_limits<double>::max()) {
// Check if performing an inner iteration will make it better.
if (inner_iterations_are_enabled) {
++summary->num_inner_iteration_steps;
@@ -320,30 +422,30 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
if (!evaluator->Evaluate(inner_iteration_x.data(),
&new_cost,
NULL, NULL, NULL)) {
- VLOG(2) << "Inner iteration failed.";
+ VLOG_IF(2, is_not_silent) << "Inner iteration failed.";
new_cost = x_plus_delta_cost;
} else {
x_plus_delta = inner_iteration_x;
// Boost the model_cost_change, since the inner iteration
// improvements are not accounted for by the trust region.
model_cost_change += x_plus_delta_cost - new_cost;
- VLOG(2) << "Inner iteration succeeded; current cost: " << cost
- << " x_plus_delta_cost: " << x_plus_delta_cost
- << " new_cost: " << new_cost;
- const double inner_iteration_relative_progress =
- 1.0 - new_cost / x_plus_delta_cost;
- inner_iterations_are_enabled =
- (inner_iteration_relative_progress >
- options.inner_iteration_tolerance);
+ VLOG_IF(2, is_not_silent)
+ << "Inner iteration succeeded; Current cost: " << cost
+ << " Trust region step cost: " << x_plus_delta_cost
+ << " Inner iteration cost: " << new_cost;
inner_iterations_were_useful = new_cost < cost;
+ const double inner_iteration_relative_progress =
+ 1.0 - new_cost / x_plus_delta_cost;
// Disable inner iterations once the relative improvement
// drops below tolerance.
- if (!inner_iterations_are_enabled) {
- VLOG(2) << "Disabling inner iterations. Progress : "
- << inner_iteration_relative_progress;
- }
+ inner_iterations_are_enabled =
+ (inner_iteration_relative_progress >
+ options.inner_iteration_tolerance);
+ VLOG_IF(2, is_not_silent && !inner_iterations_are_enabled)
+ << "Disabling inner iterations. Progress : "
+ << inner_iteration_relative_progress;
}
summary->inner_iteration_time_in_seconds +=
WallTimeInSeconds() - inner_iteration_start_time;
@@ -356,12 +458,14 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
const double step_size_tolerance = options_.parameter_tolerance *
(x_norm + options_.parameter_tolerance);
if (iteration_summary.step_norm <= step_size_tolerance) {
- VLOG(1) << "Terminating. Parameter tolerance reached. "
- << "relative step_norm: "
- << iteration_summary.step_norm /
- (x_norm + options_.parameter_tolerance)
- << " <= " << options_.parameter_tolerance;
- summary->termination_type = PARAMETER_TOLERANCE;
+ summary->message =
+ StringPrintf("Parameter tolerance reached. "
+ "Relative step_norm: %e <= %e.",
+ (iteration_summary.step_norm /
+ (x_norm + options_.parameter_tolerance)),
+ options_.parameter_tolerance);
+ summary->termination_type = CONVERGENCE;
+ VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
return;
}
@@ -369,11 +473,13 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
const double absolute_function_tolerance =
options_.function_tolerance * cost;
if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) {
- VLOG(1) << "Terminating. Function tolerance reached. "
- << "|cost_change|/cost: "
- << fabs(iteration_summary.cost_change) / cost
- << " <= " << options_.function_tolerance;
- summary->termination_type = FUNCTION_TOLERANCE;
+ summary->message =
+ StringPrintf("Function tolerance reached. "
+ "|cost_change|/cost: %e <= %e",
+ fabs(iteration_summary.cost_change) / cost,
+ options_.function_tolerance);
+ summary->termination_type = CONVERGENCE;
+ VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
return;
}
@@ -447,10 +553,12 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
if (!inner_iterations_were_useful &&
relative_decrease <= options_.min_relative_decrease) {
iteration_summary.step_is_nonmonotonic = true;
- VLOG(2) << "Non-monotonic step! "
- << " relative_decrease: " << relative_decrease
- << " historical_relative_decrease: "
- << historical_relative_decrease;
+ VLOG_IF(2, is_not_silent)
+ << "Non-monotonic step! "
+ << " relative_decrease: "
+ << relative_decrease
+ << " historical_relative_decrease: "
+ << historical_relative_decrease;
}
}
}
@@ -458,6 +566,7 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
if (iteration_summary.step_is_successful) {
++summary->num_successful_steps;
strategy->StepAccepted(iteration_summary.relative_decrease);
+
x = x_plus_delta;
x_norm = x.norm();
@@ -468,22 +577,34 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
residuals.data(),
gradient.data(),
jacobian)) {
- summary->termination_type = NUMERICAL_FAILURE;
- summary->error =
- "Terminating: Residual and Jacobian evaluation failed.";
- LOG(WARNING) << summary->error;
+ summary->message = "Residual and Jacobian evaluation failed.";
+ summary->termination_type = FAILURE;
+ LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
return;
}
- iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();
+ negative_gradient = -gradient;
+ if (!evaluator->Plus(x.data(),
+ negative_gradient.data(),
+ projected_gradient_step.data())) {
+ summary->message =
+ "projected_gradient_step = Plus(x, -gradient) failed.";
+ summary->termination_type = FAILURE;
+ LOG(ERROR) << "Terminating: " << summary->message;
+ return;
+ }
- if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
- summary->termination_type = GRADIENT_TOLERANCE;
- VLOG(1) << "Terminating: Gradient tolerance reached."
- << "Relative gradient max norm: "
- << (iteration_summary.gradient_max_norm /
- initial_gradient_max_norm)
- << " <= " << options_.gradient_tolerance;
+ iteration_summary.gradient_max_norm =
+ (x - projected_gradient_step).lpNorm<Eigen::Infinity>();
+ iteration_summary.gradient_norm = (x - projected_gradient_step).norm();
+
+ if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
+ summary->message = StringPrintf("Gradient tolerance reached. "
+ "Gradient max norm: %e <= %e",
+ iteration_summary.gradient_max_norm,
+ options_.gradient_tolerance);
+ summary->termination_type = CONVERGENCE;
+ VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
return;
}
@@ -511,7 +632,8 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
if (cost > candidate_cost) {
      // The current iterate has a higher cost than the
// candidate iterate. Set the candidate to this point.
- VLOG(2) << "Updating the candidate iterate to the current point.";
+ VLOG_IF(2, is_not_silent)
+ << "Updating the candidate iterate to the current point.";
candidate_cost = cost;
accumulated_candidate_model_cost_change = 0.0;
}
@@ -525,7 +647,8 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
// iterate.
if (num_consecutive_nonmonotonic_steps ==
options.max_consecutive_nonmonotonic_steps) {
- VLOG(2) << "Resetting the reference point to the candidate point";
+ VLOG_IF(2, is_not_silent)
+ << "Resetting the reference point to the candidate point";
reference_cost = candidate_cost;
accumulated_reference_model_cost_change =
accumulated_candidate_model_cost_change;
@@ -544,8 +667,9 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
iteration_summary.trust_region_radius = strategy->Radius();
if (iteration_summary.trust_region_radius <
options_.min_trust_region_radius) {
- summary->termination_type = PARAMETER_TOLERANCE;
- VLOG(1) << "Termination. Minimum trust region radius reached.";
+ summary->message = "Termination. Minimum trust region radius reached.";
+ summary->termination_type = CONVERGENCE;
+ VLOG_IF(1, is_not_silent) << summary->message;
return;
}
diff --git a/internal/ceres/trust_region_strategy.h b/internal/ceres/trust_region_strategy.h
index 0dcdbfe..998514f 100644
--- a/internal/ceres/trust_region_strategy.h
+++ b/internal/ceres/trust_region_strategy.h
@@ -33,7 +33,7 @@
#include <string>
#include "ceres/internal/port.h"
-#include "ceres/types.h"
+#include "ceres/linear_solver.h"
namespace ceres {
namespace internal {
@@ -106,7 +106,7 @@ class TrustRegionStrategy {
Summary()
: residual_norm(0.0),
num_iterations(-1),
- termination_type(FAILURE) {
+ termination_type(LINEAR_SOLVER_FAILURE) {
}
// If the trust region problem is,
diff --git a/internal/ceres/types.cc b/internal/ceres/types.cc
index a97f1a5..4710261 100644
--- a/internal/ceres/types.cc
+++ b/internal/ceres/types.cc
@@ -96,6 +96,7 @@ const char* SparseLinearAlgebraLibraryTypeToString(
switch (type) {
CASESTR(SUITE_SPARSE);
CASESTR(CX_SPARSE);
+ CASESTR(EIGEN_SPARSE);
default:
return "UNKNOWN";
}
@@ -107,6 +108,7 @@ bool StringToSparseLinearAlgebraLibraryType(
UpperCase(&value);
STRENUM(SUITE_SPARSE);
STRENUM(CX_SPARSE);
+ STRENUM(EIGEN_SPARSE);
return false;
}
@@ -240,7 +242,7 @@ const char* NonlinearConjugateGradientTypeToString(
NonlinearConjugateGradientType type) {
switch (type) {
CASESTR(FLETCHER_REEVES);
- CASESTR(POLAK_RIBIRERE);
+ CASESTR(POLAK_RIBIERE);
CASESTR(HESTENES_STIEFEL);
default:
return "UNKNOWN";
@@ -252,7 +254,7 @@ bool StringToNonlinearConjugateGradientType(
NonlinearConjugateGradientType* type) {
UpperCase(&value);
STRENUM(FLETCHER_REEVES);
- STRENUM(POLAK_RIBIRERE);
+ STRENUM(POLAK_RIBIERE);
STRENUM(HESTENES_STIEFEL);
return false;
}
@@ -261,8 +263,8 @@ const char* CovarianceAlgorithmTypeToString(
CovarianceAlgorithmType type) {
switch (type) {
CASESTR(DENSE_SVD);
- CASESTR(SPARSE_CHOLESKY);
- CASESTR(SPARSE_QR);
+ CASESTR(EIGEN_SPARSE_QR);
+ CASESTR(SUITE_SPARSE_QR);
default:
return "UNKNOWN";
}
@@ -273,33 +275,37 @@ bool StringToCovarianceAlgorithmType(
CovarianceAlgorithmType* type) {
UpperCase(&value);
STRENUM(DENSE_SVD);
- STRENUM(SPARSE_CHOLESKY);
- STRENUM(SPARSE_QR);
+ STRENUM(EIGEN_SPARSE_QR);
+ STRENUM(SUITE_SPARSE_QR);
return false;
}
-const char* SolverTerminationTypeToString(SolverTerminationType type) {
+const char* VisibilityClusteringTypeToString(
+ VisibilityClusteringType type) {
switch (type) {
- CASESTR(NO_CONVERGENCE);
- CASESTR(FUNCTION_TOLERANCE);
- CASESTR(GRADIENT_TOLERANCE);
- CASESTR(PARAMETER_TOLERANCE);
- CASESTR(NUMERICAL_FAILURE);
- CASESTR(USER_ABORT);
- CASESTR(USER_SUCCESS);
- CASESTR(DID_NOT_RUN);
+ CASESTR(CANONICAL_VIEWS);
+ CASESTR(SINGLE_LINKAGE);
default:
return "UNKNOWN";
}
}
-const char* LinearSolverTerminationTypeToString(
- LinearSolverTerminationType type) {
+bool StringToVisibilityClusteringType(
+ string value,
+ VisibilityClusteringType* type) {
+ UpperCase(&value);
+ STRENUM(CANONICAL_VIEWS);
+ STRENUM(SINGLE_LINKAGE);
+ return false;
+}
+
+const char* TerminationTypeToString(TerminationType type) {
switch (type) {
- CASESTR(TOLERANCE);
- CASESTR(MAX_ITERATIONS);
- CASESTR(STAGNATION);
+ CASESTR(CONVERGENCE);
+ CASESTR(NO_CONVERGENCE);
CASESTR(FAILURE);
+ CASESTR(USER_SUCCESS);
+ CASESTR(USER_FAILURE);
default:
return "UNKNOWN";
}
diff --git a/internal/ceres/unsymmetric_linear_solver_test.cc b/internal/ceres/unsymmetric_linear_solver_test.cc
index af9dffe..0b82e6a 100644
--- a/internal/ceres/unsymmetric_linear_solver_test.cc
+++ b/internal/ceres/unsymmetric_linear_solver_test.cc
@@ -57,7 +57,7 @@ class UnsymmetricLinearSolverTest : public ::testing::Test {
}
void TestSolver(const LinearSolver::Options& options) {
- scoped_ptr<LinearSolver> solver(LinearSolver::Create(options));
+
LinearSolver::PerSolveOptions per_solve_options;
LinearSolver::Summary unregularized_solve_summary;
@@ -84,13 +84,17 @@ class UnsymmetricLinearSolverTest : public ::testing::Test {
} else {
LOG(FATAL) << "Unknown linear solver : " << options.type;
}
+
// Unregularized
+ scoped_ptr<LinearSolver> solver(LinearSolver::Create(options));
unregularized_solve_summary =
solver->Solve(transformed_A.get(),
b_.get(),
per_solve_options,
x_unregularized.data());
+ // Sparsity structure is changing, reset the solver.
+ solver.reset(LinearSolver::Create(options));
// Regularized solution
per_solve_options.D = D_.get();
regularized_solve_summary =
@@ -99,15 +103,23 @@ class UnsymmetricLinearSolverTest : public ::testing::Test {
per_solve_options,
x_regularized.data());
- EXPECT_EQ(unregularized_solve_summary.termination_type, TOLERANCE);
+ EXPECT_EQ(unregularized_solve_summary.termination_type,
+ LINEAR_SOLVER_SUCCESS);
for (int i = 0; i < A_->num_cols(); ++i) {
- EXPECT_NEAR(sol_unregularized_[i], x_unregularized[i], 1e-8);
+ EXPECT_NEAR(sol_unregularized_[i], x_unregularized[i], 1e-8)
+ << "\nExpected: "
+ << ConstVectorRef(sol_unregularized_.get(), A_->num_cols()).transpose()
+ << "\nActual: " << x_unregularized.transpose();
}
- EXPECT_EQ(regularized_solve_summary.termination_type, TOLERANCE);
+ EXPECT_EQ(regularized_solve_summary.termination_type,
+ LINEAR_SOLVER_SUCCESS);
for (int i = 0; i < A_->num_cols(); ++i) {
- EXPECT_NEAR(sol_regularized_[i], x_regularized[i], 1e-8);
+ EXPECT_NEAR(sol_regularized_[i], x_regularized[i], 1e-8)
+ << "\nExpected: "
+ << ConstVectorRef(sol_regularized_.get(), A_->num_cols()).transpose()
+ << "\nActual: " << x_regularized.transpose();
}
}
@@ -166,6 +178,15 @@ TEST_F(UnsymmetricLinearSolverTest,
options.use_postordering = true;
TestSolver(options);
}
+
+TEST_F(UnsymmetricLinearSolverTest,
+ SparseNormalCholeskyUsingSuiteSparseDynamicSparsity) {
+ LinearSolver::Options options;
+ options.sparse_linear_algebra_library_type = SUITE_SPARSE;
+ options.type = SPARSE_NORMAL_CHOLESKY;
+ options.dynamic_sparsity = true;
+ TestSolver(options);
+}
#endif
#ifndef CERES_NO_CXSPARSE
@@ -186,7 +207,46 @@ TEST_F(UnsymmetricLinearSolverTest,
options.use_postordering = true;
TestSolver(options);
}
+
+TEST_F(UnsymmetricLinearSolverTest,
+ SparseNormalCholeskyUsingCXSparseDynamicSparsity) {
+ LinearSolver::Options options;
+ options.sparse_linear_algebra_library_type = CX_SPARSE;
+ options.type = SPARSE_NORMAL_CHOLESKY;
+ options.dynamic_sparsity = true;
+ TestSolver(options);
+}
#endif
+#ifdef CERES_USE_EIGEN_SPARSE
+TEST_F(UnsymmetricLinearSolverTest,
+ SparseNormalCholeskyUsingEigenPreOrdering) {
+ LinearSolver::Options options;
+ options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+ options.type = SPARSE_NORMAL_CHOLESKY;
+ options.use_postordering = false;
+ TestSolver(options);
+}
+
+TEST_F(UnsymmetricLinearSolverTest,
+ SparseNormalCholeskyUsingEigenPostOrdering) {
+ LinearSolver::Options options;
+ options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+ options.type = SPARSE_NORMAL_CHOLESKY;
+ options.use_postordering = true;
+ TestSolver(options);
+}
+
+TEST_F(UnsymmetricLinearSolverTest,
+ SparseNormalCholeskyUsingEigenDynamicSparsity) {
+ LinearSolver::Options options;
+ options.sparse_linear_algebra_library_type = EIGEN_SPARSE;
+ options.type = SPARSE_NORMAL_CHOLESKY;
+ options.dynamic_sparsity = true;
+ TestSolver(options);
+}
+
+#endif // CERES_USE_EIGEN_SPARSE
+
} // namespace internal
} // namespace ceres
diff --git a/internal/ceres/visibility.cc b/internal/ceres/visibility.cc
index acfa45b..b3ee185 100644
--- a/internal/ceres/visibility.cc
+++ b/internal/ceres/visibility.cc
@@ -28,6 +28,9 @@
//
// Author: kushalav@google.com (Avanish Kushal)
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_NO_SUITESPARSE
#include "ceres/visibility.h"
diff --git a/internal/ceres/visibility.h b/internal/ceres/visibility.h
index 2d1e6f8..5ddd3a5 100644
--- a/internal/ceres/visibility.h
+++ b/internal/ceres/visibility.h
@@ -35,6 +35,9 @@
#ifndef CERES_INTERNAL_VISIBILITY_H_
#define CERES_INTERNAL_VISIBILITY_H_
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_NO_SUITESPARSE
#include <set>
diff --git a/internal/ceres/visibility_based_preconditioner.cc b/internal/ceres/visibility_based_preconditioner.cc
index 7af1339..695eedc 100644
--- a/internal/ceres/visibility_based_preconditioner.cc
+++ b/internal/ceres/visibility_based_preconditioner.cc
@@ -28,6 +28,9 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_NO_SUITESPARSE
#include "ceres/visibility_based_preconditioner.h"
@@ -43,12 +46,12 @@
#include "ceres/block_sparse_matrix.h"
#include "ceres/canonical_views_clustering.h"
#include "ceres/collections_port.h"
-#include "ceres/detect_structure.h"
#include "ceres/graph.h"
#include "ceres/graph_algorithms.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/linear_solver.h"
#include "ceres/schur_eliminator.h"
+#include "ceres/single_linkage_clustering.h"
#include "ceres/visibility.h"
#include "glog/logging.h"
@@ -61,8 +64,9 @@ namespace internal {
//
// This will require some more work on the clustering algorithm and
// possibly some more refactoring of the code.
-static const double kSizePenaltyWeight = 3.0;
-static const double kSimilarityPenaltyWeight = 0.0;
+static const double kCanonicalViewsSizePenaltyWeight = 3.0;
+static const double kCanonicalViewsSimilarityPenaltyWeight = 0.0;
+static const double kSingleLinkageMinSimilarity = 0.9;
VisibilityBasedPreconditioner::VisibilityBasedPreconditioner(
const CompressedRowBlockStructure& bs,
@@ -188,17 +192,31 @@ void VisibilityBasedPreconditioner::ClusterCameras(
scoped_ptr<Graph<int> > schur_complement_graph(
CHECK_NOTNULL(CreateSchurComplementGraph(visibility)));
- CanonicalViewsClusteringOptions options;
- options.size_penalty_weight = kSizePenaltyWeight;
- options.similarity_penalty_weight = kSimilarityPenaltyWeight;
-
- vector<int> centers;
HashMap<int, int> membership;
- ComputeCanonicalViewsClustering(*schur_complement_graph,
- options,
- &centers,
- &membership);
- num_clusters_ = centers.size();
+
+ if (options_.visibility_clustering_type == CANONICAL_VIEWS) {
+ vector<int> centers;
+ CanonicalViewsClusteringOptions clustering_options;
+ clustering_options.size_penalty_weight =
+ kCanonicalViewsSizePenaltyWeight;
+ clustering_options.similarity_penalty_weight =
+ kCanonicalViewsSimilarityPenaltyWeight;
+ ComputeCanonicalViewsClustering(clustering_options,
+ *schur_complement_graph,
+ &centers,
+ &membership);
+ num_clusters_ = centers.size();
+ } else if (options_.visibility_clustering_type == SINGLE_LINKAGE) {
+ SingleLinkageClusteringOptions clustering_options;
+ clustering_options.min_similarity =
+ kSingleLinkageMinSimilarity;
+ num_clusters_ = ComputeSingleLinkageClustering(clustering_options,
+ *schur_complement_graph,
+ &membership);
+ } else {
+ LOG(FATAL) << "Unknown visibility clustering algorithm.";
+ }
+
CHECK_GT(num_clusters_, 0);
VLOG(2) << "num_clusters: " << num_clusters_;
FlattenMembershipMap(membership, &cluster_membership_);
@@ -313,14 +331,11 @@ void VisibilityBasedPreconditioner::InitEliminator(
LinearSolver::Options eliminator_options;
eliminator_options.elimination_groups = options_.elimination_groups;
eliminator_options.num_threads = options_.num_threads;
-
- DetectStructure(bs, options_.elimination_groups[0],
- &eliminator_options.row_block_size,
- &eliminator_options.e_block_size,
- &eliminator_options.f_block_size);
-
+ eliminator_options.e_block_size = options_.e_block_size;
+ eliminator_options.f_block_size = options_.f_block_size;
+ eliminator_options.row_block_size = options_.row_block_size;
eliminator_.reset(SchurEliminatorBase::Create(eliminator_options));
- eliminator_->Init(options_.elimination_groups[0], &bs);
+ eliminator_->Init(eliminator_options.elimination_groups[0], &bs);
}
// Update the values of the preconditioner matrix and factorize it.
@@ -356,14 +371,18 @@ bool VisibilityBasedPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
//
// Doing the factorization like this saves us matrix mass when
// scaling is not needed, which is quite often in our experience.
- bool status = Factorize();
+ LinearSolverTerminationType status = Factorize();
+
+ if (status == LINEAR_SOLVER_FATAL_ERROR) {
+ return false;
+ }
// The scaling only affects the tri-diagonal case, since
  // ScaleOffDiagonalBlocks only pays attention to the cells that
// belong to the edges of the degree-2 forest. In the CLUSTER_JACOBI
// case, the preconditioner is guaranteed to be positive
// semidefinite.
- if (!status && options_.type == CLUSTER_TRIDIAGONAL) {
+ if (status == LINEAR_SOLVER_FAILURE && options_.type == CLUSTER_TRIDIAGONAL) {
VLOG(1) << "Unscaled factorization failed. Retrying with off-diagonal "
<< "scaling";
ScaleOffDiagonalCells();
@@ -371,7 +390,7 @@ bool VisibilityBasedPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
}
VLOG(2) << "Compute time: " << time(NULL) - start_time;
- return status;
+ return (status == LINEAR_SOLVER_SUCCESS);
}
// Consider the preconditioner matrix as meta-block matrix, whose
@@ -408,7 +427,7 @@ void VisibilityBasedPreconditioner::ScaleOffDiagonalCells() {
// Compute the sparse Cholesky factorization of the preconditioner
// matrix.
-bool VisibilityBasedPreconditioner::Factorize() {
+LinearSolverTerminationType VisibilityBasedPreconditioner::Factorize() {
// Extract the TripletSparseMatrix that is used for actually storing
// S and convert it into a cholmod_sparse object.
cholmod_sparse* lhs = ss_.CreateSparseMatrix(
@@ -419,14 +438,21 @@ bool VisibilityBasedPreconditioner::Factorize() {
// matrix contains the values.
lhs->stype = 1;
+ // TODO(sameeragarwal): Refactor to pipe this up and out.
+ string status;
+
// Symbolic factorization is computed if we don't already have one handy.
if (factor_ == NULL) {
- factor_ = ss_.BlockAnalyzeCholesky(lhs, block_size_, block_size_);
+ factor_ = ss_.BlockAnalyzeCholesky(lhs, block_size_, block_size_, &status);
}
- bool status = ss_.Cholesky(lhs, factor_);
+ const LinearSolverTerminationType termination_type =
+ (factor_ != NULL)
+ ? ss_.Cholesky(lhs, factor_, &status)
+ : LINEAR_SOLVER_FATAL_ERROR;
+
ss_.Free(lhs);
- return status;
+ return termination_type;
}
void VisibilityBasedPreconditioner::RightMultiply(const double* x,
@@ -437,7 +463,10 @@ void VisibilityBasedPreconditioner::RightMultiply(const double* x,
const int num_rows = m_->num_rows();
memcpy(CHECK_NOTNULL(tmp_rhs_)->x, x, m_->num_rows() * sizeof(*x));
- cholmod_dense* solution = CHECK_NOTNULL(ss->Solve(factor_, tmp_rhs_));
+ // TODO(sameeragarwal): Better error handling.
+ string status;
+ cholmod_dense* solution =
+ CHECK_NOTNULL(ss->Solve(factor_, tmp_rhs_, &status));
memcpy(y, solution->x, sizeof(*y) * num_rows);
ss->Free(solution);
}
@@ -546,11 +575,17 @@ Graph<int>* VisibilityBasedPreconditioner::CreateClusterGraph(
// cluster ids. Convert this into a flat array for quick lookup. It is
// possible that some of the vertices may not be associated with any
// cluster. In that case, randomly assign them to one of the clusters.
+//
+// The cluster ids can be non-contiguous integers. So as we flatten
+// the membership_map, we also map the cluster ids to a contiguous set
+// of integers so that the cluster ids are in [0, num_clusters_).
void VisibilityBasedPreconditioner::FlattenMembershipMap(
const HashMap<int, int>& membership_map,
vector<int>* membership_vector) const {
CHECK_NOTNULL(membership_vector)->resize(0);
membership_vector->resize(num_blocks_, -1);
+
+ HashMap<int, int> cluster_id_to_index;
// Iterate over the cluster membership map and update the
// cluster_membership_ vector assigning arbitrary cluster ids to
// the few cameras that have not been clustered.
@@ -571,7 +606,16 @@ void VisibilityBasedPreconditioner::FlattenMembershipMap(
cluster_id = camera_id % num_clusters_;
}
- membership_vector->at(camera_id) = cluster_id;
+ const int index = FindWithDefault(cluster_id_to_index,
+ cluster_id,
+ cluster_id_to_index.size());
+
+ if (index == cluster_id_to_index.size()) {
+ cluster_id_to_index[cluster_id] = index;
+ }
+
+ CHECK_LT(index, num_clusters_);
+ membership_vector->at(camera_id) = index;
}
}
diff --git a/internal/ceres/visibility_based_preconditioner.h b/internal/ceres/visibility_based_preconditioner.h
index c58b1a7..70cea83 100644
--- a/internal/ceres/visibility_based_preconditioner.h
+++ b/internal/ceres/visibility_based_preconditioner.h
@@ -55,6 +55,7 @@
#include "ceres/graph.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_solver.h"
#include "ceres/preconditioner.h"
#include "ceres/suitesparse.h"
@@ -147,7 +148,7 @@ class VisibilityBasedPreconditioner : public BlockSparseMatrixPreconditioner {
void ComputeClusterTridiagonalSparsity(const CompressedRowBlockStructure& bs);
void InitStorage(const CompressedRowBlockStructure& bs);
void InitEliminator(const CompressedRowBlockStructure& bs);
- bool Factorize();
+ LinearSolverTerminationType Factorize();
void ScaleOffDiagonalCells();
void ClusterCameras(const vector< set<int> >& visibility);
diff --git a/internal/ceres/visibility_based_preconditioner_test.cc b/internal/ceres/visibility_based_preconditioner_test.cc
index 2edbb18..c718b5e 100644
--- a/internal/ceres/visibility_based_preconditioner_test.cc
+++ b/internal/ceres/visibility_based_preconditioner_test.cc
@@ -28,6 +28,9 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_NO_SUITESPARSE
#include "ceres/visibility_based_preconditioner.h"
diff --git a/internal/ceres/visibility_test.cc b/internal/ceres/visibility_test.cc
index 3cfb232..0e22f88 100644
--- a/internal/ceres/visibility_test.cc
+++ b/internal/ceres/visibility_test.cc
@@ -29,6 +29,9 @@
// Author: kushalav@google.com (Avanish Kushal)
// sameeragarwal@google.com (Sameer Agarwal)
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
#ifndef CERES_NO_SUITESPARSE
#include "ceres/visibility.h"