author     Angus Kong <shkong@google.com>    2013-02-13 14:56:04 -0800
committer  Angus Kong <shkong@google.com>    2013-02-14 13:33:05 -0800
commit     0ae28bd5885b5daa526898fcf7c323dc2c3e1963 (patch)
tree       487c4897ae7509fb4e05751b0a673a8090e26d41 /internal/ceres
parent     e9e9be9383b479ac1573370ab221ffbac4de8c90 (diff)
download   ceres-solver-0ae28bd5885b5daa526898fcf7c323dc2c3e1963.tar.gz
Added a NOTICE and a MODULE_LICENSE_BSD file.
Added Android.mk for master build and unbundled build.
Added CleanSpec.mk to optimize Android build.

Change-Id: I6cd82bcabc1a94b10239f9fca017de0afd20e769
Diffstat (limited to 'internal/ceres')
-rw-r--r--  internal/ceres/CMakeLists.txt | 264
-rw-r--r--  internal/ceres/array_utils.cc | 67
-rw-r--r--  internal/ceres/array_utils.h | 65
-rw-r--r--  internal/ceres/array_utils_test.cc | 58
-rw-r--r--  internal/ceres/autodiff_cost_function_test.cc | 146
-rw-r--r--  internal/ceres/autodiff_test.cc | 406
-rw-r--r--  internal/ceres/block_evaluate_preparer.cc | 83
-rw-r--r--  internal/ceres/block_evaluate_preparer.h | 77
-rw-r--r--  internal/ceres/block_jacobi_preconditioner.cc | 135
-rw-r--r--  internal/ceres/block_jacobi_preconditioner.h | 84
-rw-r--r--  internal/ceres/block_jacobian_writer.cc | 212
-rw-r--r--  internal/ceres/block_jacobian_writer.h | 127
-rw-r--r--  internal/ceres/block_random_access_dense_matrix.cc | 83
-rw-r--r--  internal/ceres/block_random_access_dense_matrix.h | 98
-rw-r--r--  internal/ceres/block_random_access_dense_matrix_test.cc | 115
-rw-r--r--  internal/ceres/block_random_access_matrix.cc | 40
-rw-r--r--  internal/ceres/block_random_access_matrix.h | 132
-rw-r--r--  internal/ceres/block_random_access_sparse_matrix.cc | 158
-rw-r--r--  internal/ceres/block_random_access_sparse_matrix.h | 111
-rw-r--r--  internal/ceres/block_random_access_sparse_matrix_test.cc | 150
-rw-r--r--  internal/ceres/block_sparse_matrix.cc | 286
-rw-r--r--  internal/ceres/block_sparse_matrix.h | 144
-rw-r--r--  internal/ceres/block_sparse_matrix_test.cc | 135
-rw-r--r--  internal/ceres/block_structure.cc | 92
-rw-r--r--  internal/ceres/block_structure.h | 105
-rw-r--r--  internal/ceres/canonical_views_clustering.cc | 238
-rw-r--r--  internal/ceres/canonical_views_clustering.h | 133
-rw-r--r--  internal/ceres/canonical_views_clustering_test.cc | 143
-rw-r--r--  internal/ceres/casts.h | 108
-rw-r--r--  internal/ceres/cgnr_linear_operator.h | 120
-rw-r--r--  internal/ceres/cgnr_solver.cc | 80
-rw-r--r--  internal/ceres/cgnr_solver.h | 66
-rw-r--r--  internal/ceres/collections_port.cc | 43
-rw-r--r--  internal/ceres/collections_port.h | 167
-rw-r--r--  internal/ceres/compressed_row_jacobian_writer.cc | 217
-rw-r--r--  internal/ceres/compressed_row_jacobian_writer.h | 75
-rw-r--r--  internal/ceres/compressed_row_sparse_matrix.cc | 354
-rw-r--r--  internal/ceres/compressed_row_sparse_matrix.h | 149
-rw-r--r--  internal/ceres/compressed_row_sparse_matrix_test.cc | 203
-rw-r--r--  internal/ceres/conditioned_cost_function.cc | 130
-rw-r--r--  internal/ceres/conditioned_cost_function_test.cc | 126
-rw-r--r--  internal/ceres/conjugate_gradients_solver.cc | 233
-rw-r--r--  internal/ceres/conjugate_gradients_solver.h | 74
-rw-r--r--  internal/ceres/coordinate_descent_minimizer.cc | 235
-rw-r--r--  internal/ceres/coordinate_descent_minimizer.h | 86
-rw-r--r--  internal/ceres/corrector.cc | 125
-rw-r--r--  internal/ceres/corrector.h | 88
-rw-r--r--  internal/ceres/corrector_test.cc | 276
-rw-r--r--  internal/ceres/cxsparse.cc | 130
-rw-r--r--  internal/ceres/cxsparse.h | 90
-rw-r--r--  internal/ceres/dense_jacobian_writer.h | 111
-rw-r--r--  internal/ceres/dense_normal_cholesky_solver.cc | 86
-rw-r--r--  internal/ceres/dense_normal_cholesky_solver.h | 95
-rw-r--r--  internal/ceres/dense_qr_solver.cc | 90
-rw-r--r--  internal/ceres/dense_qr_solver.h | 101
-rw-r--r--  internal/ceres/dense_sparse_matrix.cc | 209
-rw-r--r--  internal/ceres/dense_sparse_matrix.h | 117
-rw-r--r--  internal/ceres/dense_sparse_matrix_test.cc | 232
-rw-r--r--  internal/ceres/detect_structure.cc | 114
-rw-r--r--  internal/ceres/detect_structure.h | 63
-rw-r--r--  internal/ceres/dogleg_strategy.cc | 691
-rw-r--r--  internal/ceres/dogleg_strategy.h | 163
-rw-r--r--  internal/ceres/dogleg_strategy_test.cc | 291
-rw-r--r--  internal/ceres/evaluator.cc | 147
-rw-r--r--  internal/ceres/evaluator.h | 160
-rw-r--r--  internal/ceres/evaluator_test.cc | 959
-rw-r--r--  internal/ceres/file.cc | 93
-rw-r--r--  internal/ceres/file.h | 52
-rw-r--r--  internal/ceres/generate_eliminator_specialization.py | 186
-rw-r--r--  internal/ceres/generated/schur_eliminator_2_2_2.cc | 53
-rw-r--r--  internal/ceres/generated/schur_eliminator_2_2_3.cc | 53
-rw-r--r--  internal/ceres/generated/schur_eliminator_2_2_4.cc | 53
-rw-r--r--  internal/ceres/generated/schur_eliminator_2_2_d.cc | 53
-rw-r--r--  internal/ceres/generated/schur_eliminator_2_3_3.cc | 53
-rw-r--r--  internal/ceres/generated/schur_eliminator_2_3_4.cc | 53
-rw-r--r--  internal/ceres/generated/schur_eliminator_2_3_9.cc | 53
-rw-r--r--  internal/ceres/generated/schur_eliminator_2_3_d.cc | 53
-rw-r--r--  internal/ceres/generated/schur_eliminator_2_4_3.cc | 53
-rw-r--r--  internal/ceres/generated/schur_eliminator_2_4_4.cc | 53
-rw-r--r--  internal/ceres/generated/schur_eliminator_2_4_d.cc | 53
-rw-r--r--  internal/ceres/generated/schur_eliminator_4_4_2.cc | 53
-rw-r--r--  internal/ceres/generated/schur_eliminator_4_4_3.cc | 53
-rw-r--r--  internal/ceres/generated/schur_eliminator_4_4_4.cc | 53
-rw-r--r--  internal/ceres/generated/schur_eliminator_4_4_d.cc | 53
-rw-r--r--  internal/ceres/generated/schur_eliminator_d_d_d.cc | 53
-rw-r--r--  internal/ceres/gmock/gmock.h | 12822
-rw-r--r--  internal/ceres/gmock/mock-log.h | 160
-rw-r--r--  internal/ceres/gmock_gtest_all.cc | 10554
-rw-r--r--  internal/ceres/gmock_main.cc | 62
-rw-r--r--  internal/ceres/gradient_checker_test.cc | 193
-rw-r--r--  internal/ceres/gradient_checking_cost_function.cc | 308
-rw-r--r--  internal/ceres/gradient_checking_cost_function.h | 85
-rw-r--r--  internal/ceres/gradient_checking_cost_function_test.cc | 417
-rw-r--r--  internal/ceres/graph.h | 160
-rw-r--r--  internal/ceres/graph_algorithms.h | 270
-rw-r--r--  internal/ceres/graph_algorithms_test.cc | 200
-rw-r--r--  internal/ceres/graph_test.cc | 107
-rw-r--r--  internal/ceres/gtest/gtest.h | 19537
-rw-r--r--  internal/ceres/implicit_schur_complement.cc | 228
-rw-r--r--  internal/ceres/implicit_schur_complement.h | 168
-rw-r--r--  internal/ceres/implicit_schur_complement_test.cc | 196
-rw-r--r--  internal/ceres/integral_types.h | 92
-rw-r--r--  internal/ceres/iterative_schur_complement_solver.cc | 145
-rw-r--r--  internal/ceres/iterative_schur_complement_solver.h | 94
-rw-r--r--  internal/ceres/iterative_schur_complement_solver_test.cc | 123
-rw-r--r--  internal/ceres/jet_test.cc | 335
-rw-r--r--  internal/ceres/levenberg_marquardt_strategy.cc | 144
-rw-r--r--  internal/ceres/levenberg_marquardt_strategy.h | 86
-rw-r--r--  internal/ceres/levenberg_marquardt_strategy_test.cc | 157
-rw-r--r--  internal/ceres/linear_least_squares_problems.cc | 769
-rw-r--r--  internal/ceres/linear_least_squares_problems.h | 87
-rw-r--r--  internal/ceres/linear_operator.cc | 40
-rw-r--r--  internal/ceres/linear_operator.h | 59
-rw-r--r--  internal/ceres/linear_solver.cc | 93
-rw-r--r--  internal/ceres/linear_solver.h | 303
-rw-r--r--  internal/ceres/local_parameterization.cc | 141
-rw-r--r--  internal/ceres/local_parameterization_test.cc | 257
-rw-r--r--  internal/ceres/loss_function.cc | 157
-rw-r--r--  internal/ceres/loss_function_test.cc | 227
-rw-r--r--  internal/ceres/map_util.h | 129
-rw-r--r--  internal/ceres/matrix.proto | 143
-rw-r--r--  internal/ceres/matrix_proto.h | 40
-rw-r--r--  internal/ceres/miniglog/glog/logging.cc | 39
-rw-r--r--  internal/ceres/miniglog/glog/logging.h | 391
-rw-r--r--  internal/ceres/minimizer.h | 149
-rw-r--r--  internal/ceres/minimizer_test.cc | 63
-rw-r--r--  internal/ceres/mutex.h | 322
-rw-r--r--  internal/ceres/normal_prior.cc | 66
-rw-r--r--  internal/ceres/normal_prior_test.cc | 133
-rw-r--r--  internal/ceres/numeric_diff_cost_function_test.cc | 271
-rw-r--r--  internal/ceres/ordered_groups_test.cc | 163
-rw-r--r--  internal/ceres/parameter_block.h | 271
-rw-r--r--  internal/ceres/parameter_block_ordering.cc | 122
-rw-r--r--  internal/ceres/parameter_block_ordering.h | 78
-rw-r--r--  internal/ceres/parameter_block_ordering_test.cc | 177
-rw-r--r--  internal/ceres/parameter_block_test.cc | 173
-rw-r--r--  internal/ceres/partitioned_matrix_view.cc | 315
-rw-r--r--  internal/ceres/partitioned_matrix_view.h | 121
-rw-r--r--  internal/ceres/partitioned_matrix_view_test.cc | 191
-rw-r--r--  internal/ceres/polynomial_solver.cc | 183
-rw-r--r--  internal/ceres/polynomial_solver.h | 65
-rw-r--r--  internal/ceres/polynomial_solver_test.cc | 223
-rw-r--r--  internal/ceres/problem.cc | 189
-rw-r--r--  internal/ceres/problem_impl.cc | 427
-rw-r--r--  internal/ceres/problem_impl.h | 148
-rw-r--r--  internal/ceres/problem_test.cc | 335
-rw-r--r--  internal/ceres/program.cc | 232
-rw-r--r--  internal/ceres/program.h | 129
-rw-r--r--  internal/ceres/program_evaluator.h | 335
-rw-r--r--  internal/ceres/random.h | 70
-rw-r--r--  internal/ceres/residual_block.cc | 215
-rw-r--r--  internal/ceres/residual_block.h | 124
-rw-r--r--  internal/ceres/residual_block_test.cc | 326
-rw-r--r--  internal/ceres/residual_block_utils.cc | 156
-rw-r--r--  internal/ceres/residual_block_utils.h | 80
-rw-r--r--  internal/ceres/residual_block_utils_test.cc | 165
-rw-r--r--  internal/ceres/rotation_test.cc | 970
-rw-r--r--  internal/ceres/runtime_numeric_diff_cost_function.cc | 217
-rw-r--r--  internal/ceres/runtime_numeric_diff_cost_function.h | 87
-rw-r--r--  internal/ceres/runtime_numeric_diff_cost_function_test.cc | 222
-rw-r--r--  internal/ceres/schur_complement_solver.cc | 387
-rw-r--r--  internal/ceres/schur_complement_solver.h | 187
-rw-r--r--  internal/ceres/schur_complement_solver_test.cc | 168
-rw-r--r--  internal/ceres/schur_eliminator.cc | 141
-rw-r--r--  internal/ceres/schur_eliminator.h | 339
-rw-r--r--  internal/ceres/schur_eliminator_impl.h | 724
-rw-r--r--  internal/ceres/schur_eliminator_test.cc | 229
-rw-r--r--  internal/ceres/scratch_evaluate_preparer.cc | 78
-rw-r--r--  internal/ceres/scratch_evaluate_preparer.h | 69
-rw-r--r--  internal/ceres/solver.cc | 240
-rw-r--r--  internal/ceres/solver_impl.cc | 1100
-rw-r--r--  internal/ceres/solver_impl.h | 140
-rw-r--r--  internal/ceres/solver_impl_test.cc | 880
-rw-r--r--  internal/ceres/sparse_matrix.cc | 40
-rw-r--r--  internal/ceres/sparse_matrix.h | 114
-rw-r--r--  internal/ceres/sparse_normal_cholesky_solver.cc | 253
-rw-r--r--  internal/ceres/sparse_normal_cholesky_solver.h | 93
-rw-r--r--  internal/ceres/split.cc | 115
-rw-r--r--  internal/ceres/split.h | 21
-rw-r--r--  internal/ceres/stl_util.h | 75
-rw-r--r--  internal/ceres/stringprintf.cc | 126
-rw-r--r--  internal/ceres/stringprintf.h | 89
-rw-r--r--  internal/ceres/suitesparse.cc | 346
-rw-r--r--  internal/ceres/suitesparse.h | 233
-rw-r--r--  internal/ceres/suitesparse_test.cc | 194
-rw-r--r--  internal/ceres/symmetric_linear_solver_test.cc | 139
-rw-r--r--  internal/ceres/system_test.cc | 537
-rw-r--r--  internal/ceres/test_util.cc | 127
-rw-r--r--  internal/ceres/test_util.h | 71
-rw-r--r--  internal/ceres/triplet_sparse_matrix.cc | 307
-rw-r--r--  internal/ceres/triplet_sparse_matrix.h | 137
-rw-r--r--  internal/ceres/triplet_sparse_matrix_test.cc | 354
-rw-r--r--  internal/ceres/trust_region_minimizer.cc | 557
-rw-r--r--  internal/ceres/trust_region_minimizer.h | 67
-rw-r--r--  internal/ceres/trust_region_minimizer_test.cc | 376
-rw-r--r--  internal/ceres/trust_region_strategy.cc | 27
-rw-r--r--  internal/ceres/trust_region_strategy.h | 148
-rw-r--r--  internal/ceres/types.cc | 209
-rw-r--r--  internal/ceres/unsymmetric_linear_solver_test.cc | 139
-rw-r--r--  internal/ceres/visibility.cc | 149
-rw-r--r--  internal/ceres/visibility.h | 77
-rw-r--r--  internal/ceres/visibility_based_preconditioner.cc | 613
-rw-r--r--  internal/ceres/visibility_based_preconditioner.h | 273
-rw-r--r--  internal/ceres/visibility_based_preconditioner_test.cc | 362
-rw-r--r--  internal/ceres/visibility_test.cc | 203
-rw-r--r--  internal/ceres/wall_time.cc | 49
-rw-r--r--  internal/ceres/wall_time.h | 44
207 files changed, 80247 insertions, 0 deletions
diff --git a/internal/ceres/CMakeLists.txt b/internal/ceres/CMakeLists.txt
new file mode 100644
index 0000000..adad6dc
--- /dev/null
+++ b/internal/ceres/CMakeLists.txt
@@ -0,0 +1,264 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+# http://code.google.com/p/ceres-solver/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+# used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: keir@google.com (Keir Mierle)
+
+SET(CERES_INTERNAL_SRC
+ array_utils.cc
+ block_evaluate_preparer.cc
+ block_jacobi_preconditioner.cc
+ block_jacobian_writer.cc
+ block_random_access_dense_matrix.cc
+ block_random_access_matrix.cc
+ block_random_access_sparse_matrix.cc
+ block_sparse_matrix.cc
+ block_structure.cc
+ canonical_views_clustering.cc
+ cgnr_solver.cc
+ compressed_row_jacobian_writer.cc
+ compressed_row_sparse_matrix.cc
+ conditioned_cost_function.cc
+ conjugate_gradients_solver.cc
+ coordinate_descent_minimizer.cc
+ corrector.cc
+ cxsparse.cc
+ dense_normal_cholesky_solver.cc
+ dense_qr_solver.cc
+ dense_sparse_matrix.cc
+ detect_structure.cc
+ dogleg_strategy.cc
+ evaluator.cc
+ file.cc
+ gradient_checking_cost_function.cc
+ implicit_schur_complement.cc
+ iterative_schur_complement_solver.cc
+ levenberg_marquardt_strategy.cc
+ linear_least_squares_problems.cc
+ linear_operator.cc
+ linear_solver.cc
+ local_parameterization.cc
+ loss_function.cc
+ normal_prior.cc
+ parameter_block_ordering.cc
+ partitioned_matrix_view.cc
+ polynomial_solver.cc
+ problem.cc
+ problem_impl.cc
+ program.cc
+ residual_block.cc
+ residual_block_utils.cc
+ runtime_numeric_diff_cost_function.cc
+ schur_complement_solver.cc
+ schur_eliminator.cc
+ scratch_evaluate_preparer.cc
+ solver.cc
+ solver_impl.cc
+ sparse_matrix.cc
+ sparse_normal_cholesky_solver.cc
+ split.cc
+ stringprintf.cc
+ suitesparse.cc
+ triplet_sparse_matrix.cc
+ trust_region_minimizer.cc
+ trust_region_strategy.cc
+ types.cc
+ visibility.cc
+ visibility_based_preconditioner.cc
+ wall_time.cc
+)
+
+IF (${PROTOBUF_FOUND})
+ PROTOBUF_GENERATE_CPP(PROTO_SRCS PROTO_HDRS matrix.proto)
+ENDIF (${PROTOBUF_FOUND})
+
+# Also depend on the header files so that they appear in IDEs.
+FILE(GLOB CERES_INTERNAL_HDRS *.h)
+
+# Include the specialized schur solvers.
+IF (${SCHUR_SPECIALIZATIONS})
+ FILE(GLOB CERES_INTERNAL_SCHUR_FILES generated/*.cc)
+ELSE (${SCHUR_SPECIALIZATIONS})
+ # Only the fully dynamic solver. The build is much faster this way.
+ FILE(GLOB CERES_INTERNAL_SCHUR_FILES generated/schur_eliminator_d_d_d.cc)
+ENDIF (${SCHUR_SPECIALIZATIONS})
+
+# For Android, use the internal Glog implementation.
+IF (${BUILD_ANDROID})
+ ADD_LIBRARY(miniglog STATIC
+ miniglog/glog/logging.cc)
+
+ # The Android logging library that defines e.g. __android_log_print is
+ # creatively named "log".
+ TARGET_LINK_LIBRARIES(miniglog log)
+
+ INSTALL(TARGETS miniglog
+ RUNTIME DESTINATION bin
+ LIBRARY DESTINATION lib
+ ARCHIVE DESTINATION lib)
+ENDIF (${BUILD_ANDROID})
+
+SET(CERES_LIBRARY_DEPENDENCIES ${GLOG_LIB})
+
+IF (${GFLAGS})
+ LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${GFLAGS_LIB})
+ENDIF (${GFLAGS})
+
+IF (${SUITESPARSE_FOUND})
+ LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${CHOLMOD_LIB})
+ LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${CCOLAMD_LIB})
+ LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${CAMD_LIB})
+ LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${COLAMD_LIB})
+ LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${AMD_LIB})
+ IF (EXISTS ${SUITESPARSE_CONFIG_LIB})
+ LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${SUITESPARSE_CONFIG_LIB})
+ ENDIF (EXISTS ${SUITESPARSE_CONFIG_LIB})
+
+ IF (EXISTS ${METIS_LIB})
+ LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${METIS_LIB})
+ ENDIF (EXISTS ${METIS_LIB})
+
+ LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${LAPACK_LIB})
+
+ IF (EXISTS ${BLAS_LIB})
+ LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${BLAS_LIB})
+ ENDIF (EXISTS ${BLAS_LIB})
+ENDIF (${SUITESPARSE_FOUND})
+
+IF (${CXSPARSE_FOUND})
+ LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${CXSPARSE_LIB})
+ENDIF (${CXSPARSE_FOUND})
+
+IF (${OPENMP_FOUND})
+ IF (NOT MSVC)
+ LIST(APPEND CERES_LIBRARY_DEPENDENCIES gomp)
+ ENDIF (NOT MSVC)
+ENDIF (${OPENMP_FOUND})
+
+IF (${PROTOBUF_FOUND})
+ LIST(APPEND CERES_LIBRARY_DEPENDENCIES ${PROTOBUF_LIBRARY})
+ENDIF (${PROTOBUF_FOUND})
+
+SET(CERES_LIBRARY_SOURCE
+ ${PROTO_SRCS}
+ ${PROTO_HDRS}
+ ${CERES_INTERNAL_SRC}
+ ${CERES_INTERNAL_HDRS}
+ ${CERES_INTERNAL_SCHUR_FILES})
+
+ADD_LIBRARY(ceres STATIC ${CERES_LIBRARY_SOURCE})
+TARGET_LINK_LIBRARIES(ceres ${CERES_LIBRARY_DEPENDENCIES})
+
+INSTALL(TARGETS ceres
+ RUNTIME DESTINATION bin
+ LIBRARY DESTINATION lib
+ ARCHIVE DESTINATION lib)
+
+# Don't build a DLL on MSVC. Supporting Ceres as a DLL on Windows involves
+# nontrivial changes that we haven't made yet.
+IF (NOT MSVC AND NOT ${BUILD_ANDROID})
+ ADD_LIBRARY(ceres_shared SHARED ${CERES_LIBRARY_SOURCE})
+ TARGET_LINK_LIBRARIES(ceres_shared ${CERES_LIBRARY_DEPENDENCIES})
+ SET_TARGET_PROPERTIES(ceres_shared PROPERTIES
+ VERSION ${CERES_VERSION}
+ SOVERSION ${CERES_ABI_VERSION})
+
+ INSTALL(TARGETS ceres_shared
+ RUNTIME DESTINATION bin
+ LIBRARY DESTINATION lib
+ ARCHIVE DESTINATION lib)
+
+ENDIF (NOT MSVC AND NOT ${BUILD_ANDROID})
+
+IF (${BUILD_TESTING} AND ${GFLAGS})
+ ADD_LIBRARY(gtest gmock_gtest_all.cc gmock_main.cc)
+ ADD_LIBRARY(test_util test_util.cc)
+ TARGET_LINK_LIBRARIES(gtest ${GFLAGS_LIB} ${GLOG_LIB})
+
+ MACRO (CERES_TEST NAME)
+ ADD_EXECUTABLE(${NAME}_test ${NAME}_test.cc)
+ TARGET_LINK_LIBRARIES(${NAME}_test test_util ceres gtest)
+ ADD_TEST(NAME ${NAME}_test
+ COMMAND ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${NAME}_test
+ --test_srcdir
+ ${CMAKE_SOURCE_DIR}/data)
+ ENDMACRO (CERES_TEST)
+
+ CERES_TEST(array_utils)
+ CERES_TEST(autodiff)
+ CERES_TEST(autodiff_cost_function)
+ CERES_TEST(block_random_access_dense_matrix)
+ CERES_TEST(block_random_access_sparse_matrix)
+ CERES_TEST(block_sparse_matrix)
+ CERES_TEST(canonical_views_clustering)
+ CERES_TEST(compressed_row_sparse_matrix)
+ CERES_TEST(conditioned_cost_function)
+ CERES_TEST(corrector)
+ CERES_TEST(dense_sparse_matrix)
+ CERES_TEST(evaluator)
+ CERES_TEST(gradient_checker)
+ CERES_TEST(gradient_checking_cost_function)
+ CERES_TEST(graph)
+ CERES_TEST(graph_algorithms)
+ CERES_TEST(implicit_schur_complement)
+ CERES_TEST(iterative_schur_complement_solver)
+ CERES_TEST(jet)
+ CERES_TEST(levenberg_marquardt_strategy)
+ CERES_TEST(dogleg_strategy)
+ CERES_TEST(local_parameterization)
+ CERES_TEST(loss_function)
+ CERES_TEST(minimizer)
+ CERES_TEST(normal_prior)
+ CERES_TEST(numeric_diff_cost_function)
+ CERES_TEST(ordered_groups)
+ CERES_TEST(parameter_block)
+ CERES_TEST(parameter_block_ordering)
+ CERES_TEST(partitioned_matrix_view)
+ CERES_TEST(polynomial_solver)
+ CERES_TEST(problem)
+ CERES_TEST(residual_block)
+ CERES_TEST(residual_block_utils)
+ CERES_TEST(rotation)
+ CERES_TEST(runtime_numeric_diff_cost_function)
+ CERES_TEST(schur_complement_solver)
+ CERES_TEST(schur_eliminator)
+ CERES_TEST(solver_impl)
+
+ IF (${SUITESPARSE_FOUND})
+ CERES_TEST(suitesparse)
+ ENDIF (${SUITESPARSE_FOUND})
+
+ CERES_TEST(symmetric_linear_solver)
+ CERES_TEST(triplet_sparse_matrix)
+ CERES_TEST(trust_region_minimizer)
+ CERES_TEST(unsymmetric_linear_solver)
+ CERES_TEST(visibility)
+ CERES_TEST(visibility_based_preconditioner)
+
+ # Put the large end to end test last.
+ CERES_TEST(system)
+ENDIF (${BUILD_TESTING} AND ${GFLAGS})
diff --git a/internal/ceres/array_utils.cc b/internal/ceres/array_utils.cc
new file mode 100644
index 0000000..673baa4
--- /dev/null
+++ b/internal/ceres/array_utils.cc
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/array_utils.h"
+
+#include <cmath>
+#include <cstddef>
+#include "ceres/fpclassify.h"
+
+namespace ceres {
+namespace internal {
+
+// It is a near impossibility that user code generates this exact
+// value in normal operation, thus we will use it to fill arrays
+// before passing them to user code. If on return an element of the
+// array still contains this value, we will assume that the user code
+// did not write to that memory location.
+const double kImpossibleValue = 1e302;
+
+bool IsArrayValid(const int size, const double* x) {
+ if (x != NULL) {
+ for (int i = 0; i < size; ++i) {
+ if (!IsFinite(x[i]) || (x[i] == kImpossibleValue)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+void InvalidateArray(const int size, double* x) {
+ if (x != NULL) {
+ for (int i = 0; i < size; ++i) {
+ x[i] = kImpossibleValue;
+ }
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/array_utils.h b/internal/ceres/array_utils.h
new file mode 100644
index 0000000..99cc8d8
--- /dev/null
+++ b/internal/ceres/array_utils.h
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Utility routines for validating arrays.
+//
+// These are useful for detecting two common classes of errors.
+//
+// 1. Uninitialized memory - where the user for some reason did not
+// compute part of an array, but the code expects it.
+//
+// 2. Numerical failure while computing the cost/residual/jacobian,
+// e.g. NaN, infinities etc. This is particularly useful since the
+// automatic differentiation code does computations that are not
+// evident to the user and can silently generate hard to debug errors.
+
+#ifndef CERES_INTERNAL_ARRAY_UTILS_H_
+#define CERES_INTERNAL_ARRAY_UTILS_H_
+
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+// Fill the array x with an impossible value that the user code is
+// never expected to compute.
+void InvalidateArray(int size, double* x);
+
+// Check if all the entries of the array x are valid, i.e. all the
+// values in the array should be finite and none of them should be
+// equal to the "impossible" value used by InvalidateArray.
+bool IsArrayValid(int size, const double* x);
+
+extern const double kImpossibleValue;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_ARRAY_UTILS_H_
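
As a usage illustration of the header above (a minimal sketch, not part of this change; the wrapper function, its name, and its placement are hypothetical), the intended pattern is to poison an output array with InvalidateArray() before handing it to user code and to reject the result with IsArrayValid() afterwards:

    #include "ceres/array_utils.h"
    #include "ceres/cost_function.h"

    namespace ceres {
    namespace internal {

    // Hypothetical wrapper: evaluate a user-supplied cost function and detect
    // entries that are NaN, infinite, or were never written at all.
    bool EvaluateAndValidate(const CostFunction& cost_function,
                             double const* const* parameters,
                             int num_residuals,
                             double* residuals) {
      // Fill the output with the sentinel value before calling user code.
      InvalidateArray(num_residuals, residuals);
      if (!cost_function.Evaluate(parameters, residuals, NULL)) {
        return false;
      }
      // Any entry that is non-finite, or still equal to the sentinel because
      // the user code never wrote it, makes the array invalid.
      return IsArrayValid(num_residuals, residuals);
    }

    }  // namespace internal
    }  // namespace ceres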
diff --git a/internal/ceres/array_utils_test.cc b/internal/ceres/array_utils_test.cc
new file mode 100644
index 0000000..c19a44a
--- /dev/null
+++ b/internal/ceres/array_utils_test.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/array_utils.h"
+
+#include <limits>
+#include <cmath>
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(ArrayUtils, IsArrayValid) {
+ double x[3];
+ x[0] = 0.0;
+ x[1] = 1.0;
+ x[2] = 2.0;
+ EXPECT_TRUE(IsArrayValid(3, x));
+ x[1] = std::numeric_limits<double>::infinity();
+ EXPECT_FALSE(IsArrayValid(3, x));
+ x[1] = std::numeric_limits<double>::quiet_NaN();
+ EXPECT_FALSE(IsArrayValid(3, x));
+ x[1] = std::numeric_limits<double>::signaling_NaN();
+ EXPECT_FALSE(IsArrayValid(3, x));
+ EXPECT_TRUE(IsArrayValid(1, NULL));
+ InvalidateArray(3, x);
+ EXPECT_FALSE(IsArrayValid(3, x));
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/autodiff_cost_function_test.cc b/internal/ceres/autodiff_cost_function_test.cc
new file mode 100644
index 0000000..e98397a
--- /dev/null
+++ b/internal/ceres/autodiff_cost_function_test.cc
@@ -0,0 +1,146 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/autodiff_cost_function.h"
+
+#include <cstddef>
+
+#include "gtest/gtest.h"
+#include "ceres/cost_function.h"
+
+namespace ceres {
+namespace internal {
+
+class BinaryScalarCost {
+ public:
+ explicit BinaryScalarCost(double a): a_(a) {}
+ template <typename T>
+ bool operator()(const T* const x, const T* const y,
+ T* cost) const {
+ cost[0] = x[0] * y[0] + x[1] * y[1] - T(a_);
+ return true;
+ }
+ private:
+ double a_;
+};
+
+TEST(AutodiffCostFunction, BilinearDifferentiationTest) {
+ CostFunction* cost_function =
+ new AutoDiffCostFunction<BinaryScalarCost, 1, 2, 2>(
+ new BinaryScalarCost(1.0));
+
+ double** parameters = new double*[2];
+ parameters[0] = new double[2];
+ parameters[1] = new double[2];
+
+ parameters[0][0] = 1;
+ parameters[0][1] = 2;
+
+ parameters[1][0] = 3;
+ parameters[1][1] = 4;
+
+ double** jacobians = new double*[2];
+ jacobians[0] = new double[2];
+ jacobians[1] = new double[2];
+
+ double residuals = 0.0;
+
+ cost_function->Evaluate(parameters, &residuals, NULL);
+ EXPECT_EQ(10.0, residuals);
+ cost_function->Evaluate(parameters, &residuals, jacobians);
+
+ EXPECT_EQ(3, jacobians[0][0]);
+ EXPECT_EQ(4, jacobians[0][1]);
+ EXPECT_EQ(1, jacobians[1][0]);
+ EXPECT_EQ(2, jacobians[1][1]);
+
+ delete[] jacobians[0];
+ delete[] jacobians[1];
+ delete[] parameters[0];
+ delete[] parameters[1];
+ delete[] jacobians;
+ delete[] parameters;
+ delete cost_function;
+}
+
+struct TenParameterCost {
+ template <typename T>
+ bool operator()(const T* const x0,
+ const T* const x1,
+ const T* const x2,
+ const T* const x3,
+ const T* const x4,
+ const T* const x5,
+ const T* const x6,
+ const T* const x7,
+ const T* const x8,
+ const T* const x9,
+ T* cost) const {
+ cost[0] = *x0 + *x1 + *x2 + *x3 + *x4 + *x5 + *x6 + *x7 + *x8 + *x9;
+ return true;
+ }
+};
+
+TEST(AutodiffCostFunction, ManyParameterAutodiffInstantiates) {
+ CostFunction* cost_function =
+ new AutoDiffCostFunction<
+ TenParameterCost, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1>(
+ new TenParameterCost);
+
+ double** parameters = new double*[10];
+ double** jacobians = new double*[10];
+ for (int i = 0; i < 10; ++i) {
+ parameters[i] = new double[1];
+ parameters[i][0] = i;
+ jacobians[i] = new double[1];
+ }
+
+ double residuals = 0.0;
+
+ cost_function->Evaluate(parameters, &residuals, NULL);
+ EXPECT_EQ(45.0, residuals);
+
+ cost_function->Evaluate(parameters, &residuals, jacobians);
+ EXPECT_EQ(residuals, 45.0);
+ for (int i = 0; i < 10; ++i) {
+ EXPECT_EQ(1.0, jacobians[i][0]);
+ }
+
+ for (int i = 0; i < 10; ++i) {
+ delete[] jacobians[i];
+ delete[] parameters[i];
+ }
+ delete[] jacobians;
+ delete[] parameters;
+ delete cost_function;
+}
+
+} // namespace internal
+} // namespace ceres
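
The test above drives AutoDiffCostFunction through Evaluate() by hand; in client code the same functor is normally wrapped once and handed to a Problem. A minimal sketch under that assumption (standard ceres::Problem/Solve usage, reusing the BinaryScalarCost functor defined in the test above; the free function and its name are illustrative only):

    #include "ceres/autodiff_cost_function.h"
    #include "ceres/problem.h"
    #include "ceres/solver.h"

    // Illustrative only: minimize the bilinear residual from the test above,
    // starting from x = (1, 2), y = (3, 4).
    void SolveBilinearExample() {
      double x[2] = { 1.0, 2.0 };
      double y[2] = { 3.0, 4.0 };

      ceres::Problem problem;
      // 1 residual, two parameter blocks of size 2 each; ownership of the
      // cost function and the functor passes to the problem.
      problem.AddResidualBlock(
          new ceres::AutoDiffCostFunction<BinaryScalarCost, 1, 2, 2>(
              new BinaryScalarCost(1.0)),
          NULL,  // No loss function: plain squared error.
          x, y);

      ceres::Solver::Options options;
      ceres::Solver::Summary summary;
      ceres::Solve(options, &problem, &summary);
    }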
diff --git a/internal/ceres/autodiff_test.cc b/internal/ceres/autodiff_test.cc
new file mode 100644
index 0000000..d0bf97a
--- /dev/null
+++ b/internal/ceres/autodiff_test.cc
@@ -0,0 +1,406 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/internal/autodiff.h"
+
+#include "gtest/gtest.h"
+#include "ceres/random.h"
+
+namespace ceres {
+namespace internal {
+
+template <typename T> inline
+T &RowMajorAccess(T *base, int rows, int cols, int i, int j) {
+ return base[cols * i + j];
+}
+
+// Do (symmetric) finite differencing using the given function object 'b' of
+// type 'B' and scalar type 'T' with step size 'del'.
+//
+// The type B should have a signature
+//
+// bool operator()(T const *, T *) const;
+//
+// which maps a vector of parameters to a vector of outputs.
+template <typename B, typename T, int M, int N> inline
+bool SymmetricDiff(const B& b,
+ const T par[N],
+ T del, // step size.
+ T fun[M],
+ T jac[M * N]) { // row-major.
+ if (!b(par, fun)) {
+ return false;
+ }
+
+ // Temporary parameter vector.
+ T tmp_par[N];
+ for (int j = 0; j < N; ++j) {
+ tmp_par[j] = par[j];
+ }
+
+ // For each dimension, we do one forward step and one backward step in
+ // parameter space, and store the resulting output vectors below.
+ T fwd_fun[M];
+ T bwd_fun[M];
+
+ for (int j = 0; j < N; ++j) {
+ // Forward step.
+ tmp_par[j] = par[j] + del;
+ if (!b(tmp_par, fwd_fun)) {
+ return false;
+ }
+
+ // Backward step.
+ tmp_par[j] = par[j] - del;
+ if (!b(tmp_par, bwd_fun)) {
+ return false;
+ }
+
+ // Symmetric differencing:
+ // f'(a) = (f(a + h) - f(a - h)) / (2 h)
+ for (int i = 0; i < M; ++i) {
+ RowMajorAccess(jac, M, N, i, j) =
+ (fwd_fun[i] - bwd_fun[i]) / (T(2) * del);
+ }
+
+ // Restore our temporary vector.
+ tmp_par[j] = par[j];
+ }
+
+ return true;
+}
+
+template <typename A> inline
+void QuaternionToScaledRotation(A const q[4], A R[3 * 3]) {
+ // Make convenient names for elements of q.
+ A a = q[0];
+ A b = q[1];
+ A c = q[2];
+ A d = q[3];
+ // This is not to eliminate common sub-expressions, but to
+ // make the lines shorter so that they fit in 80 columns!
+ A aa = a*a;
+ A ab = a*b;
+ A ac = a*c;
+ A ad = a*d;
+ A bb = b*b;
+ A bc = b*c;
+ A bd = b*d;
+ A cc = c*c;
+ A cd = c*d;
+ A dd = d*d;
+#define R(i, j) RowMajorAccess(R, 3, 3, (i), (j))
+ R(0, 0) = aa+bb-cc-dd; R(0, 1) = A(2)*(bc-ad); R(0, 2) = A(2)*(ac+bd); // NOLINT
+ R(1, 0) = A(2)*(ad+bc); R(1, 1) = aa-bb+cc-dd; R(1, 2) = A(2)*(cd-ab); // NOLINT
+ R(2, 0) = A(2)*(bd-ac); R(2, 1) = A(2)*(ab+cd); R(2, 2) = aa-bb-cc+dd; // NOLINT
+#undef R
+}
+
+// A structure for projecting a homogeneous 3D point through a 3x4
+// camera matrix to a 2D inhomogeneous point.
+struct Projective {
+ // Function that takes P and X as separate vectors:
+ // P, X -> x
+ template <typename A>
+ bool operator()(A const P[12], A const X[4], A x[2]) const {
+ A PX[3];
+ for (int i = 0; i < 3; ++i) {
+ PX[i] = RowMajorAccess(P, 3, 4, i, 0) * X[0] +
+ RowMajorAccess(P, 3, 4, i, 1) * X[1] +
+ RowMajorAccess(P, 3, 4, i, 2) * X[2] +
+ RowMajorAccess(P, 3, 4, i, 3) * X[3];
+ }
+ if (PX[2] != 0.0) {
+ x[0] = PX[0] / PX[2];
+ x[1] = PX[1] / PX[2];
+ return true;
+ }
+ return false;
+ }
+
+ // Version that takes P and X packed in one vector:
+ //
+ // (P, X) -> x
+ //
+ template <typename A>
+ bool operator()(A const P_X[12 + 4], A x[2]) const {
+ return operator()(P_X + 0, P_X + 12, x);
+ }
+};
+
+// Test projective camera model projector.
+TEST(AutoDiff, ProjectiveCameraModel) {
+ srand(5);
+ double const tol = 1e-10; // floating-point tolerance.
+ double const del = 1e-4; // finite-difference step.
+ double const err = 1e-6; // finite-difference tolerance.
+
+ Projective b;
+
+ // Make random P and X, in a single vector.
+ double PX[12 + 4];
+ for (int i = 0; i < 12 + 4; ++i) {
+ PX[i] = RandDouble();
+ }
+
+ // Handy names for the P and X parts.
+ double *P = PX + 0;
+ double *X = PX + 12;
+
+ // Apply the mapping, to get image point b_x.
+ double b_x[2];
+ b(P, X, b_x);
+
+ // Use finite differencing to estimate the Jacobian.
+ double fd_x[2];
+ double fd_J[2 * (12 + 4)];
+ ASSERT_TRUE((SymmetricDiff<Projective, double, 2, 12 + 4>(b, PX, del,
+ fd_x, fd_J)));
+
+ for (int i = 0; i < 2; ++i) {
+ ASSERT_EQ(fd_x[i], b_x[i]);
+ }
+
+ // Use automatic differentiation to compute the Jacobian.
+ double ad_x1[2];
+ double J_PX[2 * (12 + 4)];
+ {
+ double *parameters[] = { PX };
+ double *jacobians[] = { J_PX };
+ ASSERT_TRUE((AutoDiff<Projective, double, 12 + 4>::Differentiate(
+ b, parameters, 2, ad_x1, jacobians)));
+
+ for (int i = 0; i < 2; ++i) {
+ ASSERT_NEAR(ad_x1[i], b_x[i], tol);
+ }
+ }
+
+ // Use automatic differentiation (again), with two arguments.
+ {
+ double ad_x2[2];
+ double J_P[2 * 12];
+ double J_X[2 * 4];
+ double *parameters[] = { P, X };
+ double *jacobians[] = { J_P, J_X };
+ ASSERT_TRUE((AutoDiff<Projective, double, 12, 4>::Differentiate(
+ b, parameters, 2, ad_x2, jacobians)));
+
+ for (int i = 0; i < 2; ++i) {
+ ASSERT_NEAR(ad_x2[i], b_x[i], tol);
+ }
+
+ // Now compare the jacobians we got.
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 12 + 4; ++j) {
+ ASSERT_NEAR(J_PX[(12 + 4) * i + j], fd_J[(12 + 4) * i + j], err);
+ }
+
+ for (int j = 0; j < 12; ++j) {
+ ASSERT_NEAR(J_PX[(12 + 4) * i + j], J_P[12 * i + j], tol);
+ }
+ for (int j = 0; j < 4; ++j) {
+ ASSERT_NEAR(J_PX[(12 + 4) * i + 12 + j], J_X[4 * i + j], tol);
+ }
+ }
+ }
+}
+
+// Object to implement the projection by a calibrated camera.
+struct Metric {
+ // The mapping is
+ //
+ // q, c, X -> x = dehomog(R(q) (X - c))
+ //
+ // where q is a quaternion and c is the center of projection.
+ //
+ // This function takes three input vectors.
+ template <typename A>
+ bool operator()(A const q[4], A const c[3], A const X[3], A x[2]) const {
+ A R[3 * 3];
+ QuaternionToScaledRotation(q, R);
+
+ // Convert the quaternion mapping all the way to a projective matrix.
+ A P[3 * 4];
+
+ // Set P(:, 1:3) = R
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ RowMajorAccess(P, 3, 4, i, j) = RowMajorAccess(R, 3, 3, i, j);
+ }
+ }
+
+ // Set P(:, 4) = - R c
+ for (int i = 0; i < 3; ++i) {
+ RowMajorAccess(P, 3, 4, i, 3) =
+ - (RowMajorAccess(R, 3, 3, i, 0) * c[0] +
+ RowMajorAccess(R, 3, 3, i, 1) * c[1] +
+ RowMajorAccess(R, 3, 3, i, 2) * c[2]);
+ }
+
+ A X1[4] = { X[0], X[1], X[2], A(1) };
+ Projective p;
+ return p(P, X1, x);
+ }
+
+ // A version that takes a single vector.
+ template <typename A>
+ bool operator()(A const q_c_X[4 + 3 + 3], A x[2]) const {
+ return operator()(q_c_X, q_c_X + 4, q_c_X + 4 + 3, x);
+ }
+};
+
+// This test is similar in structure to the previous one.
+TEST(AutoDiff, Metric) {
+ srand(5);
+ double const tol = 1e-10; // floating-point tolerance.
+ double const del = 1e-4; // finite-difference step.
+ double const err = 1e-5; // finite-difference tolerance.
+
+ Metric b;
+
+ // Make random parameter vector.
+ double qcX[4 + 3 + 3];
+ for (int i = 0; i < 4 + 3 + 3; ++i)
+ qcX[i] = RandDouble();
+
+ // Handy names.
+ double *q = qcX;
+ double *c = qcX + 4;
+ double *X = qcX + 4 + 3;
+
+ // Compute projection, b_x.
+ double b_x[2];
+ ASSERT_TRUE(b(q, c, X, b_x));
+
+ // Finite differencing estimate of Jacobian.
+ double fd_x[2];
+ double fd_J[2 * (4 + 3 + 3)];
+ ASSERT_TRUE((SymmetricDiff<Metric, double, 2, 4 + 3 + 3>(b, qcX, del,
+ fd_x, fd_J)));
+
+ for (int i = 0; i < 2; ++i) {
+ ASSERT_NEAR(fd_x[i], b_x[i], tol);
+ }
+
+ // Automatic differentiation.
+ double ad_x[2];
+ double J_q[2 * 4];
+ double J_c[2 * 3];
+ double J_X[2 * 3];
+ double *parameters[] = { q, c, X };
+ double *jacobians[] = { J_q, J_c, J_X };
+ ASSERT_TRUE((AutoDiff<Metric, double, 4, 3, 3>::Differentiate(
+ b, parameters, 2, ad_x, jacobians)));
+
+ for (int i = 0; i < 2; ++i) {
+ ASSERT_NEAR(ad_x[i], b_x[i], tol);
+ }
+
+ // Compare the pieces.
+ for (int i = 0; i < 2; ++i) {
+ for (int j = 0; j < 4; ++j) {
+ ASSERT_NEAR(J_q[4 * i + j], fd_J[(4 + 3 + 3) * i + j], err);
+ }
+ for (int j = 0; j < 3; ++j) {
+ ASSERT_NEAR(J_c[3 * i + j], fd_J[(4 + 3 + 3) * i + j + 4], err);
+ }
+ for (int j = 0; j < 3; ++j) {
+ ASSERT_NEAR(J_X[3 * i + j], fd_J[(4 + 3 + 3) * i + j + 4 + 3], err);
+ }
+ }
+}
+
+struct VaryingResidualFunctor {
+ template <typename T>
+ bool operator()(const T x[2], T* y) const {
+ for (int i = 0; i < num_residuals; ++i) {
+ y[i] = T(i) * x[0] * x[1] * x[1];
+ }
+ return true;
+ }
+
+ int num_residuals;
+};
+
+TEST(AutoDiff, VaryingNumberOfResidualsForOneCostFunctorType) {
+ double x[2] = { 1.0, 5.5 };
+ double *parameters[] = { x };
+ const int kMaxResiduals = 10;
+ double J_x[2 * kMaxResiduals];
+ double residuals[kMaxResiduals];
+ double *jacobians[] = { J_x };
+
+ // Use a single functor, but tweak it to produce different numbers of
+ // residuals.
+ VaryingResidualFunctor functor;
+
+ for (int num_residuals = 1; num_residuals < kMaxResiduals; ++num_residuals) {
+ // Tweak the number of residuals to produce.
+ functor.num_residuals = num_residuals;
+
+ // Run autodiff with the new number of residuals.
+ ASSERT_TRUE((AutoDiff<VaryingResidualFunctor, double, 2>::Differentiate(
+ functor, parameters, num_residuals, residuals, jacobians)));
+
+ const double kTolerance = 1e-14;
+ for (int i = 0; i < num_residuals; ++i) {
+ EXPECT_NEAR(J_x[2 * i + 0], i * x[1] * x[1], kTolerance) << "i: " << i;
+ EXPECT_NEAR(J_x[2 * i + 1], 2 * i * x[0] * x[1], kTolerance) << "i: " << i;
+ }
+ }
+}
+
+// This is a fragile test that triggers the alignment bug on
+// i686-apple-darwin10-llvm-g++-4.2 (GCC) 4.2.1. It is quite possible
+// that other combinations of operating system + compiler will
+// re-arrange the operations in this test.
+//
+// But this is the best (and only) way we know of to trigger this
+// problem for now. A more robust solution that guarantees the
+// alignment of Eigen types used for automatic differentiation would
+// be nice.
+TEST(AutoDiff, AlignedAllocationTest) {
+ // This char is used to occupy one byte on the stack, so that the
+ // next allocation is not aligned by default.
+ char y = 0;
+
+ // This is needed to prevent the compiler from optimizing y out of
+ // this function.
+ y += 1;
+
+ typedef Jet<double, 2> JetT;
+ FixedArray<JetT, (256 * 7) / sizeof(JetT)> x(3);
+
+ // Need this to makes sure that x does not get optimized out.
+ x[0] = x[0] + JetT(1.0);
+}
+
+} // namespace internal
+} // namespace ceres
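
For a hand-checkable reference, here is the same AutoDiff<>::Differentiate() entry point exercised by the tests above, applied to a deliberately tiny functor (an illustrative sketch; the functor, names, and values are made up for this example and are not part of the patch):

    #include "ceres/internal/autodiff.h"

    // f(x) = (x0 * x1, x0 + x1), one parameter block of size 2.
    struct ProductAndSumFunctor {
      template <typename T>
      bool operator()(const T* const x, T* y) const {
        y[0] = x[0] * x[1];
        y[1] = x[0] + x[1];
        return true;
      }
    };

    void DifferentiateProductAndSum() {
      double x[2] = { 2.0, 3.0 };
      double* parameters[] = { x };
      double residuals[2];
      double jacobian[2 * 2];          // Row-major: 2 outputs x 2 parameters.
      double* jacobians[] = { jacobian };

      ceres::internal::AutoDiff<ProductAndSumFunctor, double, 2>::Differentiate(
          ProductAndSumFunctor(), parameters, 2, residuals, jacobians);

      // Expected: residuals = (6, 5) and jacobian = [ 3 2 ; 1 1 ].
    }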
diff --git a/internal/ceres/block_evaluate_preparer.cc b/internal/ceres/block_evaluate_preparer.cc
new file mode 100644
index 0000000..9edc4fa
--- /dev/null
+++ b/internal/ceres/block_evaluate_preparer.cc
@@ -0,0 +1,83 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/block_evaluate_preparer.h"
+
+#include <vector>
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/casts.h"
+#include "ceres/parameter_block.h"
+#include "ceres/residual_block.h"
+#include "ceres/sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+void BlockEvaluatePreparer::Init(int const* const* jacobian_layout,
+ int max_derivatives_per_residual_block) {
+ jacobian_layout_ = jacobian_layout;
+ scratch_evaluate_preparer_.Init(max_derivatives_per_residual_block);
+}
+
+// Point the jacobian blocks directly into the block sparse matrix.
+void BlockEvaluatePreparer::Prepare(const ResidualBlock* residual_block,
+ int residual_block_index,
+ SparseMatrix* jacobian,
+ double** jacobians) {
+ // If the overall jacobian is not available, use the scratch space.
+ if (jacobian == NULL) {
+ scratch_evaluate_preparer_.Prepare(residual_block,
+ residual_block_index,
+ jacobian,
+ jacobians);
+ return;
+ }
+
+ double* jacobian_values =
+ down_cast<BlockSparseMatrix*>(jacobian)->mutable_values();
+
+ const int* jacobian_block_offset = jacobian_layout_[residual_block_index];
+ const int num_parameter_blocks = residual_block->NumParameterBlocks();
+ for (int j = 0; j < num_parameter_blocks; ++j) {
+ if (!residual_block->parameter_blocks()[j]->IsConstant()) {
+ jacobians[j] = jacobian_values + *jacobian_block_offset;
+
+ // The jacobian_block_offset can't be indexed with 'j' since the code
+ // that creates the layout strips out any blocks for inactive
+ // parameters. Instead, bump the pointer for active parameters only.
+ jacobian_block_offset++;
+ } else {
+ jacobians[j] = NULL;
+ }
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/block_evaluate_preparer.h b/internal/ceres/block_evaluate_preparer.h
new file mode 100644
index 0000000..354acc0
--- /dev/null
+++ b/internal/ceres/block_evaluate_preparer.h
@@ -0,0 +1,77 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// An evaluate preparer which puts the evaluated jacobian blocks directly
+// into their final resting place in an overall block sparse matrix.
+// The evaluator takes care to avoid evaluating the jacobian for fixed
+// parameters.
+
+#ifndef CERES_INTERNAL_BLOCK_EVALUATE_PREPARER_H_
+#define CERES_INTERNAL_BLOCK_EVALUATE_PREPARER_H_
+
+#include "ceres/scratch_evaluate_preparer.h"
+
+namespace ceres {
+namespace internal {
+
+class ResidualBlock;
+class SparseMatrix;
+
+class BlockEvaluatePreparer {
+ public:
+ // Using Init() instead of a constructor allows for allocating this structure
+ // with new[]. This is because C++ doesn't allow passing arguments to objects
+ // constructed with new[] (as opposed to plain 'new').
+ void Init(int const* const* jacobian_layout,
+ int max_derivatives_per_residual_block);
+
+ // EvaluatePreparer interface
+
+ // Point the jacobian blocks directly into the block sparse matrix, if
+ // jacobian is non-null. Otherwise, uses an internal per-thread buffer to
+ // store the jacobians temporarily.
+ void Prepare(const ResidualBlock* residual_block,
+ int residual_block_index,
+ SparseMatrix* jacobian,
+ double** jacobians);
+
+ private:
+ int const* const* jacobian_layout_;
+
+ // For the case that the overall jacobian is not available, but the
+ // individual jacobians are requested, use a pass-through scratch evaluate
+ // preparer.
+ ScratchEvaluatePreparer scratch_evaluate_preparer_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_BLOCK_EVALUATE_PREPARER_H_
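
The Init() comment above describes a two-phase construction pattern; a short sketch of how a caller might rely on it (the helper function, its name, and its arguments are hypothetical and only illustrate the design choice):

    #include "ceres/block_evaluate_preparer.h"

    // Hypothetical helper showing why Init() exists: new[] cannot pass
    // constructor arguments, so construction and initialization are split.
    ceres::internal::BlockEvaluatePreparer* CreatePreparers(
        int num_threads,
        int const* const* jacobian_layout,
        int max_derivatives_per_residual_block) {
      ceres::internal::BlockEvaluatePreparer* preparers =
          new ceres::internal::BlockEvaluatePreparer[num_threads];
      for (int i = 0; i < num_threads; ++i) {
        preparers[i].Init(jacobian_layout, max_derivatives_per_residual_block);
      }
      return preparers;  // Caller owns the array; release with delete[].
    }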
diff --git a/internal/ceres/block_jacobi_preconditioner.cc b/internal/ceres/block_jacobi_preconditioner.cc
new file mode 100644
index 0000000..474c37f
--- /dev/null
+++ b/internal/ceres/block_jacobi_preconditioner.cc
@@ -0,0 +1,135 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/block_jacobi_preconditioner.h"
+
+#include "Eigen/Cholesky"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/casts.h"
+#include "ceres/integral_types.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+BlockJacobiPreconditioner::BlockJacobiPreconditioner(const LinearOperator& A)
+ : num_rows_(A.num_rows()),
+ block_structure_(
+ *(down_cast<const BlockSparseMatrix*>(&A)->block_structure())) {
+ // Calculate the amount of storage needed.
+ int storage_needed = 0;
+ for (int c = 0; c < block_structure_.cols.size(); ++c) {
+ int size = block_structure_.cols[c].size;
+ storage_needed += size * size;
+ }
+
+ // Size the offsets and storage.
+ blocks_.resize(block_structure_.cols.size());
+ block_storage_.resize(storage_needed);
+
+ // Put pointers to the storage in the offsets.
+ double* block_cursor = &block_storage_[0];
+ for (int c = 0; c < block_structure_.cols.size(); ++c) {
+ int size = block_structure_.cols[c].size;
+ blocks_[c] = block_cursor;
+ block_cursor += size * size;
+ }
+}
+
+BlockJacobiPreconditioner::~BlockJacobiPreconditioner() {
+}
+
+void BlockJacobiPreconditioner::Update(const LinearOperator& matrix, const double* D) {
+ const BlockSparseMatrix& A = *(down_cast<const BlockSparseMatrix*>(&matrix));
+ const CompressedRowBlockStructure* bs = A.block_structure();
+
+ // Compute the diagonal blocks by block inner products.
+ std::fill(block_storage_.begin(), block_storage_.end(), 0.0);
+ for (int r = 0; r < bs->rows.size(); ++r) {
+ const int row_block_size = bs->rows[r].block.size;
+ const vector<Cell>& cells = bs->rows[r].cells;
+ const double* row_values = A.RowBlockValues(r);
+ for (int c = 0; c < cells.size(); ++c) {
+ const int col_block_size = bs->cols[cells[c].block_id].size;
+ ConstMatrixRef m(row_values + cells[c].position,
+ row_block_size,
+ col_block_size);
+
+ MatrixRef(blocks_[cells[c].block_id],
+ col_block_size,
+ col_block_size).noalias() += m.transpose() * m;
+
+ // TODO(keir): Figure out when the below expression is actually faster
+ // than doing the full rank update. The issue is that for smaller sizes,
+ // the rankUpdate() function is slower than the full product done above.
+ //
+ // On the typical bundling problems, the above product is ~5% faster.
+ //
+ // MatrixRef(blocks_[cells[c].block_id],
+ // col_block_size,
+ // col_block_size).selfadjointView<Eigen::Upper>().rankUpdate(m);
+ //
+ }
+ }
+
+ // Add the diagonal and invert each block.
+ for (int c = 0; c < bs->cols.size(); ++c) {
+ const int size = block_structure_.cols[c].size;
+ const int position = block_structure_.cols[c].position;
+ MatrixRef block(blocks_[c], size, size);
+
+ if (D != NULL) {
+      block.diagonal() +=
+          ConstVectorRef(D + position, size).array().square().matrix();
+ }
+
+ block = block.selfadjointView<Eigen::Upper>()
+ .ldlt()
+ .solve(Matrix::Identity(size, size));
+ }
+}
+
+void BlockJacobiPreconditioner::RightMultiply(const double* x, double* y) const {
+ for (int c = 0; c < block_structure_.cols.size(); ++c) {
+ const int size = block_structure_.cols[c].size;
+ const int position = block_structure_.cols[c].position;
+ ConstMatrixRef D(blocks_[c], size, size);
+ ConstVectorRef x_block(x + position, size);
+ VectorRef y_block(y + position, size);
+ y_block += D * x_block;
+ }
+}
+
+void BlockJacobiPreconditioner::LeftMultiply(const double* x, double* y) const {
+ RightMultiply(x, y);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/block_jacobi_preconditioner.h b/internal/ceres/block_jacobi_preconditioner.h
new file mode 100644
index 0000000..51f2655
--- /dev/null
+++ b/internal/ceres/block_jacobi_preconditioner.h
@@ -0,0 +1,84 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_BLOCK_JACOBI_PRECONDITIONER_H_
+#define CERES_INTERNAL_BLOCK_JACOBI_PRECONDITIONER_H_
+
+#include <vector>
+#include "ceres/linear_operator.h"
+
+namespace ceres {
+namespace internal {
+
+struct CompressedRowBlockStructure;
+class LinearOperator;
+class SparseMatrix;
+
+// A block Jacobi preconditioner. This is intended for use with conjugate
+// gradients, or other iterative symmetric solvers. To use the preconditioner,
+// create one by passing a BlockSparseMatrix as the linear operator "A" to the
+// constructor. This fixes the sparsity pattern to the pattern of the matrix
+// A^TA.
+//
+// Before each use of the preconditioner in a solve with conjugate gradients,
+// update the matrix by running Update(A, D). The values of the matrix A are
+// inspected to construct the preconditioner. The vector D is applied as the
+// D^TD diagonal term.
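+//
+// A minimal usage sketch (illustrative only; jacobian, D, x and y are assumed
+// to be set up by the caller, with jacobian a BlockSparseMatrix):
+//
+//   BlockJacobiPreconditioner preconditioner(jacobian);
+//   preconditioner.Update(jacobian, D);   // D may be NULL.
+//   preconditioner.RightMultiply(x, y);   // Accumulates M^{-1} x into y.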
+class BlockJacobiPreconditioner : public LinearOperator {
+ public:
+ // A must remain valid while the BlockJacobiPreconditioner is.
+ BlockJacobiPreconditioner(const LinearOperator& A);
+ virtual ~BlockJacobiPreconditioner();
+
+ // Update the preconditioner with the values found in A. The sparsity pattern
+ // must match that of the A passed to the constructor. D is a vector that
+ // must have the same number of rows as A, and is applied as a diagonal in
+ // addition to the block diagonals of A.
+ void Update(const LinearOperator& A, const double* D);
+
+ // LinearOperator interface.
+ virtual void RightMultiply(const double* x, double* y) const;
+ virtual void LeftMultiply(const double* x, double* y) const;
+ virtual int num_rows() const { return num_rows_; }
+ virtual int num_cols() const { return num_rows_; }
+
+ private:
+ std::vector<double*> blocks_;
+ std::vector<double> block_storage_;
+ int num_rows_;
+
+ // The block structure of the matrix this preconditioner is for (e.g. J).
+ const CompressedRowBlockStructure& block_structure_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_BLOCK_JACOBI_PRECONDITIONER_H_
diff --git a/internal/ceres/block_jacobian_writer.cc b/internal/ceres/block_jacobian_writer.cc
new file mode 100644
index 0000000..f90c350
--- /dev/null
+++ b/internal/ceres/block_jacobian_writer.cc
@@ -0,0 +1,212 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/block_jacobian_writer.h"
+
+#include "ceres/block_evaluate_preparer.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/internal/scoped_ptr.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
+// Given the residual block ordering, build a lookup table to determine which
+// per-parameter jacobian goes where in the overall program jacobian.
+//
+// Since we expect to use a Schur type linear solver to solve the LM step, take
+// extra care to place the E blocks and the F blocks contiguously. E blocks are
+// the first num_eliminate_blocks parameter blocks as indicated by the parameter
+// block ordering. The remaining parameter blocks are the F blocks.
+//
+// TODO(keir): Consider if we should use a boolean for each parameter block
+// instead of num_eliminate_blocks.
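+//
+// As a small worked example (hypothetical sizes, not from any particular
+// problem): with num_eliminate_blocks = 1, a residual block with 2 residuals
+// that depends on an E block of size 3 and an F block of size 4 contributes a
+// 2 x 3 = 6 entry E jacobian placed before f_block_pos, and a 2 x 4 = 8 entry
+// F jacobian placed at or after f_block_pos.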
+void BuildJacobianLayout(const Program& program,
+ int num_eliminate_blocks,
+ vector<int*>* jacobian_layout,
+ vector<int>* jacobian_layout_storage) {
+ const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
+
+  // Iterate over all the active residual blocks and determine how much
+  // storage the E block jacobians need; this determines where the F blocks
+  // start in the jacobian values array. Also count the number of jacobian
+  // blocks.
+ int f_block_pos = 0;
+ int num_jacobian_blocks = 0;
+ for (int i = 0; i < residual_blocks.size(); ++i) {
+ ResidualBlock* residual_block = residual_blocks[i];
+ const int num_residuals = residual_block->NumResiduals();
+ const int num_parameter_blocks = residual_block->NumParameterBlocks();
+
+ // Advance f_block_pos over each E block for this residual.
+ for (int j = 0; j < num_parameter_blocks; ++j) {
+ ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
+ if (!parameter_block->IsConstant()) {
+ // Only count blocks for active parameters.
+ num_jacobian_blocks++;
+ if (parameter_block->index() < num_eliminate_blocks) {
+ f_block_pos += num_residuals * parameter_block->LocalSize();
+ }
+ }
+ }
+ }
+
+ // We now know that the E blocks are laid out starting at zero, and the F
+ // blocks are laid out starting at f_block_pos. Iterate over the residual
+ // blocks again, and this time fill the jacobian_layout array with the
+ // position information.
+
+ jacobian_layout->resize(program.NumResidualBlocks());
+ jacobian_layout_storage->resize(num_jacobian_blocks);
+
+ int e_block_pos = 0;
+ int* jacobian_pos = &(*jacobian_layout_storage)[0];
+ for (int i = 0; i < residual_blocks.size(); ++i) {
+ const ResidualBlock* residual_block = residual_blocks[i];
+ const int num_residuals = residual_block->NumResiduals();
+ const int num_parameter_blocks = residual_block->NumParameterBlocks();
+
+ (*jacobian_layout)[i] = jacobian_pos;
+ for (int j = 0; j < num_parameter_blocks; ++j) {
+ ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
+ const int parameter_block_index = parameter_block->index();
+ if (parameter_block->IsConstant()) {
+ continue;
+ }
+ const int jacobian_block_size =
+ num_residuals * parameter_block->LocalSize();
+ if (parameter_block_index < num_eliminate_blocks) {
+ *jacobian_pos = e_block_pos;
+ e_block_pos += jacobian_block_size;
+ } else {
+ *jacobian_pos = f_block_pos;
+ f_block_pos += jacobian_block_size;
+ }
+ jacobian_pos++;
+ }
+ }
+}
+
+} // namespace
+
+BlockJacobianWriter::BlockJacobianWriter(const Evaluator::Options& options,
+ Program* program)
+ : program_(program) {
+  CHECK_GE(options.num_eliminate_blocks, 0)
+      << "num_eliminate_blocks must be greater than or equal to 0.";
+
+ BuildJacobianLayout(*program,
+ options.num_eliminate_blocks,
+ &jacobian_layout_,
+ &jacobian_layout_storage_);
+}
+
+// Create evaluate preparers that point directly into the final jacobian. This
+// makes the final Write() a nop.
+BlockEvaluatePreparer* BlockJacobianWriter::CreateEvaluatePreparers(
+ int num_threads) {
+ int max_derivatives_per_residual_block =
+ program_->MaxDerivativesPerResidualBlock();
+
+ BlockEvaluatePreparer* preparers = new BlockEvaluatePreparer[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ preparers[i].Init(&jacobian_layout_[0], max_derivatives_per_residual_block);
+ }
+ return preparers;
+}
+
+SparseMatrix* BlockJacobianWriter::CreateJacobian() const {
+ CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
+
+ const vector<ParameterBlock*>& parameter_blocks =
+ program_->parameter_blocks();
+
+ // Construct the column blocks.
+ bs->cols.resize(parameter_blocks.size());
+ for (int i = 0, cursor = 0; i < parameter_blocks.size(); ++i) {
+ CHECK_NE(parameter_blocks[i]->index(), -1);
+ CHECK(!parameter_blocks[i]->IsConstant());
+ bs->cols[i].size = parameter_blocks[i]->LocalSize();
+ bs->cols[i].position = cursor;
+ cursor += bs->cols[i].size;
+ }
+
+ // Construct the cells in each row.
+ const vector<ResidualBlock*>& residual_blocks =
+ program_->residual_blocks();
+ int row_block_position = 0;
+ bs->rows.resize(residual_blocks.size());
+ for (int i = 0; i < residual_blocks.size(); ++i) {
+ const ResidualBlock* residual_block = residual_blocks[i];
+ CompressedRow* row = &bs->rows[i];
+
+ row->block.size = residual_block->NumResiduals();
+ row->block.position = row_block_position;
+ row_block_position += row->block.size;
+
+ // Size the row by the number of active parameters in this residual.
+ const int num_parameter_blocks = residual_block->NumParameterBlocks();
+ int num_active_parameter_blocks = 0;
+ for (int j = 0; j < num_parameter_blocks; ++j) {
+ if (residual_block->parameter_blocks()[j]->index() != -1) {
+ num_active_parameter_blocks++;
+ }
+ }
+ row->cells.resize(num_active_parameter_blocks);
+
+ // Add layout information for the active parameters in this row.
+ for (int j = 0, k = 0; j < num_parameter_blocks; ++j) {
+ const ParameterBlock* parameter_block =
+ residual_block->parameter_blocks()[j];
+ if (!parameter_block->IsConstant()) {
+ Cell& cell = row->cells[k];
+ cell.block_id = parameter_block->index();
+ cell.position = jacobian_layout_[i][k];
+
+ // Only increment k for active parameters, since there is only layout
+ // information for active parameters.
+ k++;
+ }
+ }
+
+ sort(row->cells.begin(), row->cells.end(), CellLessThan);
+ }
+
+ BlockSparseMatrix* jacobian = new BlockSparseMatrix(bs);
+ CHECK_NOTNULL(jacobian);
+ return jacobian;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/block_jacobian_writer.h b/internal/ceres/block_jacobian_writer.h
new file mode 100644
index 0000000..140c721
--- /dev/null
+++ b/internal/ceres/block_jacobian_writer.h
@@ -0,0 +1,127 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// A jacobian writer that writes to block sparse matrices. The "writer" name is
+// misleading, since the Write() operation on the block jacobian writer does not
+// write anything. Instead, the Prepare() method on the BlockEvaluatePreparers
+// makes a jacobians array which has direct pointers into the block sparse
+// jacobian. When the cost function is evaluated, the jacobian blocks get placed
+// directly in their final location.
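+//
+// A minimal usage sketch (illustrative only; options, program and num_threads
+// are assumed to be configured by the evaluator that owns the writer):
+//
+//   BlockJacobianWriter writer(options, program);
+//   scoped_ptr<SparseMatrix> jacobian(writer.CreateJacobian());
+//   scoped_array<BlockEvaluatePreparer> preparers(
+//       writer.CreateEvaluatePreparers(num_threads));
+//   // Each preparer fills a jacobians array with pointers into the values of
+//   // jacobian, so cost function evaluation writes blocks in place and
+//   // Write() is a no-op.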
+
+#ifndef CERES_INTERNAL_BLOCK_JACOBIAN_WRITER_H_
+#define CERES_INTERNAL_BLOCK_JACOBIAN_WRITER_H_
+
+#include <vector>
+#include "ceres/evaluator.h"
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockEvaluatePreparer;
+class Program;
+class SparseMatrix;
+
+class BlockJacobianWriter {
+ public:
+ BlockJacobianWriter(const Evaluator::Options& options,
+ Program* program);
+
+ // JacobianWriter interface.
+
+  // Create evaluate preparers that point directly into the final jacobian.
+ // This makes the final Write() a nop.
+ BlockEvaluatePreparer* CreateEvaluatePreparers(int num_threads);
+
+ SparseMatrix* CreateJacobian() const;
+
+ void Write(int /* residual_id */,
+ int /* residual_offset */,
+ double** /* jacobians */,
+ SparseMatrix* /* jacobian */) {
+ // This is a noop since the blocks were written directly into their final
+ // position by the outside evaluate call, thanks to the jacobians array
+ // prepared by the BlockEvaluatePreparers.
+ }
+
+ private:
+ Program* program_;
+
+ // Stores the position of each residual / parameter jacobian.
+ //
+ // The block sparse matrix that this writer writes to is stored as a set of
+  // contiguous dense blocks, one after another; see BlockSparseMatrix. The
+ // "double* values_" member of the block sparse matrix contains all of these
+ // blocks. Given a pointer to the first element of a block and the size of
+ // that block, it's possible to write to it.
+ //
+ // In the case of a block sparse jacobian, the jacobian writer needs a way to
+ // find the offset in the values_ array of each residual/parameter jacobian
+ // block.
+ //
+ // That is the purpose of jacobian_layout_.
+ //
+ // In particular, jacobian_layout_[i][j] is the offset in the values_ array of
+ // the derivative of residual block i with respect to the parameter block at
+ // active argument position j.
+ //
+ // The active qualifier means that non-active parameters do not count. Care
+ // must be taken when indexing into jacobian_layout_ to account for this.
+ // Consider a single residual example:
+ //
+ // r(x, y, z)
+ //
+ // with r in R^3, x in R^4, y in R^2, and z in R^5.
+ // Take y as a constant (non-active) parameter.
+ // Take r as residual number 0.
+ //
+ // In this case, the active arguments are only (x, z), so the active argument
+ // position for x is 0, and the active argument position for z is 1. This is
+ // similar to thinking of r as taking only 2 parameters:
+ //
+ // r(x, z)
+ //
+ // There are only 2 jacobian blocks: dr/dx and dr/dz. jacobian_layout_ would
+ // have the following contents:
+ //
+ // jacobian_layout_[0] = { 0, 12 }
+ //
+ // which indicates that dr/dx is located at values_[0], and dr/dz is at
+ // values_[12]. See BlockEvaluatePreparer::Prepare()'s comments about 'j'.
+ vector<int*> jacobian_layout_;
+
+ // The pointers in jacobian_layout_ point directly into this vector.
+ vector<int> jacobian_layout_storage_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_BLOCK_JACOBIAN_WRITER_H_
diff --git a/internal/ceres/block_random_access_dense_matrix.cc b/internal/ceres/block_random_access_dense_matrix.cc
new file mode 100644
index 0000000..aedfc74
--- /dev/null
+++ b/internal/ceres/block_random_access_dense_matrix.cc
@@ -0,0 +1,83 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/block_random_access_dense_matrix.h"
+
+#include <vector>
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+BlockRandomAccessDenseMatrix::BlockRandomAccessDenseMatrix(
+ const vector<int>& blocks) {
+ block_layout_.resize(blocks.size(), 0);
+ num_rows_ = 0;
+ for (int i = 0; i < blocks.size(); ++i) {
+ block_layout_[i] = num_rows_;
+ num_rows_ += blocks[i];
+ }
+
+ values_.reset(new double[num_rows_ * num_rows_]);
+ CHECK_NOTNULL(values_.get());
+ cell_info_.values = values_.get();
+ SetZero();
+}
+
+// Assume that the user does not hold any locks on any cell blocks
+// when the matrix is being destroyed.
+BlockRandomAccessDenseMatrix::~BlockRandomAccessDenseMatrix() {
+}
+
+CellInfo* BlockRandomAccessDenseMatrix::GetCell(const int row_block_id,
+ const int col_block_id,
+ int* row,
+ int* col,
+ int* row_stride,
+ int* col_stride) {
+ *row = block_layout_[row_block_id];
+ *col = block_layout_[col_block_id];
+ *row_stride = num_rows_;
+ *col_stride = num_rows_;
+ return &cell_info_;
+}
+
+// Assume that the user does not hold any locks on any cell blocks
+// when they are calling SetZero.
+void BlockRandomAccessDenseMatrix::SetZero() {
+ if (num_rows_) {
+ VectorRef(values_.get(), num_rows_ * num_rows_).setZero();
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/block_random_access_dense_matrix.h b/internal/ceres/block_random_access_dense_matrix.h
new file mode 100644
index 0000000..9f27a4c
--- /dev/null
+++ b/internal/ceres/block_random_access_dense_matrix.h
@@ -0,0 +1,98 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DENSE_MATRIX_H_
+#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DENSE_MATRIX_H_
+
+#include "ceres/block_random_access_matrix.h"
+
+#include <vector>
+
+#include "ceres/internal/macros.h"
+#include "ceres/internal/port.h"
+#include "ceres/internal/scoped_ptr.h"
+
+namespace ceres {
+namespace internal {
+
+// A square block random accessible matrix with the same row and
+// column block structure. All cells are stored in the same single
+// array, so that it is also accessible as a dense matrix of size
+// num_rows x num_cols.
+//
+// This class is NOT thread safe. Since all n^2 cells are stored,
+// GetCell never returns NULL for any (row_block_id, col_block_id)
+// pair.
+//
+// ReturnCell is a nop.
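+//
+// A minimal usage sketch (illustrative only):
+//
+//   vector<int> blocks;
+//   blocks.push_back(2);
+//   blocks.push_back(3);                  // A 5 x 5 dense storage array.
+//   BlockRandomAccessDenseMatrix m(blocks);
+//   int row, col, row_stride, col_stride;
+//   CellInfo* cell = m.GetCell(0, 1, &row, &col, &row_stride, &col_stride);
+//   MatrixRef(cell->values, row_stride, col_stride)
+//       .block(row, col, blocks[0], blocks[1]).setConstant(1.0);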
+class BlockRandomAccessDenseMatrix : public BlockRandomAccessMatrix {
+ public:
+ // blocks is a vector of block sizes. The resulting matrix has
+ // blocks.size() * blocks.size() cells.
+ explicit BlockRandomAccessDenseMatrix(const vector<int>& blocks);
+
+ // The destructor is not thread safe. It assumes that no one is
+ // modifying any cells when the matrix is being destroyed.
+ virtual ~BlockRandomAccessDenseMatrix();
+
+ // BlockRandomAccessMatrix interface.
+ virtual CellInfo* GetCell(int row_block_id,
+ int col_block_id,
+ int* row,
+ int* col,
+ int* row_stride,
+ int* col_stride);
+
+ // This is not a thread safe method, it assumes that no cell is
+ // locked.
+ virtual void SetZero();
+
+ // Since the matrix is square with the same row and column block
+ // structure, num_rows() = num_cols().
+ virtual int num_rows() const { return num_rows_; }
+ virtual int num_cols() const { return num_rows_; }
+
+ // The underlying matrix storing the cells.
+ const double* values() const { return values_.get(); }
+ double* mutable_values() { return values_.get(); }
+
+ private:
+ CellInfo cell_info_;
+ int num_rows_;
+ vector<int> block_layout_;
+ scoped_array<double> values_;
+
+ CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessDenseMatrix);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DENSE_MATRIX_H_
diff --git a/internal/ceres/block_random_access_dense_matrix_test.cc b/internal/ceres/block_random_access_dense_matrix_test.cc
new file mode 100644
index 0000000..359eb93
--- /dev/null
+++ b/internal/ceres/block_random_access_dense_matrix_test.cc
@@ -0,0 +1,115 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <vector>
+#include "gtest/gtest.h"
+#include "ceres/block_random_access_dense_matrix.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(BlockRandomAccessDenseMatrix, GetCell) {
+ vector<int> blocks;
+ blocks.push_back(3);
+ blocks.push_back(4);
+ blocks.push_back(5);
+ const int num_rows = 3 + 4 + 5;
+ BlockRandomAccessDenseMatrix m(blocks);
+ EXPECT_EQ(m.num_rows(), num_rows);
+ EXPECT_EQ(m.num_cols(), num_rows);
+
+ int row_idx = 0;
+ for (int i = 0; i < blocks.size(); ++i) {
+ int col_idx = 0;
+ for (int j = 0; j < blocks.size(); ++j) {
+ int row;
+ int col;
+ int row_stride;
+ int col_stride;
+ CellInfo* cell =
+ m.GetCell(i, j, &row, &col, &row_stride, &col_stride);
+
+ EXPECT_TRUE(cell != NULL);
+ EXPECT_EQ(row, row_idx);
+ EXPECT_EQ(col, col_idx);
+ EXPECT_EQ(row_stride, 3 + 4 + 5);
+ EXPECT_EQ(col_stride, 3 + 4 + 5);
+ col_idx += blocks[j];
+ }
+ row_idx += blocks[i];
+ }
+}
+
+TEST(BlockRandomAccessDenseMatrix, WriteCell) {
+ vector<int> blocks;
+ blocks.push_back(3);
+ blocks.push_back(4);
+ blocks.push_back(5);
+ const int num_rows = 3 + 4 + 5;
+
+ BlockRandomAccessDenseMatrix m(blocks);
+
+ // Fill the cell (i,j) with (i + 1) * (j + 1)
+ for (int i = 0; i < blocks.size(); ++i) {
+ for (int j = 0; j < blocks.size(); ++j) {
+ int row;
+ int col;
+ int row_stride;
+ int col_stride;
+ CellInfo* cell = m.GetCell(
+ i, j, &row, &col, &row_stride, &col_stride);
+ MatrixRef(cell->values, row_stride, col_stride).block(
+ row, col, blocks[i], blocks[j]) =
+ (i+1) * (j+1) * Matrix::Ones(blocks[i], blocks[j]);
+ }
+ }
+
+ // Check the values in the array are correct by going over the
+ // entries of each block manually.
+ int row_idx = 0;
+ for (int i = 0; i < blocks.size(); ++i) {
+ int col_idx = 0;
+ for (int j = 0; j < blocks.size(); ++j) {
+ // Check the values of this block.
+ for (int r = 0; r < blocks[i]; ++r) {
+ for (int c = 0; c < blocks[j]; ++c) {
+          const int pos = (row_idx + r) * num_rows + col_idx + c;
+ EXPECT_EQ(m.values()[pos], (i + 1) * (j + 1));
+ }
+ }
+ col_idx += blocks[j];
+ }
+ row_idx += blocks[i];
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/block_random_access_matrix.cc b/internal/ceres/block_random_access_matrix.cc
new file mode 100644
index 0000000..58fe4a1
--- /dev/null
+++ b/internal/ceres/block_random_access_matrix.cc
@@ -0,0 +1,40 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/block_random_access_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+BlockRandomAccessMatrix::~BlockRandomAccessMatrix() {
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/block_random_access_matrix.h b/internal/ceres/block_random_access_matrix.h
new file mode 100644
index 0000000..b76cb78
--- /dev/null
+++ b/internal/ceres/block_random_access_matrix.h
@@ -0,0 +1,132 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Interface for matrices that allow block based random access.
+
+#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_MATRIX_H_
+#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_MATRIX_H_
+
+#include "ceres/mutex.h"
+
+namespace ceres {
+namespace internal {
+
+// A matrix implementing the BlockRandomAccessMatrix interface is a
+// matrix whose rows and columns are divided into blocks. For example
+// the matrix A:
+//
+// 3 4 5
+// A = 5 [c_11 c_12 c_13]
+// 4 [c_21 c_22 c_23]
+//
+// has row blocks of size 5 and 4, and column blocks of size 3, 4 and
+// 5. It has six cells corresponding to the six row-column block
+// combinations.
+//
+// BlockRandomAccessMatrix objects provide access to cells c_ij using
+// the GetCell method. When a cell is present, GetCell returns a
+// CellInfo object containing a pointer to an array which contains the
+// cell as a submatrix and a mutex that guards this submatrix. Users
+// accessing the matrix concurrently are responsible for using the
+// mutex to serialize writes to the cell.
+//
+// There is no requirement that all cells be present, i.e. the matrix
+// itself can be block sparse. When a cell is not present, the GetCell
+// method will return a NULL pointer.
+//
+// There is no requirement about how the cells are stored beyond that
+// they form a dense submatrix of a larger dense matrix. Like everywhere
+// else in Ceres, row-major storage is assumed.
+//
+// Example usage:
+//
+// BlockRandomAccessMatrix* A = new BlockRandomAccessMatrixSubClass(...)
+//
+// int row, col, row_stride, col_stride;
+// CellInfo* cell = A->GetCell(row_block_id, col_block_id,
+// &row, &col,
+// &row_stride, &col_stride);
+//
+// if (cell != NULL) {
+// MatrixRef m(cell->values, row_stride, col_stride);
+// CeresMutexLock l(&cell->m);
+// m.block(row, col, row_block_size, col_block_size) = ...
+// }
+
+// Structure to carry a pointer to the array containing a cell and the
+// Mutex guarding it.
+struct CellInfo {
+ CellInfo()
+ : values(NULL) {
+ }
+
+ explicit CellInfo(double* ptr)
+ : values(ptr) {
+ }
+
+ double* values;
+ Mutex m;
+};
+
+class BlockRandomAccessMatrix {
+ public:
+ virtual ~BlockRandomAccessMatrix();
+
+ // If the cell (row_block_id, col_block_id) is present, then return
+ // a CellInfo with a pointer to the dense matrix containing it,
+ // otherwise return NULL. The dense matrix containing this cell has
+ // size row_stride, col_stride and the cell is located at position
+ // (row, col) within this matrix.
+ //
+  // The size of the cell, row_block_size x col_block_size, is assumed
+  // known to the caller. row_block_size is less than or equal to
+  // row_stride, and col_block_size is less than or equal to col_stride.
+ virtual CellInfo* GetCell(int row_block_id,
+ int col_block_id,
+ int* row,
+ int* col,
+ int* row_stride,
+ int* col_stride) = 0;
+
+ // Zero out the values of the array. The structure of the matrix
+ // (size and sparsity) is preserved.
+ virtual void SetZero() = 0;
+
+  // Number of scalar rows and columns in the matrix, i.e. the sum of
+  // all row block and column block sizes respectively.
+ virtual int num_rows() const = 0;
+ virtual int num_cols() const = 0;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_BLOCK_RANDOM_ACCESS_MATRIX_H_
diff --git a/internal/ceres/block_random_access_sparse_matrix.cc b/internal/ceres/block_random_access_sparse_matrix.cc
new file mode 100644
index 0000000..f789436
--- /dev/null
+++ b/internal/ceres/block_random_access_sparse_matrix.cc
@@ -0,0 +1,158 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/block_random_access_sparse_matrix.h"
+
+#include <algorithm>
+#include <set>
+#include <utility>
+#include <vector>
+#include "ceres/internal/port.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/mutex.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
+ const vector<int>& blocks,
+ const set<pair<int, int> >& block_pairs)
+ : kMaxRowBlocks(10 * 1000 * 1000),
+ blocks_(blocks) {
+ CHECK_LT(blocks.size(), kMaxRowBlocks);
+
+ // Build the row/column layout vector and count the number of scalar
+ // rows/columns.
+ int num_cols = 0;
+ vector<int> col_layout;
+ for (int i = 0; i < blocks_.size(); ++i) {
+ col_layout.push_back(num_cols);
+ num_cols += blocks_[i];
+ }
+
+  // Count the number of scalar non-zero entries; the layout object for
+  // looking into the values array of the TripletSparseMatrix is built
+  // below, once the matrix has been allocated.
+ int num_nonzeros = 0;
+ for (set<pair<int, int> >::const_iterator it = block_pairs.begin();
+ it != block_pairs.end();
+ ++it) {
+ const int row_block_size = blocks_[it->first];
+ const int col_block_size = blocks_[it->second];
+ num_nonzeros += row_block_size * col_block_size;
+ }
+
+ VLOG(1) << "Matrix Size [" << num_cols
+ << "," << num_cols
+ << "] " << num_nonzeros;
+
+ tsm_.reset(new TripletSparseMatrix(num_cols, num_cols, num_nonzeros));
+ tsm_->set_num_nonzeros(num_nonzeros);
+ int* rows = tsm_->mutable_rows();
+ int* cols = tsm_->mutable_cols();
+ double* values = tsm_->mutable_values();
+
+ int pos = 0;
+ for (set<pair<int, int> >::const_iterator it = block_pairs.begin();
+ it != block_pairs.end();
+ ++it) {
+ const int row_block_size = blocks_[it->first];
+ const int col_block_size = blocks_[it->second];
+ layout_[IntPairToLong(it->first, it->second)] =
+ new CellInfo(values + pos);
+ pos += row_block_size * col_block_size;
+ }
+
+ // Fill the sparsity pattern of the underlying matrix.
+ for (set<pair<int, int> >::const_iterator it = block_pairs.begin();
+ it != block_pairs.end();
+ ++it) {
+ const int row_block_id = it->first;
+ const int col_block_id = it->second;
+ const int row_block_size = blocks_[row_block_id];
+ const int col_block_size = blocks_[col_block_id];
+ int pos =
+ layout_[IntPairToLong(row_block_id, col_block_id)]->values - values;
+ for (int r = 0; r < row_block_size; ++r) {
+ for (int c = 0; c < col_block_size; ++c, ++pos) {
+ rows[pos] = col_layout[row_block_id] + r;
+ cols[pos] = col_layout[col_block_id] + c;
+ values[pos] = 1.0;
+ DCHECK_LT(rows[pos], tsm_->num_rows());
+ DCHECK_LT(cols[pos], tsm_->num_rows());
+ }
+ }
+ }
+}
+
+// Assume that the user does not hold any locks on any cell blocks
+// when the matrix is being destroyed.
+BlockRandomAccessSparseMatrix::~BlockRandomAccessSparseMatrix() {
+ for (LayoutType::iterator it = layout_.begin();
+ it != layout_.end();
+ ++it) {
+ delete it->second;
+ }
+}
+
+CellInfo* BlockRandomAccessSparseMatrix::GetCell(int row_block_id,
+ int col_block_id,
+ int* row,
+ int* col,
+ int* row_stride,
+ int* col_stride) {
+ const LayoutType::iterator it =
+ layout_.find(IntPairToLong(row_block_id, col_block_id));
+ if (it == layout_.end()) {
+ return NULL;
+ }
+
+ // Each cell is stored contiguously as its own little dense matrix.
+ *row = 0;
+ *col = 0;
+ *row_stride = blocks_[row_block_id];
+ *col_stride = blocks_[col_block_id];
+ return it->second;
+}
+
+// Assume that the user does not hold any locks on any cell blocks
+// when they are calling SetZero.
+void BlockRandomAccessSparseMatrix::SetZero() {
+ if (tsm_->num_nonzeros()) {
+ VectorRef(tsm_->mutable_values(),
+ tsm_->num_nonzeros()).setZero();
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/block_random_access_sparse_matrix.h b/internal/ceres/block_random_access_sparse_matrix.h
new file mode 100644
index 0000000..48a0043
--- /dev/null
+++ b/internal/ceres/block_random_access_sparse_matrix.h
@@ -0,0 +1,111 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_SPARSE_MATRIX_H_
+#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_SPARSE_MATRIX_H_
+
+#include <set>
+#include <vector>
+#include <utility>
+#include "ceres/mutex.h"
+#include "ceres/block_random_access_matrix.h"
+#include "ceres/collections_port.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/integral_types.h"
+#include "ceres/internal/macros.h"
+#include "ceres/internal/port.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+// A thread safe square block sparse implementation of
+// BlockRandomAccessMatrix. Internally a TripletSparseMatrix is used
+// for the actual storage. This class augments that matrix with a
+// hash map that allows random read/write access to the cells.
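+//
+// A minimal usage sketch (illustrative only):
+//
+//   vector<int> blocks;
+//   blocks.push_back(2);
+//   blocks.push_back(3);
+//   set<pair<int, int> > block_pairs;
+//   block_pairs.insert(make_pair(0, 0));  // Only the (0, 0) cell is stored.
+//   BlockRandomAccessSparseMatrix m(blocks, block_pairs);
+//   // GetCell(0, 0, ...) returns a valid CellInfo; GetCell(0, 1, ...)
+//   // returns NULL.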
+class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
+ public:
+ // blocks is an array of block sizes. block_pairs is a set of
+ // <row_block_id, col_block_id> pairs to identify the non-zero cells
+ // of this matrix.
+ BlockRandomAccessSparseMatrix(const vector<int>& blocks,
+ const set<pair<int, int> >& block_pairs);
+
+ // The destructor is not thread safe. It assumes that no one is
+ // modifying any cells when the matrix is being destroyed.
+ virtual ~BlockRandomAccessSparseMatrix();
+
+ // BlockRandomAccessMatrix Interface.
+ virtual CellInfo* GetCell(int row_block_id,
+ int col_block_id,
+ int* row,
+ int* col,
+ int* row_stride,
+ int* col_stride);
+
+ // This is not a thread safe method, it assumes that no cell is
+ // locked.
+ virtual void SetZero();
+ virtual bool IsThreadSafe() const { return true; }
+
+ // Since the matrix is square, num_rows() == num_cols().
+ virtual int num_rows() const { return tsm_->num_rows(); }
+ virtual int num_cols() const { return tsm_->num_cols(); }
+
+ // Access to the underlying matrix object.
+ const TripletSparseMatrix* matrix() const { return tsm_.get(); }
+ TripletSparseMatrix* mutable_matrix() { return tsm_.get(); }
+
+ private:
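+  // Pack a (row_block_id, col_block_id) pair into a single key, e.g. with
+  // kMaxRowBlocks = 10,000,000 the pair (2, 3) maps to 20000003. The key is
+  // unique as long as both ids are smaller than kMaxRowBlocks, which the
+  // constructor CHECKs for the number of row/column blocks.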
+ int64 IntPairToLong(int a, int b) {
+ return a * kMaxRowBlocks + b;
+ }
+
+ const int64 kMaxRowBlocks;
+ // row/column block sizes.
+ const vector<int> blocks_;
+
+  // A mapping from <row_block_id, col_block_id> to the CellInfo pointing
+  // into the values array of tsm_ where the cell is stored.
+ typedef HashMap<long int, CellInfo* > LayoutType;
+ LayoutType layout_;
+
+ // The underlying matrix object which actually stores the cells.
+ scoped_ptr<TripletSparseMatrix> tsm_;
+
+ friend class BlockRandomAccessSparseMatrixTest;
+ CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessSparseMatrix);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_BLOCK_RANDOM_ACCESS_SPARSE_MATRIX_H_
diff --git a/internal/ceres/block_random_access_sparse_matrix_test.cc b/internal/ceres/block_random_access_sparse_matrix_test.cc
new file mode 100644
index 0000000..b2396a1
--- /dev/null
+++ b/internal/ceres/block_random_access_sparse_matrix_test.cc
@@ -0,0 +1,150 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <limits>
+#include <vector>
+#include "ceres/block_random_access_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(BlockRandomAccessSparseMatrix, GetCell) {
+ vector<int> blocks;
+ blocks.push_back(3);
+ blocks.push_back(4);
+ blocks.push_back(5);
+ const int num_rows = 3 + 4 + 5;
+
+ set< pair<int, int> > block_pairs;
+ int num_nonzeros = 0;
+ block_pairs.insert(make_pair(0, 0));
+ num_nonzeros += blocks[0] * blocks[0];
+
+ block_pairs.insert(make_pair(1, 1));
+ num_nonzeros += blocks[1] * blocks[1];
+
+ block_pairs.insert(make_pair(1, 2));
+ num_nonzeros += blocks[1] * blocks[2];
+
+ block_pairs.insert(make_pair(2, 0));
+ num_nonzeros += blocks[2] * blocks[0];
+
+ BlockRandomAccessSparseMatrix m(blocks, block_pairs);
+ EXPECT_EQ(m.num_rows(), num_rows);
+ EXPECT_EQ(m.num_cols(), num_rows);
+
+ for (set<pair<int, int> >::const_iterator it = block_pairs.begin();
+ it != block_pairs.end();
+ ++it) {
+ const int row_block_id = it->first;
+ const int col_block_id = it->second;
+ int row;
+ int col;
+ int row_stride;
+ int col_stride;
+ CellInfo* cell = m.GetCell(row_block_id, col_block_id,
+ &row, &col,
+ &row_stride, &col_stride);
+ EXPECT_TRUE(cell != NULL);
+ EXPECT_EQ(row, 0);
+ EXPECT_EQ(col, 0);
+ EXPECT_EQ(row_stride, blocks[row_block_id]);
+ EXPECT_EQ(col_stride, blocks[col_block_id]);
+
+ // Write into the block
+ MatrixRef(cell->values, row_stride, col_stride).block(
+ row, col, blocks[row_block_id], blocks[col_block_id]) =
+ (row_block_id + 1) * (col_block_id +1) *
+ Matrix::Ones(blocks[row_block_id], blocks[col_block_id]);
+ }
+
+ const TripletSparseMatrix* tsm = m.matrix();
+ EXPECT_EQ(tsm->num_nonzeros(), num_nonzeros);
+ EXPECT_EQ(tsm->max_num_nonzeros(), num_nonzeros);
+
+ Matrix dense;
+ tsm->ToDenseMatrix(&dense);
+
+ double kTolerance = 1e-14;
+
+ // (0,0)
+ EXPECT_NEAR((dense.block(0, 0, 3, 3) - Matrix::Ones(3, 3)).norm(),
+ 0.0,
+ kTolerance);
+ // (1,1)
+ EXPECT_NEAR((dense.block(3, 3, 4, 4) - 2 * 2 * Matrix::Ones(4, 4)).norm(),
+ 0.0,
+ kTolerance);
+ // (1,2)
+ EXPECT_NEAR((dense.block(3, 3 + 4, 4, 5) - 2 * 3 * Matrix::Ones(4, 5)).norm(),
+ 0.0,
+ kTolerance);
+ // (2,0)
+ EXPECT_NEAR((dense.block(3 + 4, 0, 5, 3) - 3 * 1 * Matrix::Ones(5, 3)).norm(),
+ 0.0,
+ kTolerance);
+
+ // There is nothing else in the matrix besides these four blocks.
+ EXPECT_NEAR(dense.norm(), sqrt(9. + 16. * 16. + 36. * 20. + 9. * 15.),
+ kTolerance);
+}
+
+// IntPairToLong is private, thus this fixture is needed to access and
+// test it.
+class BlockRandomAccessSparseMatrixTest : public ::testing::Test {
+ public:
+ virtual void SetUp() {
+ vector<int> blocks;
+ blocks.push_back(1);
+ set< pair<int, int> > block_pairs;
+ block_pairs.insert(make_pair(0, 0));
+ m_.reset(new BlockRandomAccessSparseMatrix(blocks, block_pairs));
+ }
+
+ void CheckIntPair(int a, int b) {
+ int64 value = m_->IntPairToLong(a, b);
+ EXPECT_GT(value, 0) << "Overflow a = " << a << " b = " << b;
+ EXPECT_GT(value, a) << "Overflow a = " << a << " b = " << b;
+ EXPECT_GT(value, b) << "Overflow a = " << a << " b = " << b;
+ }
+
+ private:
+ scoped_ptr<BlockRandomAccessSparseMatrix> m_;
+};
+
+TEST_F(BlockRandomAccessSparseMatrixTest, IntPairToLongOverflow) {
+ CheckIntPair(numeric_limits<int>::max(), numeric_limits<int>::max());
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/block_sparse_matrix.cc b/internal/ceres/block_sparse_matrix.cc
new file mode 100644
index 0000000..dbe5ec9
--- /dev/null
+++ b/internal/ceres/block_sparse_matrix.cc
@@ -0,0 +1,286 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/block_sparse_matrix.h"
+
+#include <cstddef>
+#include <algorithm>
+#include <vector>
+#include "ceres/block_structure.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/matrix_proto.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+BlockSparseMatrix::~BlockSparseMatrix() {}
+
+BlockSparseMatrix::BlockSparseMatrix(
+ CompressedRowBlockStructure* block_structure)
+ : num_rows_(0),
+ num_cols_(0),
+ num_nonzeros_(0),
+ values_(NULL),
+ block_structure_(block_structure) {
+ CHECK_NOTNULL(block_structure_.get());
+
+ // Count the number of columns in the matrix.
+ for (int i = 0; i < block_structure_->cols.size(); ++i) {
+ num_cols_ += block_structure_->cols[i].size;
+ }
+
+ // Count the number of non-zero entries and the number of rows in
+ // the matrix.
+ for (int i = 0; i < block_structure_->rows.size(); ++i) {
+ int row_block_size = block_structure_->rows[i].block.size;
+ num_rows_ += row_block_size;
+
+ const vector<Cell>& cells = block_structure_->rows[i].cells;
+ for (int j = 0; j < cells.size(); ++j) {
+ int col_block_id = cells[j].block_id;
+ int col_block_size = block_structure_->cols[col_block_id].size;
+ num_nonzeros_ += col_block_size * row_block_size;
+ }
+ }
+
+ CHECK_GE(num_rows_, 0);
+ CHECK_GE(num_cols_, 0);
+ CHECK_GE(num_nonzeros_, 0);
+ VLOG(2) << "Allocating values array with "
+ << num_nonzeros_ * sizeof(double) << " bytes."; // NOLINT
+ values_.reset(new double[num_nonzeros_]);
+ CHECK_NOTNULL(values_.get());
+}
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+BlockSparseMatrix::BlockSparseMatrix(const SparseMatrixProto& outer_proto) {
+ CHECK(outer_proto.has_block_matrix());
+
+ const BlockSparseMatrixProto& proto = outer_proto.block_matrix();
+ CHECK(proto.has_num_rows());
+ CHECK(proto.has_num_cols());
+ CHECK_EQ(proto.num_nonzeros(), proto.values_size());
+
+ num_rows_ = proto.num_rows();
+ num_cols_ = proto.num_cols();
+ num_nonzeros_ = proto.num_nonzeros();
+
+ // Copy out the values into *this.
+ values_.reset(new double[num_nonzeros_]);
+ for (int i = 0; i < proto.num_nonzeros(); ++i) {
+ values_[i] = proto.values(i);
+ }
+
+ // Create the block structure according to the proto.
+ block_structure_.reset(new CompressedRowBlockStructure);
+ ProtoToBlockStructure(proto.block_structure(), block_structure_.get());
+}
+#endif
+
+void BlockSparseMatrix::SetZero() {
+ fill(values_.get(), values_.get() + num_nonzeros_, 0.0);
+}
+
+void BlockSparseMatrix::RightMultiply(const double* x, double* y) const {
+ CHECK_NOTNULL(x);
+ CHECK_NOTNULL(y);
+
+ for (int i = 0; i < block_structure_->rows.size(); ++i) {
+ int row_block_pos = block_structure_->rows[i].block.position;
+ int row_block_size = block_structure_->rows[i].block.size;
+ VectorRef yref(y + row_block_pos, row_block_size);
+ const vector<Cell>& cells = block_structure_->rows[i].cells;
+ for (int j = 0; j < cells.size(); ++j) {
+ int col_block_id = cells[j].block_id;
+ int col_block_size = block_structure_->cols[col_block_id].size;
+ int col_block_pos = block_structure_->cols[col_block_id].position;
+ ConstVectorRef xref(x + col_block_pos, col_block_size);
+ MatrixRef m(values_.get() + cells[j].position,
+ row_block_size, col_block_size);
+ yref += m.lazyProduct(xref);
+ }
+ }
+}
+
+void BlockSparseMatrix::LeftMultiply(const double* x, double* y) const {
+ CHECK_NOTNULL(x);
+ CHECK_NOTNULL(y);
+
+ for (int i = 0; i < block_structure_->rows.size(); ++i) {
+ int row_block_pos = block_structure_->rows[i].block.position;
+ int row_block_size = block_structure_->rows[i].block.size;
+ const ConstVectorRef xref(x + row_block_pos, row_block_size);
+ const vector<Cell>& cells = block_structure_->rows[i].cells;
+ for (int j = 0; j < cells.size(); ++j) {
+ int col_block_id = cells[j].block_id;
+ int col_block_size = block_structure_->cols[col_block_id].size;
+ int col_block_pos = block_structure_->cols[col_block_id].position;
+ VectorRef yref(y + col_block_pos, col_block_size);
+ MatrixRef m(values_.get() + cells[j].position,
+ row_block_size, col_block_size);
+ yref += m.transpose().lazyProduct(xref);
+ }
+ }
+}
+
+void BlockSparseMatrix::SquaredColumnNorm(double* x) const {
+ CHECK_NOTNULL(x);
+ VectorRef(x, num_cols_).setZero();
+ for (int i = 0; i < block_structure_->rows.size(); ++i) {
+ int row_block_size = block_structure_->rows[i].block.size;
+ const vector<Cell>& cells = block_structure_->rows[i].cells;
+ for (int j = 0; j < cells.size(); ++j) {
+ int col_block_id = cells[j].block_id;
+ int col_block_size = block_structure_->cols[col_block_id].size;
+ int col_block_pos = block_structure_->cols[col_block_id].position;
+ const MatrixRef m(values_.get() + cells[j].position,
+ row_block_size, col_block_size);
+ VectorRef(x + col_block_pos, col_block_size) += m.colwise().squaredNorm();
+ }
+ }
+}
+
+void BlockSparseMatrix::ScaleColumns(const double* scale) {
+ CHECK_NOTNULL(scale);
+
+ for (int i = 0; i < block_structure_->rows.size(); ++i) {
+ int row_block_size = block_structure_->rows[i].block.size;
+ const vector<Cell>& cells = block_structure_->rows[i].cells;
+ for (int j = 0; j < cells.size(); ++j) {
+ int col_block_id = cells[j].block_id;
+ int col_block_size = block_structure_->cols[col_block_id].size;
+ int col_block_pos = block_structure_->cols[col_block_id].position;
+ MatrixRef m(values_.get() + cells[j].position,
+ row_block_size, col_block_size);
+ m *= ConstVectorRef(scale + col_block_pos, col_block_size).asDiagonal();
+ }
+ }
+}
+
+void BlockSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
+ CHECK_NOTNULL(dense_matrix);
+
+ dense_matrix->resize(num_rows_, num_cols_);
+ dense_matrix->setZero();
+ Matrix& m = *dense_matrix;
+
+ for (int i = 0; i < block_structure_->rows.size(); ++i) {
+ int row_block_pos = block_structure_->rows[i].block.position;
+ int row_block_size = block_structure_->rows[i].block.size;
+ const vector<Cell>& cells = block_structure_->rows[i].cells;
+ for (int j = 0; j < cells.size(); ++j) {
+ int col_block_id = cells[j].block_id;
+ int col_block_size = block_structure_->cols[col_block_id].size;
+ int col_block_pos = block_structure_->cols[col_block_id].position;
+ int jac_pos = cells[j].position;
+ m.block(row_block_pos, col_block_pos, row_block_size, col_block_size)
+ += MatrixRef(values_.get() + jac_pos, row_block_size, col_block_size);
+ }
+ }
+}
+
+void BlockSparseMatrix::ToTripletSparseMatrix(
+ TripletSparseMatrix* matrix) const {
+ CHECK_NOTNULL(matrix);
+
+ matrix->Reserve(num_nonzeros_);
+ matrix->Resize(num_rows_, num_cols_);
+ matrix->SetZero();
+
+ for (int i = 0; i < block_structure_->rows.size(); ++i) {
+ int row_block_pos = block_structure_->rows[i].block.position;
+ int row_block_size = block_structure_->rows[i].block.size;
+ const vector<Cell>& cells = block_structure_->rows[i].cells;
+ for (int j = 0; j < cells.size(); ++j) {
+ int col_block_id = cells[j].block_id;
+ int col_block_size = block_structure_->cols[col_block_id].size;
+ int col_block_pos = block_structure_->cols[col_block_id].position;
+ int jac_pos = cells[j].position;
+ for (int r = 0; r < row_block_size; ++r) {
+ for (int c = 0; c < col_block_size; ++c, ++jac_pos) {
+ matrix->mutable_rows()[jac_pos] = row_block_pos + r;
+ matrix->mutable_cols()[jac_pos] = col_block_pos + c;
+ matrix->mutable_values()[jac_pos] = values_[jac_pos];
+ }
+ }
+ }
+ }
+ matrix->set_num_nonzeros(num_nonzeros_);
+}
+
+// Returns a pointer to the block structure. The matrix retains
+// ownership of the object.
+const CompressedRowBlockStructure* BlockSparseMatrix::block_structure()
+ const {
+ return block_structure_.get();
+}
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+void BlockSparseMatrix::ToProto(SparseMatrixProto* outer_proto) const {
+ outer_proto->Clear();
+
+ BlockSparseMatrixProto* proto = outer_proto->mutable_block_matrix();
+ proto->set_num_rows(num_rows_);
+ proto->set_num_cols(num_cols_);
+ proto->set_num_nonzeros(num_nonzeros_);
+ for (int i = 0; i < num_nonzeros_; ++i) {
+ proto->add_values(values_[i]);
+ }
+ BlockStructureToProto(*block_structure_, proto->mutable_block_structure());
+}
+#endif
+
+void BlockSparseMatrix::ToTextFile(FILE* file) const {
+ CHECK_NOTNULL(file);
+ for (int i = 0; i < block_structure_->rows.size(); ++i) {
+ const int row_block_pos = block_structure_->rows[i].block.position;
+ const int row_block_size = block_structure_->rows[i].block.size;
+ const vector<Cell>& cells = block_structure_->rows[i].cells;
+ for (int j = 0; j < cells.size(); ++j) {
+ const int col_block_id = cells[j].block_id;
+ const int col_block_size = block_structure_->cols[col_block_id].size;
+ const int col_block_pos = block_structure_->cols[col_block_id].position;
+ int jac_pos = cells[j].position;
+ for (int r = 0; r < row_block_size; ++r) {
+ for (int c = 0; c < col_block_size; ++c) {
+ fprintf(file, "% 10d % 10d %17f\n",
+ row_block_pos + r,
+ col_block_pos + c,
+ values_[jac_pos++]);
+ }
+ }
+ }
+ }
+}
+
+} // namespace internal
+} // namespace ceres
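
One property of the multiply routines above that is easy to miss: RightMultiply and LeftMultiply accumulate into the output (yref += ...), so callers must zero y before the first call. A minimal usage sketch, assuming the Vector typedef from ceres/internal/eigen.h that the tests below also use:

#include "ceres/block_sparse_matrix.h"
#include "ceres/internal/eigen.h"

namespace ceres {
namespace internal {

// Computes y = A * x from scratch. RightMultiply only accumulates,
// so the output is cleared first.
void Apply(const BlockSparseMatrix& A, const Vector& x, Vector* y) {
  y->resize(A.num_rows());
  y->setZero();
  A.RightMultiply(x.data(), y->data());
}

}  // namespace internal
}  // namespace ceres
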
diff --git a/internal/ceres/block_sparse_matrix.h b/internal/ceres/block_sparse_matrix.h
new file mode 100644
index 0000000..513d398
--- /dev/null
+++ b/internal/ceres/block_sparse_matrix.h
@@ -0,0 +1,144 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Implementation of the SparseMatrix interface for block sparse
+// matrices.
+
+#ifndef CERES_INTERNAL_BLOCK_SPARSE_MATRIX_H_
+#define CERES_INTERNAL_BLOCK_SPARSE_MATRIX_H_
+
+#include "ceres/block_structure.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/macros.h"
+#include "ceres/internal/scoped_ptr.h"
+
+namespace ceres {
+namespace internal {
+
+class SparseMatrixProto;
+class TripletSparseMatrix;
+
+// A further extension of the SparseMatrix interface to support block-oriented
+// matrices. The key addition is the RowBlockValues() accessor, which enables
+// the lazy block sparse matrix implementation.
+class BlockSparseMatrixBase : public SparseMatrix {
+ public:
+ BlockSparseMatrixBase() {}
+ virtual ~BlockSparseMatrixBase() {}
+
+ // Convert this matrix into a triplet sparse matrix.
+ virtual void ToTripletSparseMatrix(TripletSparseMatrix* matrix) const = 0;
+
+ // Returns a pointer to the block structure. Does not transfer
+ // ownership.
+ virtual const CompressedRowBlockStructure* block_structure() const = 0;
+
+ // Returns a pointer to a row of the matrix. The returned array is only valid
+ // until the next call to RowBlockValues. The caller does not own the result.
+ //
+  // Each cell on the specified row occupies a contiguous range of the
+  // returned array, but the ranges for neighbouring cells in row order need
+  // not be adjacent. The cell values for cell (row_block, cell_block) are
+  // found at offset
+ //
+ // block_structure()->rows[row_block].cells[cell_block].position
+ //
+ virtual const double* RowBlockValues(int row_block_index) const = 0;
+
+ private:
+ CERES_DISALLOW_COPY_AND_ASSIGN(BlockSparseMatrixBase);
+};
+
+// This class implements the SparseMatrix interface for storing and
+// manipulating block sparse matrices. The block structure is stored
+// in the CompressedRowBlockStructure object and one is needed to
+// initialize the matrix. For details on how the block structure of
+// the matrix is stored, please see the documentation in
+//
+// internal/ceres/block_structure.h
+//
+class BlockSparseMatrix : public BlockSparseMatrixBase {
+ public:
+ // Construct a block sparse matrix with a fully initialized
+  // CompressedRowBlockStructure object. The matrix takes over
+ // ownership of this object and destroys it upon destruction.
+ //
+ // TODO(sameeragarwal): Add a function which will validate legal
+ // CompressedRowBlockStructure objects.
+ explicit BlockSparseMatrix(CompressedRowBlockStructure* block_structure);
+
+ // Construct a block sparse matrix from a protocol buffer.
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+ explicit BlockSparseMatrix(const SparseMatrixProto& proto);
+#endif
+
+ BlockSparseMatrix();
+ virtual ~BlockSparseMatrix();
+
+ // Implementation of SparseMatrix interface.
+ virtual void SetZero();
+ virtual void RightMultiply(const double* x, double* y) const;
+ virtual void LeftMultiply(const double* x, double* y) const;
+ virtual void SquaredColumnNorm(double* x) const;
+ virtual void ScaleColumns(const double* scale);
+ virtual void ToDenseMatrix(Matrix* dense_matrix) const;
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+ virtual void ToProto(SparseMatrixProto* proto) const;
+#endif
+ virtual void ToTextFile(FILE* file) const;
+
+ virtual int num_rows() const { return num_rows_; }
+ virtual int num_cols() const { return num_cols_; }
+ virtual int num_nonzeros() const { return num_nonzeros_; }
+ virtual const double* values() const { return values_.get(); }
+ virtual double* mutable_values() { return values_.get(); }
+
+ // Implementation of BlockSparseMatrixBase interface.
+ virtual void ToTripletSparseMatrix(TripletSparseMatrix* matrix) const;
+ virtual const CompressedRowBlockStructure* block_structure() const;
+ virtual const double* RowBlockValues(int row_block_index) const {
+ return values_.get();
+ }
+
+ private:
+ int num_rows_;
+ int num_cols_;
+ int max_num_nonzeros_;
+ int num_nonzeros_;
+ scoped_array<double> values_;
+ scoped_ptr<CompressedRowBlockStructure> block_structure_;
+ CERES_DISALLOW_COPY_AND_ASSIGN(BlockSparseMatrix);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_BLOCK_SPARSE_MATRIX_H_
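
To make the RowBlockValues() offset convention above concrete, here is a small sketch (a helper of our own, not part of the interface) that locates the dense values of a single cell; within a cell the values are stored row-major, as the ToTripletSparseMatrix loop in block_sparse_matrix.cc implies:

#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"

namespace ceres {
namespace internal {

// Returns a pointer to the start of cell (row_block, cell_block) and
// reports its dimensions. The cell occupies row_block_size x
// col_block_size doubles, stored row-major, starting at the returned
// pointer.
const double* CellValues(const BlockSparseMatrixBase& A,
                         int row_block, int cell_block,
                         int* row_block_size, int* col_block_size) {
  const CompressedRowBlockStructure* bs = A.block_structure();
  const Cell& cell = bs->rows[row_block].cells[cell_block];
  *row_block_size = bs->rows[row_block].block.size;
  *col_block_size = bs->cols[cell.block_id].size;
  return A.RowBlockValues(row_block) + cell.position;
}

}  // namespace internal
}  // namespace ceres
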
diff --git a/internal/ceres/block_sparse_matrix_test.cc b/internal/ceres/block_sparse_matrix_test.cc
new file mode 100644
index 0000000..457a2fb
--- /dev/null
+++ b/internal/ceres/block_sparse_matrix_test.cc
@@ -0,0 +1,135 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/block_sparse_matrix.h"
+
+#include <string>
+#include "ceres/casts.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/matrix_proto.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockSparseMatrixTest : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ scoped_ptr<LinearLeastSquaresProblem> problem(
+ CreateLinearLeastSquaresProblemFromId(2));
+ CHECK_NOTNULL(problem.get());
+ A_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
+
+ problem.reset(CreateLinearLeastSquaresProblemFromId(1));
+ CHECK_NOTNULL(problem.get());
+ B_.reset(down_cast<TripletSparseMatrix*>(problem->A.release()));
+
+ CHECK_EQ(A_->num_rows(), B_->num_rows());
+ CHECK_EQ(A_->num_cols(), B_->num_cols());
+ CHECK_EQ(A_->num_nonzeros(), B_->num_nonzeros());
+ }
+
+ scoped_ptr<BlockSparseMatrix> A_;
+ scoped_ptr<TripletSparseMatrix> B_;
+};
+
+TEST_F(BlockSparseMatrixTest, SetZeroTest) {
+ A_->SetZero();
+ EXPECT_EQ(13, A_->num_nonzeros());
+}
+
+TEST_F(BlockSparseMatrixTest, RightMultiplyTest) {
+ Vector y_a = Vector::Zero(A_->num_rows());
+ Vector y_b = Vector::Zero(A_->num_rows());
+ for (int i = 0; i < A_->num_cols(); ++i) {
+ Vector x = Vector::Zero(A_->num_cols());
+ x[i] = 1.0;
+ A_->RightMultiply(x.data(), y_a.data());
+ B_->RightMultiply(x.data(), y_b.data());
+ EXPECT_LT((y_a - y_b).norm(), 1e-12);
+ }
+}
+
+TEST_F(BlockSparseMatrixTest, LeftMultiplyTest) {
+ Vector y_a = Vector::Zero(A_->num_cols());
+ Vector y_b = Vector::Zero(A_->num_cols());
+ for (int i = 0; i < A_->num_rows(); ++i) {
+ Vector x = Vector::Zero(A_->num_rows());
+ x[i] = 1.0;
+ A_->LeftMultiply(x.data(), y_a.data());
+ B_->LeftMultiply(x.data(), y_b.data());
+ EXPECT_LT((y_a - y_b).norm(), 1e-12);
+ }
+}
+
+TEST_F(BlockSparseMatrixTest, SquaredColumnNormTest) {
+ Vector y_a = Vector::Zero(A_->num_cols());
+ Vector y_b = Vector::Zero(A_->num_cols());
+ A_->SquaredColumnNorm(y_a.data());
+ B_->SquaredColumnNorm(y_b.data());
+ EXPECT_LT((y_a - y_b).norm(), 1e-12);
+}
+
+TEST_F(BlockSparseMatrixTest, ToDenseMatrixTest) {
+ Matrix m_a;
+ Matrix m_b;
+ A_->ToDenseMatrix(&m_a);
+ B_->ToDenseMatrix(&m_b);
+ EXPECT_LT((m_a - m_b).norm(), 1e-12);
+}
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+TEST_F(BlockSparseMatrixTest, Serialization) {
+ // Roundtrip through serialization and check for equality.
+ SparseMatrixProto proto;
+ A_->ToProto(&proto);
+
+ LOG(INFO) << proto.DebugString();
+
+ BlockSparseMatrix A2(proto);
+
+ Matrix m_a;
+ Matrix m_b;
+ A_->ToDenseMatrix(&m_a);
+ A2.ToDenseMatrix(&m_b);
+
+ LOG(INFO) << "\n" << m_a;
+ LOG(INFO) << "\n" << m_b;
+
+ EXPECT_LT((m_a - m_b).norm(), 1e-12);
+}
+#endif
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/block_structure.cc b/internal/ceres/block_structure.cc
new file mode 100644
index 0000000..e611311
--- /dev/null
+++ b/internal/ceres/block_structure.cc
@@ -0,0 +1,92 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/block_structure.h"
+#include "ceres/matrix_proto.h"
+
+namespace ceres {
+namespace internal {
+
+bool CellLessThan(const Cell& lhs, const Cell& rhs) {
+ return (lhs.block_id < rhs.block_id);
+}
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+void ProtoToBlockStructure(const BlockStructureProto &proto,
+ CompressedRowBlockStructure *block_structure) {
+ // Decode the column blocks.
+ block_structure->cols.resize(proto.cols_size());
+ for (int i = 0; i < proto.cols_size(); ++i) {
+ block_structure->cols[i].size = proto.cols(i).size();
+ block_structure->cols[i].position =
+ proto.cols(i).position();
+ }
+ // Decode the row structure.
+ block_structure->rows.resize(proto.rows_size());
+ for (int i = 0; i < proto.rows_size(); ++i) {
+ const CompressedRowProto &row = proto.rows(i);
+ block_structure->rows[i].block.size = row.block().size();
+ block_structure->rows[i].block.position = row.block().position();
+
+ // Copy the cells within the row.
+ block_structure->rows[i].cells.resize(row.cells_size());
+ for (int j = 0; j < row.cells_size(); ++j) {
+ const CellProto &cell = row.cells(j);
+ block_structure->rows[i].cells[j].block_id = cell.block_id();
+ block_structure->rows[i].cells[j].position = cell.position();
+ }
+ }
+}
+
+void BlockStructureToProto(const CompressedRowBlockStructure &block_structure,
+ BlockStructureProto *proto) {
+ // Encode the column blocks.
+ for (int i = 0; i < block_structure.cols.size(); ++i) {
+ BlockProto *block = proto->add_cols();
+ block->set_size(block_structure.cols[i].size);
+ block->set_position(block_structure.cols[i].position);
+ }
+ // Encode the row structure.
+ for (int i = 0; i < block_structure.rows.size(); ++i) {
+ CompressedRowProto *row = proto->add_rows();
+ BlockProto *block = row->mutable_block();
+ block->set_size(block_structure.rows[i].block.size);
+ block->set_position(block_structure.rows[i].block.position);
+ for (int j = 0; j < block_structure.rows[i].cells.size(); ++j) {
+ CellProto *cell = row->add_cells();
+ cell->set_block_id(block_structure.rows[i].cells[j].block_id);
+ cell->set_position(block_structure.rows[i].cells[j].position);
+ }
+ }
+}
+#endif
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/block_structure.h b/internal/ceres/block_structure.h
new file mode 100644
index 0000000..f509067
--- /dev/null
+++ b/internal/ceres/block_structure.h
@@ -0,0 +1,105 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Block structure objects are used to carry information about the
+// dense block structure of sparse matrices. The BlockSparseMatrix
+// object uses the BlockStructure objects to keep track of the matrix
+// structure and operate upon it. This allows us to use more cache
+// friendly block oriented linear algebra operations on the matrix
+// instead of accessing it one scalar entry at a time.
+
+#ifndef CERES_INTERNAL_BLOCK_STRUCTURE_H_
+#define CERES_INTERNAL_BLOCK_STRUCTURE_H_
+
+#include <vector>
+#include "ceres/internal/port.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockStructureProto;
+
+typedef int16 BlockSize;
+
+struct Block {
+ Block() : size(-1), position(-1) {}
+ Block(int size_, int position_) : size(size_), position(position_) {}
+
+ BlockSize size;
+ int position; // Position along the row/column.
+};
+
+struct Cell {
+ Cell() : block_id(-1), position(-1) {}
+ Cell(int block_id_, int position_)
+ : block_id(block_id_), position(position_) {}
+
+  // Column or row block id, as the case may be.
+ int block_id;
+  // Position of this cell in the values array of the Jacobian.
+ int position;
+};
+
+// Orders cells by their block_id.
+bool CellLessThan(const Cell& lhs, const Cell& rhs);
+
+struct CompressedList {
+ Block block;
+ vector<Cell> cells;
+};
+
+typedef CompressedList CompressedRow;
+typedef CompressedList CompressedColumn;
+
+struct CompressedRowBlockStructure {
+ vector<Block> cols;
+ vector<CompressedRow> rows;
+};
+
+struct CompressedColumnBlockStructure {
+ vector<Block> rows;
+ vector<CompressedColumn> cols;
+};
+
+// Deserialize the given block structure proto to the given block structure.
+// Destroys previous contents of block_structure.
+void ProtoToBlockStructure(const BlockStructureProto &proto,
+ CompressedRowBlockStructure *block_structure);
+
+// Serialize the given block structure to the given proto. Destroys previous
+// contents of proto.
+void BlockStructureToProto(const CompressedRowBlockStructure &block_structure,
+ BlockStructureProto *proto);
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_BLOCK_STRUCTURE_H_
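
As a concrete illustration of the structures above, the following sketch (block sizes and positions are illustrative only) builds the block structure of a 3x3 matrix made of a 2x2 and a 1x1 diagonal block and hands it to BlockSparseMatrix, which takes ownership of the structure:

#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"

namespace ceres {
namespace internal {

// Layout: column blocks of sizes {2, 1}, row blocks of sizes {2, 1}.
// Cell (row block 0, col block 0) is 2x2 and starts at values[0];
// cell (row block 1, col block 1) is 1x1 and starts at values[4].
BlockSparseMatrix* MakeBlockDiagonalExample() {
  CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
  bs->cols.push_back(Block(2, 0));  // size, position
  bs->cols.push_back(Block(1, 2));

  bs->rows.resize(2);
  bs->rows[0].block = Block(2, 0);
  bs->rows[0].cells.push_back(Cell(0, 0));  // block_id, position in values
  bs->rows[1].block = Block(1, 2);
  bs->rows[1].cells.push_back(Cell(1, 4));

  return new BlockSparseMatrix(bs);  // The matrix takes ownership of bs.
}

}  // namespace internal
}  // namespace ceres
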
diff --git a/internal/ceres/canonical_views_clustering.cc b/internal/ceres/canonical_views_clustering.cc
new file mode 100644
index 0000000..d0dc1e6
--- /dev/null
+++ b/internal/ceres/canonical_views_clustering.cc
@@ -0,0 +1,238 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: David Gallup (dgallup@google.com)
+// Sameer Agarwal (sameeragarwal@google.com)
+
+#include "ceres/canonical_views_clustering.h"
+
+#include "ceres/collections_port.h"
+#include "ceres/graph.h"
+#include "ceres/internal/macros.h"
+#include "ceres/map_util.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+typedef HashMap<int, int> IntMap;
+typedef HashSet<int> IntSet;
+
+class CanonicalViewsClustering {
+ public:
+ CanonicalViewsClustering() {}
+
+ // Compute the canonical views clustering of the vertices of the
+  // graph. centers will contain the vertices that are identified
+ // as the canonical views/cluster centers, and membership is a map
+ // from vertices to cluster_ids. The i^th cluster center corresponds
+ // to the i^th cluster. It is possible depending on the
+ // configuration of the clustering algorithm that some of the
+ // vertices may not be assigned to any cluster. In this case they
+ // are assigned to a cluster with id = kInvalidClusterId.
+ void ComputeClustering(const Graph<int>& graph,
+ const CanonicalViewsClusteringOptions& options,
+ vector<int>* centers,
+ IntMap* membership);
+
+ private:
+ void FindValidViews(IntSet* valid_views) const;
+ double ComputeClusteringQualityDifference(const int candidate,
+ const vector<int>& centers) const;
+ void UpdateCanonicalViewAssignments(const int canonical_view);
+ void ComputeClusterMembership(const vector<int>& centers,
+ IntMap* membership) const;
+
+ CanonicalViewsClusteringOptions options_;
+ const Graph<int>* graph_;
+ // Maps a view to its representative canonical view (its cluster
+ // center).
+ IntMap view_to_canonical_view_;
+ // Maps a view to its similarity to its current cluster center.
+ HashMap<int, double> view_to_canonical_view_similarity_;
+ CERES_DISALLOW_COPY_AND_ASSIGN(CanonicalViewsClustering);
+};
+
+void ComputeCanonicalViewsClustering(
+ const Graph<int>& graph,
+ const CanonicalViewsClusteringOptions& options,
+ vector<int>* centers,
+ IntMap* membership) {
+ time_t start_time = time(NULL);
+ CanonicalViewsClustering cv;
+ cv.ComputeClustering(graph, options, centers, membership);
+ VLOG(2) << "Canonical views clustering time (secs): "
+ << time(NULL) - start_time;
+}
+
+// Implementation of CanonicalViewsClustering
+void CanonicalViewsClustering::ComputeClustering(
+ const Graph<int>& graph,
+ const CanonicalViewsClusteringOptions& options,
+ vector<int>* centers,
+ IntMap* membership) {
+ options_ = options;
+ CHECK_NOTNULL(centers)->clear();
+ CHECK_NOTNULL(membership)->clear();
+ graph_ = &graph;
+
+ IntSet valid_views;
+ FindValidViews(&valid_views);
+ while (valid_views.size() > 0) {
+ // Find the next best canonical view.
+ double best_difference = -std::numeric_limits<double>::max();
+ int best_view = 0;
+
+ // TODO(sameeragarwal): Make this loop multi-threaded.
+ for (IntSet::const_iterator view = valid_views.begin();
+ view != valid_views.end();
+ ++view) {
+ const double difference =
+ ComputeClusteringQualityDifference(*view, *centers);
+ if (difference > best_difference) {
+ best_difference = difference;
+ best_view = *view;
+ }
+ }
+
+ CHECK_GT(best_difference, -std::numeric_limits<double>::max());
+
+ // Add canonical view if quality improves, or if minimum is not
+ // yet met, otherwise break.
+ if ((best_difference <= 0) &&
+ (centers->size() >= options_.min_views)) {
+ break;
+ }
+
+ centers->push_back(best_view);
+ valid_views.erase(best_view);
+ UpdateCanonicalViewAssignments(best_view);
+ }
+
+ ComputeClusterMembership(*centers, membership);
+}
+
+// Return the set of vertices of the graph which have valid vertex
+// weights.
+void CanonicalViewsClustering::FindValidViews(
+ IntSet* valid_views) const {
+ const IntSet& views = graph_->vertices();
+ for (IntSet::const_iterator view = views.begin();
+ view != views.end();
+ ++view) {
+ if (graph_->VertexWeight(*view) != Graph<int>::InvalidWeight()) {
+ valid_views->insert(*view);
+ }
+ }
+}
+
+// Computes the difference in the quality score if 'candidate' were
+// added to the set of canonical views.
+double CanonicalViewsClustering::ComputeClusteringQualityDifference(
+ const int candidate,
+ const vector<int>& centers) const {
+ // View score.
+ double difference =
+ options_.view_score_weight * graph_->VertexWeight(candidate);
+
+ // Compute how much the quality score changes if the candidate view
+ // was added to the list of canonical views and its nearest
+ // neighbors became members of its cluster.
+ const IntSet& neighbors = graph_->Neighbors(candidate);
+ for (IntSet::const_iterator neighbor = neighbors.begin();
+ neighbor != neighbors.end();
+ ++neighbor) {
+ const double old_similarity =
+ FindWithDefault(view_to_canonical_view_similarity_, *neighbor, 0.0);
+ const double new_similarity = graph_->EdgeWeight(*neighbor, candidate);
+ if (new_similarity > old_similarity) {
+ difference += new_similarity - old_similarity;
+ }
+ }
+
+ // Number of views penalty.
+ difference -= options_.size_penalty_weight;
+
+ // Orthogonality.
+ for (int i = 0; i < centers.size(); ++i) {
+ difference -= options_.similarity_penalty_weight *
+ graph_->EdgeWeight(centers[i], candidate);
+ }
+
+ return difference;
+}
+
+// Reassign views if they're more similar to the new canonical view.
+void CanonicalViewsClustering::UpdateCanonicalViewAssignments(
+ const int canonical_view) {
+ const IntSet& neighbors = graph_->Neighbors(canonical_view);
+ for (IntSet::const_iterator neighbor = neighbors.begin();
+ neighbor != neighbors.end();
+ ++neighbor) {
+ const double old_similarity =
+ FindWithDefault(view_to_canonical_view_similarity_, *neighbor, 0.0);
+ const double new_similarity =
+ graph_->EdgeWeight(*neighbor, canonical_view);
+ if (new_similarity > old_similarity) {
+ view_to_canonical_view_[*neighbor] = canonical_view;
+ view_to_canonical_view_similarity_[*neighbor] = new_similarity;
+ }
+ }
+}
+
+// Assign a cluster id to each view.
+void CanonicalViewsClustering::ComputeClusterMembership(
+ const vector<int>& centers,
+ IntMap* membership) const {
+ CHECK_NOTNULL(membership)->clear();
+
+ // The i^th cluster has cluster id i.
+ IntMap center_to_cluster_id;
+ for (int i = 0; i < centers.size(); ++i) {
+ center_to_cluster_id[centers[i]] = i;
+ }
+
+ static const int kInvalidClusterId = -1;
+
+ const IntSet& views = graph_->vertices();
+ for (IntSet::const_iterator view = views.begin();
+ view != views.end();
+ ++view) {
+ IntMap::const_iterator it =
+ view_to_canonical_view_.find(*view);
+ int cluster_id = kInvalidClusterId;
+ if (it != view_to_canonical_view_.end()) {
+ cluster_id = FindOrDie(center_to_cluster_id, it->second);
+ }
+
+ InsertOrDie(membership, *view, cluster_id);
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/canonical_views_clustering.h b/internal/ceres/canonical_views_clustering.h
new file mode 100644
index 0000000..171ac55
--- /dev/null
+++ b/internal/ceres/canonical_views_clustering.h
@@ -0,0 +1,133 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// An implementation of the Canonical Views clustering algorithm from
+// "Scene Summarization for Online Image Collections", Ian Simon, Noah
+// Snavely, Steven M. Seitz, ICCV 2007.
+//
+// More details can be found at
+// http://grail.cs.washington.edu/projects/canonview/
+//
+// Ceres uses this algorithm to perform view clustering for
+// constructing visibility based preconditioners.
+
+#ifndef CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_
+#define CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_
+
+#include <vector>
+
+#include <glog/logging.h>
+#include "ceres/collections_port.h"
+#include "ceres/graph.h"
+#include "ceres/map_util.h"
+#include "ceres/internal/macros.h"
+
+namespace ceres {
+namespace internal {
+
+struct CanonicalViewsClusteringOptions;
+
+// Compute a partitioning of the vertices of the graph using the
+// canonical views clustering algorithm.
+//
+// In the following we will use the terms vertices and views
+// interchangeably. Given a weighted Graph G(V,E), the canonical views
+// of G are the set of vertices that best "summarize" the content
+// of the graph. If w_ij is the weight connecting vertex i to
+// vertex j, and C is the set of canonical views, then the objective
+// of the canonical views algorithm is
+//
+// E[C] = sum_[i in V] max_[j in C] w_ij
+// - size_penalty_weight * |C|
+// - similarity_penalty_weight * sum_[i in C, j in C, j > i] w_ij
+//
+// size_penalty_weight is the size penalty that penalizes a large
+// number of canonical views.
+//
+// similarity_penalty_weight is the similarity penalty that penalizes
+// canonical views that are too similar to other canonical views.
+//
+// Thus the canonical views algorithm tries to find a canonical view
+// for each vertex in the graph which best explains it, while trying
+// to minimize the number of canonical views and the overlap between
+// them.
+//
+// We further augment the above objective function by allowing for per
+// vertex weights, higher weights indicating a higher preference for
+// being chosen as a canonical view. Thus if w_i is the vertex weight
+// for vertex i, the objective function is then
+//
+// E[C] = sum_[i in V] max_[j in C] w_ij
+// - size_penalty_weight * |C|
+// - similarity_penalty_weight * sum_[i in C, j in C, j > i] w_ij
+// + view_score_weight * sum_[i in C] w_i
+//
+// centers will contain the vertices that are identified
+// as the canonical views/cluster centers, and membership is a map
+// from vertices to cluster_ids. The i^th cluster center corresponds
+// to the i^th cluster.
+//
+// It is possible depending on the configuration of the clustering
+// algorithm that some of the vertices may not be assigned to any
+// cluster. In this case they are assigned to a cluster with id = -1.
+void ComputeCanonicalViewsClustering(
+ const Graph<int>& graph,
+ const CanonicalViewsClusteringOptions& options,
+ vector<int>* centers,
+ HashMap<int, int>* membership);
+
+struct CanonicalViewsClusteringOptions {
+ CanonicalViewsClusteringOptions()
+ : min_views(3),
+ size_penalty_weight(5.75),
+ similarity_penalty_weight(100.0),
+ view_score_weight(0.0) {
+ }
+ // The minimum number of canonical views to compute.
+ int min_views;
+
+ // Penalty weight for the number of canonical views. A higher
+ // number will result in fewer canonical views.
+ double size_penalty_weight;
+
+ // Penalty weight for the diversity (orthogonality) of the
+ // canonical views. A higher number will encourage less similar
+ // canonical views.
+ double similarity_penalty_weight;
+
+ // Weight for per-view scores. Lower weight places less
+ // confidence in the view scores.
+ double view_score_weight;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_
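
A minimal calling sketch for the interface declared above. The Graph<int> calls (AddVertex, AddEdge) follow the unit test in canonical_views_clustering_test.cc; all weights here are illustrative, and the algorithm expects every view to have a self edge:

#include <vector>

#include "ceres/canonical_views_clustering.h"
#include "ceres/collections_port.h"
#include "ceres/graph.h"

namespace ceres {
namespace internal {

void ClusterTwoViews() {
  Graph<int> graph;
  graph.AddVertex(0, 1.0);   // The vertex weight acts as a view score.
  graph.AddVertex(1, 1.0);
  graph.AddEdge(0, 0, 1.0);  // Self edges: every view "sees" itself.
  graph.AddEdge(1, 1, 1.0);
  graph.AddEdge(0, 1, 0.7);  // Similarity between the two views.

  CanonicalViewsClusteringOptions options;
  options.min_views = 1;

  vector<int> centers;           // Filled with the canonical views.
  HashMap<int, int> membership;  // view id -> cluster id, -1 if unassigned.
  ComputeCanonicalViewsClustering(graph, options, &centers, &membership);
}

}  // namespace internal
}  // namespace ceres
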
diff --git a/internal/ceres/canonical_views_clustering_test.cc b/internal/ceres/canonical_views_clustering_test.cc
new file mode 100644
index 0000000..29bac3c
--- /dev/null
+++ b/internal/ceres/canonical_views_clustering_test.cc
@@ -0,0 +1,143 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sameer Agarwal (sameeragarwal@google.com)
+// David Gallup (dgallup@google.com)
+
+#include "ceres/canonical_views_clustering.h"
+
+#include "ceres/collections_port.h"
+#include "ceres/graph.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+const int kVertexIds[] = {0, 1, 2, 3};
+class CanonicalViewsTest : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ // The graph structure is as follows.
+ //
+ // Vertex weights: 0 2 2 0
+ // V0-----V1-----V2-----V3
+ // Edge weights: 0.8 0.9 0.3
+ const double kVertexWeights[] = {0.0, 2.0, 2.0, -1.0};
+ for (int i = 0; i < 4; ++i) {
+ graph_.AddVertex(i, kVertexWeights[i]);
+ }
+ // Create self edges.
+ // CanonicalViews requires that every view "sees" itself.
+ for (int i = 0; i < 4; ++i) {
+ graph_.AddEdge(i, i, 1.0);
+ }
+
+ // Create three edges.
+ const double kEdgeWeights[] = {0.8, 0.9, 0.3};
+ for (int i = 0; i < 3; ++i) {
+ // The graph interface is directed, so remember to create both
+ // edges.
+ graph_.AddEdge(kVertexIds[i], kVertexIds[i + 1], kEdgeWeights[i]);
+ }
+ }
+
+ void ComputeClustering() {
+ ComputeCanonicalViewsClustering(graph_, options_, &centers_, &membership_);
+ }
+
+ Graph<int> graph_;
+
+ CanonicalViewsClusteringOptions options_;
+ vector<int> centers_;
+ HashMap<int, int> membership_;
+};
+
+TEST_F(CanonicalViewsTest, ComputeCanonicalViewsTest) {
+ options_.min_views = 0;
+ options_.size_penalty_weight = 0.5;
+ options_.similarity_penalty_weight = 0.0;
+ options_.view_score_weight = 0.0;
+ ComputeClustering();
+
+ // 2 canonical views.
+ EXPECT_EQ(centers_.size(), 2);
+ EXPECT_EQ(centers_[0], kVertexIds[1]);
+ EXPECT_EQ(centers_[1], kVertexIds[3]);
+
+ // Check cluster membership.
+ EXPECT_EQ(FindOrDie(membership_, kVertexIds[0]), 0);
+ EXPECT_EQ(FindOrDie(membership_, kVertexIds[1]), 0);
+ EXPECT_EQ(FindOrDie(membership_, kVertexIds[2]), 0);
+ EXPECT_EQ(FindOrDie(membership_, kVertexIds[3]), 1);
+}
+
+// Increases size penalty so the second canonical view won't be
+// chosen.
+TEST_F(CanonicalViewsTest, SizePenaltyTest) {
+ options_.min_views = 0;
+ options_.size_penalty_weight = 2.0;
+ options_.similarity_penalty_weight = 0.0;
+ options_.view_score_weight = 0.0;
+ ComputeClustering();
+
+ // 1 canonical view.
+ EXPECT_EQ(centers_.size(), 1);
+ EXPECT_EQ(centers_[0], kVertexIds[1]);
+}
+
+
+// Increases view score weight so vertex 2 will be chosen.
+TEST_F(CanonicalViewsTest, ViewScoreTest) {
+ options_.min_views = 0;
+ options_.size_penalty_weight = 0.5;
+ options_.similarity_penalty_weight = 0.0;
+ options_.view_score_weight = 1.0;
+ ComputeClustering();
+
+ // 2 canonical views.
+ EXPECT_EQ(centers_.size(), 2);
+ EXPECT_EQ(centers_[0], kVertexIds[1]);
+ EXPECT_EQ(centers_[1], kVertexIds[2]);
+}
+
+// Increases similarity penalty so vertex 2 won't be chosen despite
+// its view score.
+TEST_F(CanonicalViewsTest, SimilarityPenaltyTest) {
+ options_.min_views = 0;
+ options_.size_penalty_weight = 0.5;
+ options_.similarity_penalty_weight = 3.0;
+ options_.view_score_weight = 1.0;
+ ComputeClustering();
+
+  // 1 canonical view.
+ EXPECT_EQ(centers_.size(), 1);
+ EXPECT_EQ(centers_[0], kVertexIds[1]);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/casts.h b/internal/ceres/casts.h
new file mode 100644
index 0000000..99cf218
--- /dev/null
+++ b/internal/ceres/casts.h
@@ -0,0 +1,108 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_CASTS_H_
+#define CERES_INTERNAL_CASTS_H_
+
+#include <cassert>
+#include <cstddef> // For NULL.
+
+namespace ceres {
+
+// Identity metafunction.
+template <class T>
+struct identity_ {
+ typedef T type;
+};
+
+// Use implicit_cast as a safe version of static_cast or const_cast
+// for implicit conversions. For example:
+// - Upcasting in a type hierarchy.
+// - Performing arithmetic conversions (int32 to int64, int to double, etc.).
+// - Adding const or volatile qualifiers.
+//
+// In general, implicit_cast can be used to convert this code
+// To to = from;
+// DoSomething(to);
+// to this
+// DoSomething(implicit_cast<To>(from));
+//
+// base::identity_ is used to make a non-deduced context, which
+// forces all callers to explicitly specify the template argument.
+template<typename To>
+inline To implicit_cast(typename identity_<To>::type to) {
+ return to;
+}
+
+// This version of implicit_cast is used when two template arguments
+// are specified. It's obsolete and should not be used.
+template<typename To, typename From>
+inline To implicit_cast(typename identity_<From>::type const &f) {
+ return f;
+}
+
+// When you upcast (that is, cast a pointer from type Foo to type
+// SuperclassOfFoo), it's fine to use implicit_cast<>, since upcasts
+// always succeed. When you downcast (that is, cast a pointer from
+// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because
+// how do you know the pointer is really of type SubclassOfFoo? It
+// could be a bare Foo, or of type DifferentSubclassOfFoo. Thus,
+// when you downcast, you should use this macro. In debug mode, we
+// use dynamic_cast<> to double-check the downcast is legal (we die
+// if it's not). In normal mode, we do the efficient static_cast<>
+// instead. Thus, it's important to test in debug mode to make sure
+// the cast is legal!
+// This is the only place in the code we should use dynamic_cast<>.
+// In particular, you SHOULDN'T be using dynamic_cast<> in order to
+// do RTTI (e.g. code like this):
+// if (dynamic_cast<Subclass1>(foo)) HandleASubclass1Object(foo);
+// if (dynamic_cast<Subclass2>(foo)) HandleASubclass2Object(foo);
+// You should design the code some other way not to need this.
+
+template<typename To, typename From> // use like this: down_cast<T*>(foo);
+inline To down_cast(From* f) { // so we only accept pointers
+ // Ensures that To is a sub-type of From *. This test is here only
+ // for compile-time type checking, and has no overhead in an
+ // optimized build at run-time, as it will be optimized away
+ // completely.
+
+ // TODO(csilvers): This should use COMPILE_ASSERT.
+ if (false) {
+ implicit_cast<From*, To>(NULL);
+ }
+
+ // uses RTTI in dbg and fastbuild. asserts are disabled in opt builds.
+ assert(f == NULL || dynamic_cast<To>(f) != NULL); // NOLINT
+ return static_cast<To>(f);
+}
+
+} // namespace ceres
+
+#endif // CERES_INTERNAL_CASTS_H_
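
A short usage sketch of down_cast with hypothetical Base/Derived types (not part of the library); the base type must be polymorphic so that the debug-mode dynamic_cast check can work:

#include "ceres/casts.h"

struct Base { virtual ~Base() {} };
struct Derived : public Base { int payload; };

void UseDownCast(Base* base) {
  // The caller must already know that 'base' really points to a Derived.
  // Debug builds verify this with dynamic_cast; optimized builds reduce
  // to a plain static_cast.
  Derived* derived = ceres::down_cast<Derived*>(base);
  derived->payload = 42;
}
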
diff --git a/internal/ceres/cgnr_linear_operator.h b/internal/ceres/cgnr_linear_operator.h
new file mode 100644
index 0000000..f32d8d9
--- /dev/null
+++ b/internal/ceres/cgnr_linear_operator.h
@@ -0,0 +1,120 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_CGNR_LINEAR_OPERATOR_H_
+#define CERES_INTERNAL_CGNR_LINEAR_OPERATOR_H_
+
+#include <algorithm>
+#include "ceres/linear_operator.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+class SparseMatrix;
+
+// A linear operator which takes a matrix A and a diagonal vector D and
+// performs products of the form
+//
+// (A^T A + D^T D)x
+//
+// This is used to implement iterative general sparse linear solving with
+// conjugate gradients, where A is the Jacobian and D is a regularizing
+// parameter. A brief proof that D^T D is the correct regularizer:
+//
+// Given a regularized least squares problem:
+//
+// min ||Ax - b||^2 + ||Dx||^2
+// x
+//
+// First expand into matrix notation:
+//
+//   (Ax - b)^T (Ax - b) + x^T D^T D x
+//
+// Then multiply out to get:
+//
+//   = x^T A^T A x - 2 b^T A x + b^T b + x^T D^T D x
+//
+// Take the derivative:
+//
+// 0 = 2A^TAx - 2A^T b + 2 D^TDx
+// 0 = A^TAx - A^T b + D^TDx
+// 0 = (A^TA + D^TD)x - A^T b
+//
+// Thus, the symmetric system we need to solve for CGNR is
+//
+// Sx = z
+//
+// with S = A^TA + D^TD
+// and z = A^T b
+//
+// Note: This class is not thread safe, since it uses some temporary storage.
+class CgnrLinearOperator : public LinearOperator {
+ public:
+ CgnrLinearOperator(const LinearOperator& A, const double *D)
+ : A_(A), D_(D), z_(new double[A.num_rows()]) {
+ }
+ virtual ~CgnrLinearOperator() {}
+
+ virtual void RightMultiply(const double* x, double* y) const {
+ std::fill(z_.get(), z_.get() + A_.num_rows(), 0.0);
+
+ // z = Ax
+ A_.RightMultiply(x, z_.get());
+
+ // y = y + Atz
+ A_.LeftMultiply(z_.get(), y);
+
+ // y = y + DtDx
+ if (D_ != NULL) {
+ int n = A_.num_cols();
+ VectorRef(y, n).array() += ConstVectorRef(D_, n).array().square() *
+ ConstVectorRef(x, n).array();
+ }
+ }
+
+ virtual void LeftMultiply(const double* x, double* y) const {
+ RightMultiply(x, y);
+ }
+
+ virtual int num_rows() const { return A_.num_cols(); }
+ virtual int num_cols() const { return A_.num_cols(); }
+
+ private:
+ const LinearOperator& A_;
+ const double* D_;
+ scoped_array<double> z_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_CGNR_LINEAR_OPERATOR_H_
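
The derivation above reduces the regularized least squares problem to the normal equations (A^T A + D^T D) x = A^T b. For small dense problems the same system can be formed and solved explicitly, which is a convenient way to sanity-check the operator; a sketch using plain Eigen (independent of CgnrLinearOperator, which exists precisely to avoid forming A^T A):

#include "Eigen/Dense"

// Solves min ||Ax - b||^2 + ||Dx||^2 by explicitly forming
// S = A^T A + D^T D and z = A^T b, where D holds the diagonal entries
// of the regularizer. Only sensible for small dense problems.
Eigen::VectorXd SolveRegularizedNormalEquations(const Eigen::MatrixXd& A,
                                                const Eigen::VectorXd& D,
                                                const Eigen::VectorXd& b) {
  Eigen::MatrixXd S = A.transpose() * A;
  S.diagonal().array() += D.array().square();  // Adds D^T D.
  return S.ldlt().solve(A.transpose() * b);
}
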
diff --git a/internal/ceres/cgnr_solver.cc b/internal/ceres/cgnr_solver.cc
new file mode 100644
index 0000000..ccc8026
--- /dev/null
+++ b/internal/ceres/cgnr_solver.cc
@@ -0,0 +1,80 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/cgnr_solver.h"
+
+#include "glog/logging.h"
+#include "ceres/linear_solver.h"
+#include "ceres/cgnr_linear_operator.h"
+#include "ceres/conjugate_gradients_solver.h"
+#include "ceres/block_jacobi_preconditioner.h"
+
+namespace ceres {
+namespace internal {
+
+CgnrSolver::CgnrSolver(const LinearSolver::Options& options)
+ : options_(options),
+ jacobi_preconditioner_(NULL) {
+}
+
+LinearSolver::Summary CgnrSolver::Solve(
+ LinearOperator* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x) {
+ // Form z = Atb.
+ scoped_array<double> z(new double[A->num_cols()]);
+ std::fill(z.get(), z.get() + A->num_cols(), 0.0);
+ A->LeftMultiply(b, z.get());
+
+ // Precondition if necessary.
+ LinearSolver::PerSolveOptions cg_per_solve_options = per_solve_options;
+ if (options_.preconditioner_type == JACOBI) {
+ if (jacobi_preconditioner_.get() == NULL) {
+ jacobi_preconditioner_.reset(new BlockJacobiPreconditioner(*A));
+ }
+ jacobi_preconditioner_->Update(*A, per_solve_options.D);
+ cg_per_solve_options.preconditioner = jacobi_preconditioner_.get();
+ } else if (options_.preconditioner_type != IDENTITY) {
+ LOG(FATAL) << "CGNR only supports IDENTITY and JACOBI preconditioners.";
+ }
+
+ // Solve (AtA + DtD)x = z (= Atb).
+ std::fill(x, x + A->num_cols(), 0.0);
+ CgnrLinearOperator lhs(*A, per_solve_options.D);
+ ConjugateGradientsSolver conjugate_gradient_solver(options_);
+ return conjugate_gradient_solver.Solve(&lhs,
+ z.get(),
+ cg_per_solve_options,
+ x);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/cgnr_solver.h b/internal/ceres/cgnr_solver.h
new file mode 100644
index 0000000..877b4c4
--- /dev/null
+++ b/internal/ceres/cgnr_solver.h
@@ -0,0 +1,66 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_CGNR_SOLVER_H_
+#define CERES_INTERNAL_CGNR_SOLVER_H_
+
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_solver.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockJacobiPreconditioner;
+
+// A conjugate gradients on the normal equations solver. This directly solves
+// for the solution to
+//
+// (A^T A + D^T D)x = A^T b
+//
+// as required for solving for x in the least squares sense. Currently only
+// block diagonal preconditioning is supported.
+class CgnrSolver : public LinearSolver {
+ public:
+ explicit CgnrSolver(const LinearSolver::Options& options);
+ virtual Summary Solve(LinearOperator* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x);
+
+ private:
+ const LinearSolver::Options options_;
+ scoped_ptr<BlockJacobiPreconditioner> jacobi_preconditioner_;
+ CERES_DISALLOW_COPY_AND_ASSIGN(CgnrSolver);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_CGNR_SOLVER_H_
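
A hedged sketch of driving CgnrSolver directly (not part of this change). Only Options::preconditioner_type and PerSolveOptions::D are exercised by the code above; the default construction of Options and PerSolveOptions and the ceres::JACOBI qualification are assumptions about the rest of the library:

    #include "ceres/cgnr_solver.h"

    ceres::internal::LinearSolver::Summary SolveNormalEquations(
        ceres::internal::LinearOperator* A,   // the Jacobian
        const double* b,                      // the residuals
        const double* D,                      // regularizer; may be NULL
        double* x) {                          // solution, size A->num_cols()
      ceres::internal::LinearSolver::Options options;
      options.preconditioner_type = ceres::JACOBI;  // or ceres::IDENTITY

      ceres::internal::LinearSolver::PerSolveOptions per_solve_options;
      per_solve_options.D = D;

      ceres::internal::CgnrSolver solver(options);
      return solver.Solve(A, b, per_solve_options, x);
    }
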
diff --git a/internal/ceres/collections_port.cc b/internal/ceres/collections_port.cc
new file mode 100644
index 0000000..fbeb61c
--- /dev/null
+++ b/internal/ceres/collections_port.cc
@@ -0,0 +1,43 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/collections_port.h"
+
+// The hash helpers (hash_mix, Hash32NumWithSeed and Hash64NumWithSeed) are
+// defined inline in collections_port.h; no out-of-line definitions are
+// needed here. This file exists to give the header a translation unit.
diff --git a/internal/ceres/collections_port.h b/internal/ceres/collections_port.h
new file mode 100644
index 0000000..063b6d3
--- /dev/null
+++ b/internal/ceres/collections_port.h
@@ -0,0 +1,167 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Portable HashMap and HashSet, and a specialized overload for hashing pairs.
+
+#ifndef CERES_INTERNAL_COLLECTIONS_PORT_H_
+#define CERES_INTERNAL_COLLECTIONS_PORT_H_
+
+#if defined(CERES_NO_TR1)
+# include <map>
+# include <set>
+#else
+# if defined(_MSC_VER)
+# include <unordered_map>
+# include <unordered_set>
+# else
+# include <tr1/unordered_map>
+# include <tr1/unordered_set>
+# endif
+#endif
+#include <utility>
+#include "ceres/integral_types.h"
+#include "ceres/internal/port.h"
+
+// Some systems don't have access to TR1. In that case, substitute the hash
+// map/set with normal map/set. The price to pay is slightly slower speed for
+// some operations.
+#if defined(CERES_NO_TR1)
+
+namespace ceres {
+namespace internal {
+
+template<typename K, typename V>
+struct HashMap : map<K, V> {};
+
+template<typename K>
+struct HashSet : set<K> {};
+
+} // namespace internal
+} // namespace ceres
+
+#else
+
+namespace ceres {
+namespace internal {
+
+template<typename K, typename V>
+struct HashMap : std::tr1::unordered_map<K, V> {};
+
+template<typename K>
+struct HashSet : std::tr1::unordered_set<K> {};
+
+#ifdef _WIN32
+#define GG_LONGLONG(x) x##I64
+#define GG_ULONGLONG(x) x##UI64
+#else
+#define GG_LONGLONG(x) x##LL
+#define GG_ULONGLONG(x) x##ULL
+#endif
+
+// The hash function is due to Bob Jenkins (see
+// http://burtleburtle.net/bob/hash/index.html). Each mix takes 36 instructions,
+// in 18 cycles if you're lucky. On x86 architectures, this requires 45
+// instructions in 27 cycles, if you're lucky.
+//
+// 32bit version
+inline void hash_mix(uint32& a, uint32& b, uint32& c) {
+ a -= b; a -= c; a ^= (c>>13);
+ b -= c; b -= a; b ^= (a<<8);
+ c -= a; c -= b; c ^= (b>>13);
+ a -= b; a -= c; a ^= (c>>12);
+ b -= c; b -= a; b ^= (a<<16);
+ c -= a; c -= b; c ^= (b>>5);
+ a -= b; a -= c; a ^= (c>>3);
+ b -= c; b -= a; b ^= (a<<10);
+ c -= a; c -= b; c ^= (b>>15);
+}
+
+// 64bit version
+inline void hash_mix(uint64& a, uint64& b, uint64& c) {
+ a -= b; a -= c; a ^= (c>>43);
+ b -= c; b -= a; b ^= (a<<9);
+ c -= a; c -= b; c ^= (b>>8);
+ a -= b; a -= c; a ^= (c>>38);
+ b -= c; b -= a; b ^= (a<<23);
+ c -= a; c -= b; c ^= (b>>5);
+ a -= b; a -= c; a ^= (c>>35);
+ b -= c; b -= a; b ^= (a<<49);
+ c -= a; c -= b; c ^= (b>>11);
+}
+
+inline uint32 Hash32NumWithSeed(uint32 num, uint32 c) {
+ // The golden ratio; an arbitrary value.
+ uint32 b = 0x9e3779b9UL;
+ hash_mix(num, b, c);
+ return c;
+}
+
+inline uint64 Hash64NumWithSeed(uint64 num, uint64 c) {
+ // More of the golden ratio.
+ uint64 b = GG_ULONGLONG(0xe08c1d668b756f82);
+ hash_mix(num, b, c);
+ return c;
+}
+
+} // namespace internal
+} // namespace ceres
+
+// Since on some platforms this is a doubly-nested namespace (std::tr1) and
+// on others it is not, the entire namespace line must be in a macro.
+CERES_HASH_NAMESPACE_START
+
+// The outrageously annoying specializations below are for portability reasons.
+// In short, it's not possible to have two overloads of hash<pair<T1, T2> >.
+
+// Hasher for STL pairs. Requires hashers for both members to be defined.
+template<typename T>
+struct hash<pair<T, T> > {
+ size_t operator()(const pair<T, T>& p) const {
+ size_t h1 = hash<T>()(p.first);
+ size_t h2 = hash<T>()(p.second);
+ // The decision below is at compile time
+ return (sizeof(h1) <= sizeof(ceres::internal::uint32)) ?
+ ceres::internal::Hash32NumWithSeed(h1, h2) :
+ ceres::internal::Hash64NumWithSeed(h1, h2);
+ }
+ // Less than operator for MSVC.
+ bool operator()(const pair<T, T>& a,
+ const pair<T, T>& b) const {
+ return a < b;
+ }
+ static const size_t bucket_size = 4; // These are required by MSVC
+ static const size_t min_buckets = 8; // 4 and 8 are defaults.
+};
+
+CERES_HASH_NAMESPACE_END
+
+#endif // CERES_NO_TR1
+
+#endif // CERES_INTERNAL_COLLECTIONS_PORT_H_
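
A small illustrative sketch of the portable containers and the pair hasher above in use (not part of this change; the names are made up for the example):

    #include <utility>
    #include "ceres/collections_port.h"

    void CollectionsSketch() {
      // Keyed on (row block, column block); resolved by the hash<pair<T, T> >
      // specialization above when TR1 is available, and by std::map otherwise.
      ceres::internal::HashMap<std::pair<int, int>, int> cell_to_index;
      cell_to_index[std::make_pair(0, 1)] = 7;

      ceres::internal::HashSet<int> visited;
      visited.insert(42);
    }
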
diff --git a/internal/ceres/compressed_row_jacobian_writer.cc b/internal/ceres/compressed_row_jacobian_writer.cc
new file mode 100644
index 0000000..912c484
--- /dev/null
+++ b/internal/ceres/compressed_row_jacobian_writer.cc
@@ -0,0 +1,217 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/compressed_row_jacobian_writer.h"
+
+#include "ceres/casts.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/scratch_evaluate_preparer.h"
+
+namespace ceres {
+namespace internal {
+
+SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
+ const vector<ResidualBlock*>& residual_blocks =
+ program_->residual_blocks();
+
+ int total_num_residuals = program_->NumResiduals();
+ int total_num_effective_parameters = program_->NumEffectiveParameters();
+
+ // Count the number of jacobian nonzeros.
+ int num_jacobian_nonzeros = 0;
+ for (int i = 0; i < residual_blocks.size(); ++i) {
+ ResidualBlock* residual_block = residual_blocks[i];
+ const int num_residuals = residual_block->NumResiduals();
+ const int num_parameter_blocks = residual_block->NumParameterBlocks();
+ for (int j = 0; j < num_parameter_blocks; ++j) {
+ ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
+ if (!parameter_block->IsConstant()) {
+ num_jacobian_nonzeros += num_residuals * parameter_block->LocalSize();
+ }
+ }
+ }
+
+ // Allocate storage for the jacobian with some extra space at the end.
+ // Allocate more space than needed to store the jacobian so that when the LM
+ // algorithm adds the diagonal, no reallocation is necessary. This reduces
+ // peak memory usage significantly.
+ CompressedRowSparseMatrix* jacobian =
+ new CompressedRowSparseMatrix(
+ total_num_residuals,
+ total_num_effective_parameters,
+ num_jacobian_nonzeros + total_num_effective_parameters);
+
+  // At this stage, the CompressedRowSparseMatrix is in an invalid state. But
+  // this seems to be the only way to construct it without doing a memory copy.
+ int* rows = jacobian->mutable_rows();
+ int* cols = jacobian->mutable_cols();
+ int row_pos = 0;
+ rows[0] = 0;
+ for (int i = 0; i < residual_blocks.size(); ++i) {
+ const ResidualBlock* residual_block = residual_blocks[i];
+ const int num_parameter_blocks = residual_block->NumParameterBlocks();
+
+ // Count the number of derivatives for a row of this residual block and
+ // build a list of active parameter block indices.
+ int num_derivatives = 0;
+ vector<int> parameter_indices;
+ for (int j = 0; j < num_parameter_blocks; ++j) {
+ ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
+ if (!parameter_block->IsConstant()) {
+ parameter_indices.push_back(parameter_block->index());
+ num_derivatives += parameter_block->LocalSize();
+ }
+ }
+
+ // Sort the parameters by their position in the state vector.
+ sort(parameter_indices.begin(), parameter_indices.end());
+ CHECK(unique(parameter_indices.begin(), parameter_indices.end()) ==
+ parameter_indices.end())
+ << "Ceres internal error: "
+ << "Duplicate parameter blocks detected in a cost function. "
+ << "This should never happen. Please report this to "
+ << "the Ceres developers.";
+
+ // Update the row indices.
+ const int num_residuals = residual_block->NumResiduals();
+ for (int j = 0; j < num_residuals; ++j) {
+ rows[row_pos + j + 1] = rows[row_pos + j] + num_derivatives;
+ }
+
+ // Iterate over parameter blocks in the order which they occur in the
+ // parameter vector. This code mirrors that in Write(), where jacobian
+ // values are updated.
+ int col_pos = 0;
+ for (int j = 0; j < parameter_indices.size(); ++j) {
+ ParameterBlock* parameter_block =
+ program_->parameter_blocks()[parameter_indices[j]];
+ const int parameter_block_size = parameter_block->LocalSize();
+
+ for (int r = 0; r < num_residuals; ++r) {
+ // This is the position in the values array of the jacobian where this
+ // row of the jacobian block should go.
+ const int column_block_begin = rows[row_pos + r] + col_pos;
+
+ for (int c = 0; c < parameter_block_size; ++c) {
+ cols[column_block_begin + c] = parameter_block->delta_offset() + c;
+ }
+ }
+ col_pos += parameter_block_size;
+ }
+ row_pos += num_residuals;
+ }
+ CHECK_EQ(num_jacobian_nonzeros, rows[total_num_residuals]);
+
+ // Populate the row and column block vectors for use by block
+ // oriented ordering algorithms. This is useful when
+ // Solver::Options::use_block_amd = true.
+ const vector<ParameterBlock*>& parameter_blocks = program_->parameter_blocks();
+ vector<int>& col_blocks = *(jacobian->mutable_col_blocks());
+ col_blocks.resize(parameter_blocks.size());
+ for (int i = 0; i < parameter_blocks.size(); ++i) {
+ col_blocks[i] = parameter_blocks[i]->LocalSize();
+ }
+
+ vector<int>& row_blocks = *(jacobian->mutable_row_blocks());
+ row_blocks.resize(residual_blocks.size());
+ for (int i = 0; i < residual_blocks.size(); ++i) {
+ row_blocks[i] = residual_blocks[i]->NumResiduals();
+ }
+
+ return jacobian;
+}
+
+void CompressedRowJacobianWriter::Write(int residual_id,
+ int residual_offset,
+ double **jacobians,
+ SparseMatrix* base_jacobian) {
+ CompressedRowSparseMatrix* jacobian =
+ down_cast<CompressedRowSparseMatrix*>(base_jacobian);
+
+ double* jacobian_values = jacobian->mutable_values();
+ const int* jacobian_rows = jacobian->rows();
+
+ const ResidualBlock* residual_block =
+ program_->residual_blocks()[residual_id];
+ const int num_parameter_blocks = residual_block->NumParameterBlocks();
+ const int num_residuals = residual_block->NumResiduals();
+
+ // It is necessary to determine the order of the jacobian blocks before
+ // copying them into the CompressedRowSparseMatrix. Just because a cost
+  // function uses parameter block 1 after parameter block 2 in its argument
+  // list does not mean that block 1 occurs before block 2 in the column
+  // layout of the jacobian. Thus, determine the order by sorting the
+  // jacobian blocks by their position in the state vector.
+ vector<pair<int, int> > evaluated_jacobian_blocks;
+ for (int j = 0; j < num_parameter_blocks; ++j) {
+ const ParameterBlock* parameter_block =
+ residual_block->parameter_blocks()[j];
+ if (!parameter_block->IsConstant()) {
+ evaluated_jacobian_blocks.push_back(
+ make_pair(parameter_block->index(), j));
+ }
+ }
+ sort(evaluated_jacobian_blocks.begin(), evaluated_jacobian_blocks.end());
+
+  // Offset within the current row at which the jacobian block for a given
+  // parameter block begins.
+ int col_pos = 0;
+
+ // Iterate over the jacobian blocks in increasing order of their
+ // positions in the reduced parameter vector.
+ for (int i = 0; i < evaluated_jacobian_blocks.size(); ++i) {
+ const ParameterBlock* parameter_block =
+ program_->parameter_blocks()[evaluated_jacobian_blocks[i].first];
+ const int argument = evaluated_jacobian_blocks[i].second;
+ const int parameter_block_size = parameter_block->LocalSize();
+
+ // Copy one row of the jacobian block at a time.
+ for (int r = 0; r < num_residuals; ++r) {
+ // Position of the r^th row of the current jacobian block.
+ const double* block_row_begin =
+ jacobians[argument] + r * parameter_block_size;
+
+ // Position in the values array of the jacobian where this
+ // row of the jacobian block should go.
+ double* column_block_begin =
+ jacobian_values + jacobian_rows[residual_offset + r] + col_pos;
+
+ copy(block_row_begin,
+ block_row_begin + parameter_block_size,
+ column_block_begin);
+ }
+ col_pos += parameter_block_size;
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/compressed_row_jacobian_writer.h b/internal/ceres/compressed_row_jacobian_writer.h
new file mode 100644
index 0000000..c103165
--- /dev/null
+++ b/internal/ceres/compressed_row_jacobian_writer.h
@@ -0,0 +1,75 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// A jacobian writer that directly writes to compressed row sparse matrices.
+
+#ifndef CERES_INTERNAL_COMPRESSED_ROW_JACOBIAN_WRITER_H_
+#define CERES_INTERNAL_COMPRESSED_ROW_JACOBIAN_WRITER_H_
+
+#include "ceres/evaluator.h"
+#include "ceres/scratch_evaluate_preparer.h"
+
+namespace ceres {
+namespace internal {
+
+class Program;
+class SparseMatrix;
+
+class CompressedRowJacobianWriter {
+ public:
+ CompressedRowJacobianWriter(Evaluator::Options /* ignored */,
+ Program* program)
+ : program_(program) {
+ }
+
+ // JacobianWriter interface.
+
+  // Since the compressed row matrix has a different layout than the one
+  // assumed by the cost functions, use scratch space to store the jacobians
+  // temporarily, then copy them over to the larger jacobian in the Write()
+  // function.
+ ScratchEvaluatePreparer* CreateEvaluatePreparers(int num_threads) {
+ return ScratchEvaluatePreparer::Create(*program_, num_threads);
+ }
+
+ SparseMatrix* CreateJacobian() const;
+
+ void Write(int residual_id,
+ int residual_offset,
+ double **jacobians,
+ SparseMatrix* base_jacobian);
+
+ private:
+ Program* program_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_COMPRESSED_ROW_JACOBIAN_WRITER_H_
diff --git a/internal/ceres/compressed_row_sparse_matrix.cc b/internal/ceres/compressed_row_sparse_matrix.cc
new file mode 100644
index 0000000..1b61468
--- /dev/null
+++ b/internal/ceres/compressed_row_sparse_matrix.cc
@@ -0,0 +1,354 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/compressed_row_sparse_matrix.h"
+
+#include <algorithm>
+#include <vector>
+#include "ceres/crs_matrix.h"
+#include "ceres/internal/port.h"
+#include "ceres/matrix_proto.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
+// Helper functor used by the constructor for reordering the contents
+// of a TripletSparseMatrix. This comparator assumes that there are no
+// duplicates in the pair of arrays rows and cols, i.e., there are no
+// indices i and j (with i != j) such that
+//
+//   rows[i] == rows[j] && cols[i] == cols[j]
+//
+// If such duplicates do exist, this functor will not be a StrictWeakOrdering.
+struct RowColLessThan {
+ RowColLessThan(const int* rows, const int* cols)
+ : rows(rows), cols(cols) {
+ }
+
+ bool operator()(const int x, const int y) const {
+ if (rows[x] == rows[y]) {
+ return (cols[x] < cols[y]);
+ }
+ return (rows[x] < rows[y]);
+ }
+
+ const int* rows;
+ const int* cols;
+};
+
+} // namespace
+
+// This constructor gives you a semi-initialized CompressedRowSparseMatrix.
+CompressedRowSparseMatrix::CompressedRowSparseMatrix(int num_rows,
+ int num_cols,
+ int max_num_nonzeros) {
+ num_rows_ = num_rows;
+ num_cols_ = num_cols;
+ max_num_nonzeros_ = max_num_nonzeros;
+
+ VLOG(1) << "# of rows: " << num_rows_ << " # of columns: " << num_cols_
+ << " max_num_nonzeros: " << max_num_nonzeros_
+ << ". Allocating " << (num_rows_ + 1) * sizeof(int) + // NOLINT
+ max_num_nonzeros_ * sizeof(int) + // NOLINT
+ max_num_nonzeros_ * sizeof(double); // NOLINT
+
+ rows_.reset(new int[num_rows_ + 1]);
+ cols_.reset(new int[max_num_nonzeros_]);
+ values_.reset(new double[max_num_nonzeros_]);
+
+ fill(rows_.get(), rows_.get() + num_rows_ + 1, 0);
+ fill(cols_.get(), cols_.get() + max_num_nonzeros_, 0);
+ fill(values_.get(), values_.get() + max_num_nonzeros_, 0);
+}
+
+CompressedRowSparseMatrix::CompressedRowSparseMatrix(
+ const TripletSparseMatrix& m) {
+ num_rows_ = m.num_rows();
+ num_cols_ = m.num_cols();
+ max_num_nonzeros_ = m.max_num_nonzeros();
+
+ // index is the list of indices into the TripletSparseMatrix m.
+ vector<int> index(m.num_nonzeros(), 0);
+ for (int i = 0; i < m.num_nonzeros(); ++i) {
+ index[i] = i;
+ }
+
+ // Sort index such that the entries of m are ordered by row and ties
+ // are broken by column.
+ sort(index.begin(), index.end(), RowColLessThan(m.rows(), m.cols()));
+
+ VLOG(1) << "# of rows: " << num_rows_ << " # of columns: " << num_cols_
+ << " max_num_nonzeros: " << max_num_nonzeros_
+ << ". Allocating " << (num_rows_ + 1) * sizeof(int) + // NOLINT
+ max_num_nonzeros_ * sizeof(int) + // NOLINT
+ max_num_nonzeros_ * sizeof(double); // NOLINT
+
+ rows_.reset(new int[num_rows_ + 1]);
+ cols_.reset(new int[max_num_nonzeros_]);
+ values_.reset(new double[max_num_nonzeros_]);
+
+ // rows_ = 0
+ fill(rows_.get(), rows_.get() + num_rows_ + 1, 0);
+
+ // Copy the contents of the cols and values array in the order given
+ // by index and count the number of entries in each row.
+ for (int i = 0; i < m.num_nonzeros(); ++i) {
+ const int idx = index[i];
+ ++rows_[m.rows()[idx] + 1];
+ cols_[i] = m.cols()[idx];
+ values_[i] = m.values()[idx];
+ }
+
+ // Find the cumulative sum of the row counts.
+ for (int i = 1; i < num_rows_ + 1; ++i) {
+ rows_[i] += rows_[i-1];
+ }
+
+ CHECK_EQ(num_nonzeros(), m.num_nonzeros());
+}
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+CompressedRowSparseMatrix::CompressedRowSparseMatrix(
+ const SparseMatrixProto& outer_proto) {
+ CHECK(outer_proto.has_compressed_row_matrix());
+
+ const CompressedRowSparseMatrixProto& proto =
+ outer_proto.compressed_row_matrix();
+
+ num_rows_ = proto.num_rows();
+ num_cols_ = proto.num_cols();
+
+ rows_.reset(new int[proto.rows_size()]);
+ cols_.reset(new int[proto.cols_size()]);
+ values_.reset(new double[proto.values_size()]);
+
+ for (int i = 0; i < proto.rows_size(); ++i) {
+ rows_[i] = proto.rows(i);
+ }
+
+ CHECK_EQ(proto.rows_size(), num_rows_ + 1);
+ CHECK_EQ(proto.cols_size(), proto.values_size());
+ CHECK_EQ(proto.cols_size(), rows_[num_rows_]);
+
+ for (int i = 0; i < proto.cols_size(); ++i) {
+ cols_[i] = proto.cols(i);
+ values_[i] = proto.values(i);
+ }
+
+ max_num_nonzeros_ = proto.cols_size();
+}
+#endif
+
+CompressedRowSparseMatrix::CompressedRowSparseMatrix(const double* diagonal,
+ int num_rows) {
+ CHECK_NOTNULL(diagonal);
+
+ num_rows_ = num_rows;
+ num_cols_ = num_rows;
+ max_num_nonzeros_ = num_rows;
+
+ rows_.reset(new int[num_rows_ + 1]);
+ cols_.reset(new int[num_rows_]);
+ values_.reset(new double[num_rows_]);
+
+ rows_[0] = 0;
+ for (int i = 0; i < num_rows_; ++i) {
+ cols_[i] = i;
+ values_[i] = diagonal[i];
+ rows_[i + 1] = i + 1;
+ }
+
+ CHECK_EQ(num_nonzeros(), num_rows);
+}
+
+CompressedRowSparseMatrix::~CompressedRowSparseMatrix() {
+}
+
+void CompressedRowSparseMatrix::SetZero() {
+ fill(values_.get(), values_.get() + num_nonzeros(), 0.0);
+}
+
+void CompressedRowSparseMatrix::RightMultiply(const double* x,
+ double* y) const {
+ CHECK_NOTNULL(x);
+ CHECK_NOTNULL(y);
+
+ for (int r = 0; r < num_rows_; ++r) {
+ for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
+ y[r] += values_[idx] * x[cols_[idx]];
+ }
+ }
+}
+
+void CompressedRowSparseMatrix::LeftMultiply(const double* x, double* y) const {
+ CHECK_NOTNULL(x);
+ CHECK_NOTNULL(y);
+
+ for (int r = 0; r < num_rows_; ++r) {
+ for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
+ y[cols_[idx]] += values_[idx] * x[r];
+ }
+ }
+}
+
+void CompressedRowSparseMatrix::SquaredColumnNorm(double* x) const {
+ CHECK_NOTNULL(x);
+
+ fill(x, x + num_cols_, 0.0);
+ for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
+ x[cols_[idx]] += values_[idx] * values_[idx];
+ }
+}
+
+void CompressedRowSparseMatrix::ScaleColumns(const double* scale) {
+ CHECK_NOTNULL(scale);
+
+ for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
+ values_[idx] *= scale[cols_[idx]];
+ }
+}
+
+void CompressedRowSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
+ CHECK_NOTNULL(dense_matrix);
+ dense_matrix->resize(num_rows_, num_cols_);
+ dense_matrix->setZero();
+
+ for (int r = 0; r < num_rows_; ++r) {
+ for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
+ (*dense_matrix)(r, cols_[idx]) = values_[idx];
+ }
+ }
+}
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+void CompressedRowSparseMatrix::ToProto(SparseMatrixProto* outer_proto) const {
+ CHECK_NOTNULL(outer_proto);
+
+ outer_proto->Clear();
+ CompressedRowSparseMatrixProto* proto
+ = outer_proto->mutable_compressed_row_matrix();
+
+ proto->set_num_rows(num_rows_);
+ proto->set_num_cols(num_cols_);
+
+ for (int r = 0; r < num_rows_ + 1; ++r) {
+ proto->add_rows(rows_[r]);
+ }
+
+ for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
+ proto->add_cols(cols_[idx]);
+ proto->add_values(values_[idx]);
+ }
+}
+#endif
+
+void CompressedRowSparseMatrix::DeleteRows(int delta_rows) {
+ CHECK_GE(delta_rows, 0);
+ CHECK_LE(delta_rows, num_rows_);
+
+ int new_num_rows = num_rows_ - delta_rows;
+
+ num_rows_ = new_num_rows;
+ int* new_rows = new int[num_rows_ + 1];
+ copy(rows_.get(), rows_.get() + num_rows_ + 1, new_rows);
+ rows_.reset(new_rows);
+}
+
+void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
+ CHECK_EQ(m.num_cols(), num_cols_);
+
+ // Check if there is enough space. If not, then allocate new arrays
+ // to hold the combined matrix and copy the contents of this matrix
+ // into it.
+ if (max_num_nonzeros_ < num_nonzeros() + m.num_nonzeros()) {
+ int new_max_num_nonzeros = num_nonzeros() + m.num_nonzeros();
+
+ VLOG(1) << "Reallocating " << sizeof(int) * new_max_num_nonzeros; // NOLINT
+
+ int* new_cols = new int[new_max_num_nonzeros];
+ copy(cols_.get(), cols_.get() + max_num_nonzeros_, new_cols);
+ cols_.reset(new_cols);
+
+ double* new_values = new double[new_max_num_nonzeros];
+ copy(values_.get(), values_.get() + max_num_nonzeros_, new_values);
+ values_.reset(new_values);
+
+ max_num_nonzeros_ = new_max_num_nonzeros;
+ }
+
+ // Copy the contents of m into this matrix.
+ copy(m.cols(), m.cols() + m.num_nonzeros(), cols_.get() + num_nonzeros());
+ copy(m.values(),
+ m.values() + m.num_nonzeros(),
+ values_.get() + num_nonzeros());
+
+ // Create the new rows array to hold the enlarged matrix.
+ int* new_rows = new int[num_rows_ + m.num_rows() + 1];
+ // The first num_rows_ entries are the same
+ copy(rows_.get(), rows_.get() + num_rows_, new_rows);
+
+ // new_rows = [rows_, m.row() + rows_[num_rows_]]
+ fill(new_rows + num_rows_,
+ new_rows + num_rows_ + m.num_rows() + 1,
+ rows_[num_rows_]);
+
+ for (int r = 0; r < m.num_rows() + 1; ++r) {
+ new_rows[num_rows_ + r] += m.rows()[r];
+ }
+
+ rows_.reset(new_rows);
+ num_rows_ += m.num_rows();
+}
+
+void CompressedRowSparseMatrix::ToTextFile(FILE* file) const {
+ CHECK_NOTNULL(file);
+ for (int r = 0; r < num_rows_; ++r) {
+ for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
+ fprintf(file, "% 10d % 10d %17f\n", r, cols_[idx], values_[idx]);
+ }
+ }
+}
+
+void CompressedRowSparseMatrix::ToCRSMatrix(CRSMatrix* matrix) const {
+ matrix->num_rows = num_rows();
+ matrix->num_cols = num_cols();
+
+ matrix->rows.resize(matrix->num_rows + 1);
+ matrix->cols.resize(num_nonzeros());
+ matrix->values.resize(num_nonzeros());
+
+ copy(rows_.get(), rows_.get() + matrix->num_rows + 1, matrix->rows.begin());
+ copy(cols_.get(), cols_.get() + num_nonzeros(), matrix->cols.begin());
+ copy(values_.get(), values_.get() + num_nonzeros(), matrix->values.begin());
+}
+
+} // namespace internal
+} // namespace ceres
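
A worked example of the triplet-to-CRS constructor above (not part of the change):

    // For a 3x3 matrix with nonzeros (row, col, value) = (0,0,1), (2,1,5), (0,2,2):
    //
    //   sorted by row, then column:  (0,0,1), (0,2,2), (2,1,5)
    //   cols_   = [0, 2, 1]
    //   values_ = [1, 2, 5]
    //   per-row counts in rows_:     [0, 2, 0, 1]
    //   after the cumulative sum:    rows_ = [0, 2, 2, 3]
    //
    // Row r therefore occupies entries rows_[r] .. rows_[r + 1] - 1, and
    // num_nonzeros() == rows_[num_rows_] == 3.
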
diff --git a/internal/ceres/compressed_row_sparse_matrix.h b/internal/ceres/compressed_row_sparse_matrix.h
new file mode 100644
index 0000000..10f96c0
--- /dev/null
+++ b/internal/ceres/compressed_row_sparse_matrix.h
@@ -0,0 +1,149 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_
+#define CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_
+
+#include <vector>
+#include <glog/logging.h>
+#include "ceres/sparse_matrix.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/macros.h"
+#include "ceres/internal/port.h"
+#include "ceres/types.h"
+
+namespace ceres {
+
+struct CRSMatrix;
+
+namespace internal {
+
+class SparseMatrixProto;
+
+class CompressedRowSparseMatrix : public SparseMatrix {
+ public:
+ // Build a matrix with the same content as the TripletSparseMatrix
+ // m. TripletSparseMatrix objects are easier to construct
+ // incrementally, so we use them to initialize SparseMatrix
+ // objects.
+ //
+ // We assume that m does not have any repeated entries.
+ explicit CompressedRowSparseMatrix(const TripletSparseMatrix& m);
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+ explicit CompressedRowSparseMatrix(const SparseMatrixProto& proto);
+#endif
+
+ // Use this constructor only if you know what you are doing. This
+ // creates a "blank" matrix with the appropriate amount of memory
+ // allocated. However, the object itself is in an inconsistent state
+ // as the rows and cols matrices do not match the values of
+ // num_rows, num_cols and max_num_nonzeros.
+ //
+  // The use case for this constructor is when the user knows the
+ // size of the matrix to begin with and wants to update the layout
+ // manually, instead of going via the indirect route of first
+ // constructing a TripletSparseMatrix, which leads to more than
+ // double the peak memory usage.
+ CompressedRowSparseMatrix(int num_rows,
+ int num_cols,
+ int max_num_nonzeros);
+
+ // Build a square sparse diagonal matrix with num_rows rows and
+ // columns. The diagonal m(i,i) = diagonal(i);
+ CompressedRowSparseMatrix(const double* diagonal, int num_rows);
+
+ virtual ~CompressedRowSparseMatrix();
+
+ // SparseMatrix interface.
+ virtual void SetZero();
+ virtual void RightMultiply(const double* x, double* y) const;
+ virtual void LeftMultiply(const double* x, double* y) const;
+ virtual void SquaredColumnNorm(double* x) const;
+ virtual void ScaleColumns(const double* scale);
+
+ virtual void ToDenseMatrix(Matrix* dense_matrix) const;
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+ virtual void ToProto(SparseMatrixProto* proto) const;
+#endif
+ virtual void ToTextFile(FILE* file) const;
+ virtual int num_rows() const { return num_rows_; }
+ virtual int num_cols() const { return num_cols_; }
+ virtual int num_nonzeros() const { return rows_[num_rows_]; }
+ virtual const double* values() const { return values_.get(); }
+ virtual double* mutable_values() { return values_.get(); }
+
+ // Delete the bottom delta_rows.
+ // num_rows -= delta_rows
+ void DeleteRows(int delta_rows);
+
+ // Append the contents of m to the bottom of this matrix. m must
+ // have the same number of columns as this matrix.
+ void AppendRows(const CompressedRowSparseMatrix& m);
+
+ void ToCRSMatrix(CRSMatrix* matrix) const;
+
+ // Low level access methods that expose the structure of the matrix.
+ const int* cols() const { return cols_.get(); }
+ int* mutable_cols() { return cols_.get(); }
+
+ const int* rows() const { return rows_.get(); }
+ int* mutable_rows() { return rows_.get(); }
+
+ const vector<int>& row_blocks() const { return row_blocks_; }
+ vector<int>* mutable_row_blocks() { return &row_blocks_; }
+
+ const vector<int>& col_blocks() const { return col_blocks_; }
+ vector<int>* mutable_col_blocks() { return &col_blocks_; }
+
+ private:
+ scoped_array<int> cols_;
+ scoped_array<int> rows_;
+ scoped_array<double> values_;
+
+ int num_rows_;
+ int num_cols_;
+ int max_num_nonzeros_;
+
+ // If the matrix has an underlying block structure, then it can also
+  // carry with it row and column block sizes. This is auxiliary and
+ // optional information for use by algorithms operating on the
+ // matrix. The class itself does not make use of this information in
+ // any way.
+ vector<int> row_blocks_;
+ vector<int> col_blocks_;
+
+ CERES_DISALLOW_COPY_AND_ASSIGN(CompressedRowSparseMatrix);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_
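
A minimal sketch exercising the interface above (not part of the change; the function name is illustrative):

    #include "ceres/compressed_row_sparse_matrix.h"

    void CompressedRowSketch() {
      const double diagonal[3] = {1.0, 2.0, 3.0};
      ceres::internal::CompressedRowSparseMatrix A(diagonal, 3);  // 3x3 diagonal

      const double x[3] = {1.0, 1.0, 1.0};
      double y[3] = {0.0, 0.0, 0.0};
      A.RightMultiply(x, y);  // y = {1, 2, 3}; RightMultiply accumulates into y

      ceres::internal::CompressedRowSparseMatrix B(diagonal, 3);
      A.AppendRows(B);   // A is now 6x3 with 6 nonzeros
      A.DeleteRows(3);   // drop the appended rows again; back to 3x3
    }
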
diff --git a/internal/ceres/compressed_row_sparse_matrix_test.cc b/internal/ceres/compressed_row_sparse_matrix_test.cc
new file mode 100644
index 0000000..c9c3f14
--- /dev/null
+++ b/internal/ceres/compressed_row_sparse_matrix_test.cc
@@ -0,0 +1,203 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/compressed_row_sparse_matrix.h"
+
+#include "ceres/casts.h"
+#include "ceres/crs_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/matrix_proto.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+void CompareMatrices(const SparseMatrix* a, const SparseMatrix* b) {
+ EXPECT_EQ(a->num_rows(), b->num_rows());
+ EXPECT_EQ(a->num_cols(), b->num_cols());
+
+ int num_rows = a->num_rows();
+ int num_cols = a->num_cols();
+
+ for (int i = 0; i < num_cols; ++i) {
+ Vector x = Vector::Zero(num_cols);
+ x(i) = 1.0;
+
+ Vector y_a = Vector::Zero(num_rows);
+ Vector y_b = Vector::Zero(num_rows);
+
+ a->RightMultiply(x.data(), y_a.data());
+ b->RightMultiply(x.data(), y_b.data());
+
+ EXPECT_EQ((y_a - y_b).norm(), 0);
+ }
+}
+
+class CompressedRowSparseMatrixTest : public ::testing::Test {
+ protected :
+ virtual void SetUp() {
+ scoped_ptr<LinearLeastSquaresProblem> problem(
+ CreateLinearLeastSquaresProblemFromId(1));
+
+ CHECK_NOTNULL(problem.get());
+
+ tsm.reset(down_cast<TripletSparseMatrix*>(problem->A.release()));
+ crsm.reset(new CompressedRowSparseMatrix(*tsm));
+
+ num_rows = tsm->num_rows();
+ num_cols = tsm->num_cols();
+ }
+
+ int num_rows;
+ int num_cols;
+
+ scoped_ptr<TripletSparseMatrix> tsm;
+ scoped_ptr<CompressedRowSparseMatrix> crsm;
+};
+
+TEST_F(CompressedRowSparseMatrixTest, RightMultiply) {
+ CompareMatrices(tsm.get(), crsm.get());
+}
+
+TEST_F(CompressedRowSparseMatrixTest, LeftMultiply) {
+ for (int i = 0; i < num_rows; ++i) {
+ Vector a = Vector::Zero(num_rows);
+ a(i) = 1.0;
+
+ Vector b1 = Vector::Zero(num_cols);
+ Vector b2 = Vector::Zero(num_cols);
+
+ tsm->LeftMultiply(a.data(), b1.data());
+ crsm->LeftMultiply(a.data(), b2.data());
+
+ EXPECT_EQ((b1 - b2).norm(), 0);
+ }
+}
+
+TEST_F(CompressedRowSparseMatrixTest, ColumnNorm) {
+ Vector b1 = Vector::Zero(num_cols);
+ Vector b2 = Vector::Zero(num_cols);
+
+ tsm->SquaredColumnNorm(b1.data());
+ crsm->SquaredColumnNorm(b2.data());
+
+ EXPECT_EQ((b1 - b2).norm(), 0);
+}
+
+TEST_F(CompressedRowSparseMatrixTest, Scale) {
+ Vector scale(num_cols);
+ for (int i = 0; i < num_cols; ++i) {
+ scale(i) = i + 1;
+ }
+
+ tsm->ScaleColumns(scale.data());
+ crsm->ScaleColumns(scale.data());
+ CompareMatrices(tsm.get(), crsm.get());
+}
+
+TEST_F(CompressedRowSparseMatrixTest, DeleteRows) {
+ for (int i = 0; i < num_rows; ++i) {
+ tsm->Resize(num_rows - i, num_cols);
+ crsm->DeleteRows(crsm->num_rows() - tsm->num_rows());
+ CompareMatrices(tsm.get(), crsm.get());
+ }
+}
+
+TEST_F(CompressedRowSparseMatrixTest, AppendRows) {
+ for (int i = 0; i < num_rows; ++i) {
+ TripletSparseMatrix tsm_appendage(*tsm);
+ tsm_appendage.Resize(i, num_cols);
+
+ tsm->AppendRows(tsm_appendage);
+ CompressedRowSparseMatrix crsm_appendage(tsm_appendage);
+ crsm->AppendRows(crsm_appendage);
+
+ CompareMatrices(tsm.get(), crsm.get());
+ }
+}
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+TEST_F(CompressedRowSparseMatrixTest, Serialization) {
+ SparseMatrixProto proto;
+ crsm->ToProto(&proto);
+
+ CompressedRowSparseMatrix n(proto);
+ ASSERT_EQ(n.num_rows(), crsm->num_rows());
+ ASSERT_EQ(n.num_cols(), crsm->num_cols());
+ ASSERT_EQ(n.num_nonzeros(), crsm->num_nonzeros());
+
+ for (int i = 0; i < n.num_rows() + 1; ++i) {
+ ASSERT_EQ(crsm->rows()[i], proto.compressed_row_matrix().rows(i));
+ ASSERT_EQ(crsm->rows()[i], n.rows()[i]);
+ }
+
+ for (int i = 0; i < crsm->num_nonzeros(); ++i) {
+ ASSERT_EQ(crsm->cols()[i], proto.compressed_row_matrix().cols(i));
+ ASSERT_EQ(crsm->cols()[i], n.cols()[i]);
+ ASSERT_EQ(crsm->values()[i], proto.compressed_row_matrix().values(i));
+ ASSERT_EQ(crsm->values()[i], n.values()[i]);
+ }
+}
+#endif
+
+TEST_F(CompressedRowSparseMatrixTest, ToDenseMatrix) {
+ Matrix tsm_dense;
+ Matrix crsm_dense;
+
+ tsm->ToDenseMatrix(&tsm_dense);
+ crsm->ToDenseMatrix(&crsm_dense);
+
+ EXPECT_EQ((tsm_dense - crsm_dense).norm(), 0.0);
+}
+
+TEST_F(CompressedRowSparseMatrixTest, ToCRSMatrix) {
+ CRSMatrix crs_matrix;
+ crsm->ToCRSMatrix(&crs_matrix);
+ EXPECT_EQ(crsm->num_rows(), crs_matrix.num_rows);
+ EXPECT_EQ(crsm->num_cols(), crs_matrix.num_cols);
+ EXPECT_EQ(crsm->num_rows() + 1, crs_matrix.rows.size());
+ EXPECT_EQ(crsm->num_nonzeros(), crs_matrix.cols.size());
+ EXPECT_EQ(crsm->num_nonzeros(), crs_matrix.values.size());
+
+ for (int i = 0; i < crsm->num_rows() + 1; ++i) {
+ EXPECT_EQ(crsm->rows()[i], crs_matrix.rows[i]);
+ }
+
+ for (int i = 0; i < crsm->num_nonzeros(); ++i) {
+ EXPECT_EQ(crsm->cols()[i], crs_matrix.cols[i]);
+ EXPECT_EQ(crsm->values()[i], crs_matrix.values[i]);
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/conditioned_cost_function.cc b/internal/ceres/conditioned_cost_function.cc
new file mode 100644
index 0000000..7322790
--- /dev/null
+++ b/internal/ceres/conditioned_cost_function.cc
@@ -0,0 +1,130 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wjr@google.com (William Rucklidge)
+//
+// This file contains the implementation of the conditioned cost function.
+
+#include "ceres/conditioned_cost_function.h"
+
+#include <cstddef>
+
+#include "ceres/internal/eigen.h"
+#include "ceres/stl_util.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+
+// This cost function has the same dimensions (parameters, residuals) as
+// the one it's wrapping.
+ConditionedCostFunction::ConditionedCostFunction(
+ CostFunction* wrapped_cost_function,
+ const vector<CostFunction*>& conditioners,
+ Ownership ownership)
+ : wrapped_cost_function_(wrapped_cost_function),
+ conditioners_(conditioners),
+ ownership_(ownership) {
+ // Set up our dimensions.
+ set_num_residuals(wrapped_cost_function_->num_residuals());
+ *mutable_parameter_block_sizes() =
+ wrapped_cost_function_->parameter_block_sizes();
+
+ // Sanity-check the conditioners' dimensions.
+ CHECK_EQ(wrapped_cost_function_->num_residuals(), conditioners_.size());
+ for (int i = 0; i < wrapped_cost_function_->num_residuals(); i++) {
+ if (conditioners[i]) {
+ CHECK_EQ(1, conditioners[i]->num_residuals());
+ CHECK_EQ(1, conditioners[i]->parameter_block_sizes().size());
+ CHECK_EQ(1, conditioners[i]->parameter_block_sizes()[0]);
+ }
+ }
+}
+
+ConditionedCostFunction::~ConditionedCostFunction() {
+ if (ownership_ == TAKE_OWNERSHIP) {
+ STLDeleteElements(&conditioners_);
+ } else {
+ wrapped_cost_function_.release();
+ }
+}
+
+bool ConditionedCostFunction::Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ bool success = wrapped_cost_function_->Evaluate(parameters, residuals,
+ jacobians);
+ if (!success) {
+ return false;
+ }
+
+ for (int r = 0; r < wrapped_cost_function_->num_residuals(); r++) {
+ // On output, we want to have
+ // residuals[r] = conditioners[r](wrapped_residuals[r])
+ // For parameter block i, column c,
+ // jacobians[i][r*parameter_block_size_[i] + c] =
+ // = d residual[r] / d parameters[i][c]
+ // = conditioners[r]'(wrapped_residuals[r]) *
+ // d wrapped_residuals[r] / d parameters[i][c]
+ if (conditioners_[r]) {
+ double conditioner_derivative;
+ double* conditioner_derivative_pointer = &conditioner_derivative;
+ double** conditioner_derivative_pointer2 =
+ &conditioner_derivative_pointer;
+ if (!jacobians) {
+ conditioner_derivative_pointer2 = NULL;
+ }
+
+ double unconditioned_residual = residuals[r];
+ double* parameter_pointer = &unconditioned_residual;
+ success = conditioners_[r]->Evaluate(&parameter_pointer,
+ &residuals[r],
+ conditioner_derivative_pointer2);
+ if (!success) {
+ return false;
+ }
+
+ if (jacobians) {
+ for (int i = 0;
+ i < wrapped_cost_function_->parameter_block_sizes().size();
+ i++) {
+ if (jacobians[i]) {
+ int parameter_block_size =
+ wrapped_cost_function_->parameter_block_sizes()[i];
+ VectorRef jacobian_row(jacobians[i] + r * parameter_block_size,
+ parameter_block_size, 1);
+ jacobian_row *= conditioner_derivative;
+ }
+ }
+ }
+ }
+ }
+ return true;
+}
+
+} // namespace ceres
diff --git a/internal/ceres/conditioned_cost_function_test.cc b/internal/ceres/conditioned_cost_function_test.cc
new file mode 100644
index 0000000..03c553f
--- /dev/null
+++ b/internal/ceres/conditioned_cost_function_test.cc
@@ -0,0 +1,126 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wjr@google.com (William Rucklidge)
+//
+// Tests for the conditioned cost function.
+
+#include "ceres/conditioned_cost_function.h"
+
+#include "ceres/internal/eigen.h"
+#include "ceres/normal_prior.h"
+#include "ceres/types.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// The size of the cost functions we build.
+static const int kTestCostFunctionSize = 3;
+
+// A simple cost function: return ax + b.
+class LinearCostFunction : public CostFunction {
+ public:
+ LinearCostFunction(double a, double b) : a_(a), b_(b) {
+ set_num_residuals(1);
+ mutable_parameter_block_sizes()->push_back(1);
+ }
+
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ *residuals = **parameters * a_ + b_;
+ if (jacobians && *jacobians) {
+ **jacobians = a_;
+ }
+
+ return true;
+ }
+
+ private:
+ const double a_, b_;
+};
+
+// Tests that ConditionedCostFunction does what it's supposed to.
+TEST(CostFunctionTest, ConditionedCostFunction) {
+ double v1[kTestCostFunctionSize], v2[kTestCostFunctionSize],
+ jac[kTestCostFunctionSize * kTestCostFunctionSize],
+ result[kTestCostFunctionSize];
+
+ for (int i = 0; i < kTestCostFunctionSize; i++) {
+ v1[i] = i;
+ v2[i] = i * 10;
+ // Seed a few garbage values in the Jacobian matrix, to make sure that
+ // they're overwritten.
+ jac[i * 2] = i * i;
+ result[i] = i * i * i;
+ }
+
+ // Make a cost function that computes x - v2
+ VectorRef v2_vector(v2, kTestCostFunctionSize, 1);
+ Matrix identity(kTestCostFunctionSize, kTestCostFunctionSize);
+ identity.setIdentity();
+ NormalPrior* difference_cost_function = new NormalPrior(identity, v2_vector);
+
+ vector<CostFunction*> conditioners;
+ for (int i = 0; i < kTestCostFunctionSize; i++) {
+ conditioners.push_back(new LinearCostFunction(i + 2, i * 7));
+ }
+
+ ConditionedCostFunction conditioned_cost_function(difference_cost_function,
+ conditioners,
+ TAKE_OWNERSHIP);
+ EXPECT_EQ(difference_cost_function->num_residuals(),
+ conditioned_cost_function.num_residuals());
+ EXPECT_EQ(difference_cost_function->parameter_block_sizes(),
+ conditioned_cost_function.parameter_block_sizes());
+
+ double *parameters[1];
+ parameters[0] = v1;
+ double *jacs[1];
+ jacs[0] = jac;
+
+ conditioned_cost_function.Evaluate(parameters, result, jacs);
+ for (int i = 0; i < kTestCostFunctionSize; i++) {
+ EXPECT_DOUBLE_EQ((i + 2) * (v1[i] - v2[i]) + i * 7, result[i]);
+ }
+
+ for (int i = 0; i < kTestCostFunctionSize; i++) {
+ for (int j = 0; j < kTestCostFunctionSize; j++) {
+ double actual = jac[i * kTestCostFunctionSize + j];
+ if (i != j) {
+ EXPECT_DOUBLE_EQ(0, actual);
+ } else {
+ EXPECT_DOUBLE_EQ(i + 2, actual);
+ }
+ }
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/conjugate_gradients_solver.cc b/internal/ceres/conjugate_gradients_solver.cc
new file mode 100644
index 0000000..ae8e877
--- /dev/null
+++ b/internal/ceres/conjugate_gradients_solver.cc
@@ -0,0 +1,233 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// A preconditioned conjugate gradients solver
+// (ConjugateGradientsSolver) for positive semidefinite linear
+// systems.
+//
+// We have also augmented the termination criterion used by this
+// solver to support not just residual-based termination, but also
+// termination based on the decrease in the value of the quadratic
+// model that CG optimizes.
+
+#include "ceres/conjugate_gradients_solver.h"
+
+#include <cmath>
+#include <cstddef>
+#include "ceres/fpclassify.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_operator.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
+bool IsZeroOrInfinity(double x) {
+ return ((x == 0.0) || (IsInfinite(x)));
+}
+
+// Constant used in the MATLAB implementation ~ 2 * eps.
+const double kEpsilon = 2.2204e-16;
+
+} // namespace
+
+ConjugateGradientsSolver::ConjugateGradientsSolver(
+ const LinearSolver::Options& options)
+ : options_(options) {
+}
+
+LinearSolver::Summary ConjugateGradientsSolver::Solve(
+ LinearOperator* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x) {
+ CHECK_NOTNULL(A);
+ CHECK_NOTNULL(x);
+ CHECK_NOTNULL(b);
+ CHECK_EQ(A->num_rows(), A->num_cols());
+
+ LinearSolver::Summary summary;
+ summary.termination_type = MAX_ITERATIONS;
+ summary.num_iterations = 0;
+
+ int num_cols = A->num_cols();
+ VectorRef xref(x, num_cols);
+ ConstVectorRef bref(b, num_cols);
+
+ double norm_b = bref.norm();
+ if (norm_b == 0.0) {
+ xref.setZero();
+ summary.termination_type = TOLERANCE;
+ return summary;
+ }
+
+ Vector r(num_cols);
+ Vector p(num_cols);
+ Vector z(num_cols);
+ Vector tmp(num_cols);
+
+ double tol_r = per_solve_options.r_tolerance * norm_b;
+
+ tmp.setZero();
+ A->RightMultiply(x, tmp.data());
+ r = bref - tmp;
+ double norm_r = r.norm();
+
+ if (norm_r <= tol_r) {
+ summary.termination_type = TOLERANCE;
+ return summary;
+ }
+
+ double rho = 1.0;
+
+ // Initial value of the quadratic model Q = x'Ax - 2 * b'x.
+ double Q0 = -1.0 * xref.dot(bref + r);
+
+ for (summary.num_iterations = 1;
+ summary.num_iterations < options_.max_num_iterations;
+ ++summary.num_iterations) {
+ VLOG(3) << "cg iteration " << summary.num_iterations;
+
+ // Apply preconditioner
+ if (per_solve_options.preconditioner != NULL) {
+ z.setZero();
+ per_solve_options.preconditioner->RightMultiply(r.data(), z.data());
+ } else {
+ z = r;
+ }
+
+ double last_rho = rho;
+ rho = r.dot(z);
+
+ if (IsZeroOrInfinity(rho)) {
+ LOG(ERROR) << "Numerical failure. rho = " << rho;
+ summary.termination_type = FAILURE;
+ break;
+ }
+
+ if (summary.num_iterations == 1) {
+ p = z;
+ } else {
+ double beta = rho / last_rho;
+ if (IsZeroOrInfinity(beta)) {
+ LOG(ERROR) << "Numerical failure. beta = " << beta;
+ summary.termination_type = FAILURE;
+ break;
+ }
+ p = z + beta * p;
+ }
+
+ Vector& q = z;
+ q.setZero();
+ A->RightMultiply(p.data(), q.data());
+ double pq = p.dot(q);
+
+ if ((pq <= 0) || IsInfinite(pq)) {
+ LOG(ERROR) << "Numerical failure. pq = " << pq;
+ summary.termination_type = FAILURE;
+ break;
+ }
+
+ double alpha = rho / pq;
+ if (IsInfinite(alpha)) {
+ LOG(ERROR) << "Numerical failure. alpha = " << alpha;
+ summary.termination_type = FAILURE;
+ break;
+ }
+
+ xref = xref + alpha * p;
+
+ // Ideally we would just use the update r = r - alpha*q to keep
+ // track of the residual vector. However this estimate tends to
+ // drift over time due to round off errors. Thus every
+ // residual_reset_period iterations, we calculate the residual as
+ // r = b - Ax. We do not do this every iteration because this
+ // requires an additional matrix vector multiply which would
+ // double the complexity of the CG algorithm.
+ if (summary.num_iterations % options_.residual_reset_period == 0) {
+ tmp.setZero();
+ A->RightMultiply(x, tmp.data());
+ r = bref - tmp;
+ } else {
+ r = r - alpha * q;
+ }
+
+ // Quadratic model based termination.
+ // Q1 = x'Ax - 2 * b'x.
+ double Q1 = -1.0 * xref.dot(bref + r);
+
+ // For PSD matrices A, let
+ //
+ // Q(x) = x'Ax - 2b'x
+ //
+ // be the cost of the quadratic function defined by A and b. Then,
+ // the solver terminates at iteration i if
+ //
+ // i * (Q(x_i) - Q(x_i-1)) / Q(x_i) < q_tolerance.
+ //
+ // This termination criterion is more useful when using CG to
+ // solve the Newton step. This particular convergence test comes
+ // from Stephen Nash's work on truncated Newton
+ // methods. References:
+ //
+ // 1. Stephen G. Nash & Ariela Sofer, Assessing A Search
+ // Direction Within A Truncated Newton Method, Operations
+ // Research Letters 9 (1990), 219-221.
+ //
+ // 2. Stephen G. Nash, A Survey of Truncated Newton Methods,
+ // Journal of Computational and Applied Mathematics,
+ // 124(1-2), 45-59, 2000.
+ //
+ double zeta = summary.num_iterations * (Q1 - Q0) / Q1;
+ VLOG(3) << "Q termination: zeta " << zeta
+ << " " << per_solve_options.q_tolerance;
+ if (zeta < per_solve_options.q_tolerance) {
+ summary.termination_type = TOLERANCE;
+ break;
+ }
+ Q0 = Q1;
+
+ // Residual based termination.
+ norm_r = r.norm();
+ VLOG(3) << "R termination: norm_r " << norm_r
+ << " " << tol_r;
+ if (norm_r <= tol_r) {
+ summary.termination_type = TOLERANCE;
+ break;
+ }
+ }
+
+ return summary;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/conjugate_gradients_solver.h b/internal/ceres/conjugate_gradients_solver.h
new file mode 100644
index 0000000..b8dfa56
--- /dev/null
+++ b/internal/ceres/conjugate_gradients_solver.h
@@ -0,0 +1,74 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Preconditioned Conjugate Gradients based solver for positive
+// semidefinite linear systems.
+
+#ifndef CERES_INTERNAL_CONJUGATE_GRADIENTS_SOLVER_H_
+#define CERES_INTERNAL_CONJUGATE_GRADIENTS_SOLVER_H_
+
+#include "ceres/linear_solver.h"
+#include "ceres/internal/macros.h"
+
+namespace ceres {
+namespace internal {
+
+class LinearOperator;
+
+// This class implements the now classical Conjugate Gradients
+// algorithm of Hestenes & Stiefel for solving positive semidefinite
+// linear systems. Optionally, it can also use a preconditioner to
+// reduce the condition number of the linear system and improve the
+// convergence rate. Modern references for Conjugate Gradients are the
+// books by Yousef Saad and Trefethen & Bau. This implementation of CG
+// has been augmented with additional termination tests that are
+// needed for forcing early termination when used as part of an
+// inexact Newton solver.
+//
+// For more details see the documentation for
+// LinearSolver::PerSolveOptions::r_tolerance and
+// LinearSolver::PerSolveOptions::q_tolerance in linear_solver.h.
+class ConjugateGradientsSolver : public LinearSolver {
+ public:
+ explicit ConjugateGradientsSolver(const LinearSolver::Options& options);
+ virtual Summary Solve(LinearOperator* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x);
+
+ private:
+ const LinearSolver::Options options_;
+ CERES_DISALLOW_COPY_AND_ASSIGN(ConjugateGradientsSolver);
+};
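+
+// A minimal usage sketch (illustrative only; the option and tolerance
+// values below are assumptions, not prescribed by this change):
+//
+// LinearSolver::Options options;
+// options.max_num_iterations = 100;
+// ConjugateGradientsSolver solver(options);
+//
+// LinearSolver::PerSolveOptions per_solve_options;
+// per_solve_options.r_tolerance = 1e-9;
+// LinearSolver::Summary summary =
+// solver.Solve(A, b, per_solve_options, x);
+//
+// where A is a LinearOperator* with num_rows() == num_cols(), and b and x
+// point to arrays of size A->num_cols().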
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_CONJUGATE_GRADIENTS_SOLVER_H_
diff --git a/internal/ceres/coordinate_descent_minimizer.cc b/internal/ceres/coordinate_descent_minimizer.cc
new file mode 100644
index 0000000..5ce110c
--- /dev/null
+++ b/internal/ceres/coordinate_descent_minimizer.cc
@@ -0,0 +1,235 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/coordinate_descent_minimizer.h"
+
+#ifdef CERES_USE_OPENMP
+#include <omp.h>
+#endif
+
+#include <iterator>
+#include <numeric>
+#include <vector>
+#include "ceres/evaluator.h"
+#include "ceres/linear_solver.h"
+#include "ceres/minimizer.h"
+#include "ceres/ordered_groups.h"
+#include "ceres/parameter_block.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/solver.h"
+#include "ceres/solver_impl.h"
+#include "ceres/trust_region_minimizer.h"
+#include "ceres/trust_region_strategy.h"
+
+namespace ceres {
+namespace internal {
+
+CoordinateDescentMinimizer::~CoordinateDescentMinimizer() {
+}
+
+bool CoordinateDescentMinimizer::Init(
+ const Program& program,
+ const ProblemImpl::ParameterMap& parameter_map,
+ const ParameterBlockOrdering& ordering,
+ string* error) {
+ parameter_blocks_.clear();
+ independent_set_offsets_.clear();
+ independent_set_offsets_.push_back(0);
+
+ // Serialize the OrderedGroups into a vector of parameter block
+ // offsets for parallel access.
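+ //
+ // For example (illustrative): an ordering with two groups of sizes 3
+ // and 2 yields five entries in parameter_blocks_ and
+ // independent_set_offsets_ = {0, 3, 5}.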
+ map<ParameterBlock*, int> parameter_block_index;
+ map<int, set<double*> > group_to_elements = ordering.group_to_elements();
+ for (map<int, set<double*> >::const_iterator it = group_to_elements.begin();
+ it != group_to_elements.end();
+ ++it) {
+ for (set<double*>::const_iterator ptr_it = it->second.begin();
+ ptr_it != it->second.end();
+ ++ptr_it) {
+ parameter_blocks_.push_back(parameter_map.find(*ptr_it)->second);
+ parameter_block_index[parameter_blocks_.back()] =
+ parameter_blocks_.size() - 1;
+ }
+ independent_set_offsets_.push_back(
+ independent_set_offsets_.back() + it->second.size());
+ }
+
+ // The ordering does not have to contain all parameter blocks, so
+ // assign zero offsets (i.e. empty independent sets) to the parameter
+ // blocks that are not part of the ordering.
+ const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
+ for (int i = 0; i < parameter_blocks.size(); ++i) {
+ if (!ordering.IsMember(parameter_blocks[i]->mutable_user_state())) {
+ parameter_blocks_.push_back(parameter_blocks[i]);
+ independent_set_offsets_.push_back(independent_set_offsets_.back());
+ }
+ }
+
+ // Compute the set of residual blocks that depend on each parameter
+ // block.
+ residual_blocks_.resize(parameter_block_index.size());
+ const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
+ for (int i = 0; i < residual_blocks.size(); ++i) {
+ ResidualBlock* residual_block = residual_blocks[i];
+ const int num_parameter_blocks = residual_block->NumParameterBlocks();
+ for (int j = 0; j < num_parameter_blocks; ++j) {
+ ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
+ const map<ParameterBlock*, int>::const_iterator it =
+ parameter_block_index.find(parameter_block);
+ if (it != parameter_block_index.end()) {
+ residual_blocks_[it->second].push_back(residual_block);
+ }
+ }
+ }
+
+ evaluator_options_.linear_solver_type = DENSE_QR;
+ evaluator_options_.num_eliminate_blocks = 0;
+ evaluator_options_.num_threads = 1;
+
+ return true;
+}
+
+void CoordinateDescentMinimizer::Minimize(
+ const Minimizer::Options& options,
+ double* parameters,
+ Solver::Summary* summary) {
+ // Set the state and mark all parameter blocks constant.
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ ParameterBlock* parameter_block = parameter_blocks_[i];
+ parameter_block->SetState(parameters + parameter_block->state_offset());
+ parameter_block->SetConstant();
+ }
+
+ scoped_array<LinearSolver*> linear_solvers(new LinearSolver*[options.num_threads]);
+
+ LinearSolver::Options linear_solver_options;
+ linear_solver_options.type = DENSE_QR;
+
+ for (int i = 0; i < options.num_threads; ++i) {
+ linear_solvers[i] = LinearSolver::Create(linear_solver_options);
+ }
+
+ for (int i = 0; i < independent_set_offsets_.size() - 1; ++i) {
+ // No point paying the price for an OpenMP call if the set is of
+ // size zero.
+ if (independent_set_offsets_[i] == independent_set_offsets_[i + 1]) {
+ continue;
+ }
+
+ // The parameter blocks in each independent set can be optimized
+ // in parallel, since they do not co-occur in any residual block.
+#pragma omp parallel for num_threads(options.num_threads)
+ for (int j = independent_set_offsets_[i];
+ j < independent_set_offsets_[i + 1];
+ ++j) {
+#ifdef CERES_USE_OPENMP
+ int thread_id = omp_get_thread_num();
+#else
+ int thread_id = 0;
+#endif
+
+ ParameterBlock* parameter_block = parameter_blocks_[j];
+ const int old_index = parameter_block->index();
+ const int old_delta_offset = parameter_block->delta_offset();
+ parameter_block->SetVarying();
+ parameter_block->set_index(0);
+ parameter_block->set_delta_offset(0);
+
+ Program inner_program;
+ inner_program.mutable_parameter_blocks()->push_back(parameter_block);
+ *inner_program.mutable_residual_blocks() = residual_blocks_[j];
+
+ // TODO(sameeragarwal): Better error handling. Right now we
+ // assume that this is not going to lead to problems of any
+ // sort. Basically we should be checking for numerical failure
+ // of some sort.
+ //
+ // On the other hand, if the optimization fails, that is in some
+ // ways acceptable, since it will not change the parameters and we
+ // can simply carry on.
+ Solver::Summary inner_summary;
+ Solve(&inner_program,
+ linear_solvers[thread_id],
+ parameters + parameter_block->state_offset(),
+ &inner_summary);
+
+ parameter_block->set_index(old_index);
+ parameter_block->set_delta_offset(old_delta_offset);
+ parameter_block->SetState(parameters + parameter_block->state_offset());
+ parameter_block->SetConstant();
+ }
+ }
+
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ parameter_blocks_[i]->SetVarying();
+ }
+
+ for (int i = 0; i < options.num_threads; ++i) {
+ delete linear_solvers[i];
+ }
+}
+
+// Solve the optimization problem for one parameter block.
+void CoordinateDescentMinimizer::Solve(Program* program,
+ LinearSolver* linear_solver,
+ double* parameter,
+ Solver::Summary* summary) {
+ *summary = Solver::Summary();
+ summary->initial_cost = 0.0;
+ summary->fixed_cost = 0.0;
+ summary->final_cost = 0.0;
+ string error;
+
+ scoped_ptr<Evaluator> evaluator(
+ Evaluator::Create(evaluator_options_, program, &error));
+ CHECK_NOTNULL(evaluator.get());
+
+ scoped_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());
+ CHECK_NOTNULL(jacobian.get());
+
+ TrustRegionStrategy::Options trs_options;
+ trs_options.linear_solver = linear_solver;
+
+ scoped_ptr<TrustRegionStrategy> trust_region_strategy(
+ CHECK_NOTNULL(TrustRegionStrategy::Create(trs_options)));
+
+ Minimizer::Options minimizer_options;
+ minimizer_options.evaluator = evaluator.get();
+ minimizer_options.jacobian = jacobian.get();
+ minimizer_options.trust_region_strategy = trust_region_strategy.get();
+
+ TrustRegionMinimizer minimizer;
+ minimizer.Minimize(minimizer_options, parameter, summary);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/coordinate_descent_minimizer.h b/internal/ceres/coordinate_descent_minimizer.h
new file mode 100644
index 0000000..fb96b3c
--- /dev/null
+++ b/internal/ceres/coordinate_descent_minimizer.h
@@ -0,0 +1,86 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_COORDINATE_DESCENT_MINIMIZER_H_
+#define CERES_INTERNAL_COORDINATE_DESCENT_MINIMIZER_H_
+
+#include "ceres/evaluator.h"
+#include "ceres/minimizer.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/solver.h"
+
+namespace ceres {
+namespace internal {
+
+// Given a Program, and a ParameterBlockOrdering which partitions
+// (non-exhaustively) the Hessian matrix into independent sets,
+// perform coordinate descent on the parameter blocks in the
+// ordering. The independent set structure allows for all parameter
+// blocks in the same independent set to be optimized in parallel, and
+// the order of the independent set determines the order in which the
+// parameter block groups are optimized.
+//
+// The minimizer assumes that none of the parameter blocks in the
+// program are constant.
+class CoordinateDescentMinimizer : public Minimizer {
+ public:
+ bool Init(const Program& program,
+ const ProblemImpl::ParameterMap& parameter_map,
+ const ParameterBlockOrdering& ordering,
+ string* error);
+
+ // Minimizer interface.
+ virtual ~CoordinateDescentMinimizer();
+ virtual void Minimize(const Minimizer::Options& options,
+ double* parameters,
+ Solver::Summary* summary);
+
+ private:
+ void Solve(Program* program,
+ LinearSolver* linear_solver,
+ double* parameters,
+ Solver::Summary* summary);
+
+ vector<ParameterBlock*> parameter_blocks_;
+ vector<vector<ResidualBlock*> > residual_blocks_;
+ // The optimization is performed in rounds. In each round all the
+ // parameter blocks that form one independent set are optimized in
+ // parallel. This array marks the boundaries of the independent
+ // sets in parameter_blocks_.
+ vector<int> independent_set_offsets_;
+
+ Evaluator::Options evaluator_options_;
+};
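+
+// A minimal usage sketch (illustrative only; the program, parameter map,
+// ordering, minimizer options and summary are assumed to exist on the
+// caller's side):
+//
+// CoordinateDescentMinimizer minimizer;
+// string error;
+// if (minimizer.Init(program, parameter_map, ordering, &error)) {
+// minimizer.Minimize(minimizer_options, parameters, &summary);
+// }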
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_COORDINATE_DESCENT_MINIMIZER_H_
diff --git a/internal/ceres/corrector.cc b/internal/ceres/corrector.cc
new file mode 100644
index 0000000..eff4dff
--- /dev/null
+++ b/internal/ceres/corrector.cc
@@ -0,0 +1,125 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/corrector.h"
+
+#include <cstddef>
+#include <cmath>
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+Corrector::Corrector(double sq_norm, const double rho[3]) {
+ CHECK_GE(sq_norm, 0.0);
+ CHECK_GT(rho[1], 0.0);
+ sqrt_rho1_ = sqrt(rho[1]);
+
+ // If sq_norm = 0.0, the correction becomes trivial: the residual
+ // and the jacobian are simply scaled by the square root of the
+ // derivative of rho. Handling this case explicitly avoids the
+ // divide by zero error that would occur below.
+ //
+ // The case where rho'' < 0 also gets special handling. Technically
+ // it shouldn't, and the computation of the scaling should proceed
+ // as below; however, we found in experiments that applying the
+ // curvature correction when rho'' < 0, which is the case when we
+ // are in the outlier region, slows down the convergence of the
+ // algorithm significantly.
+ //
+ // Thus, we have divided the action of the robustifier into two
+ // parts. In the inlier region, we do the full second order
+ // correction, which re-weights the gradient of the function by the
+ // square root of the derivative of rho, and the Gauss-Newton
+ // Hessian gets both the scaling and the rank-1 curvature
+ // correction. Normally, alpha is upper bounded by one, but with
+ // this change, alpha is bounded above by zero.
+ //
+ // Empirically we have observed that the full Triggs correction and
+ // the clamped correction both start out as very good approximations
+ // to the loss function when we are in the convex part of the
+ // function, but as the function starts transitioning from convex to
+ // concave, the Triggs approximation diverges more and more and
+ // ultimately becomes linear. The clamped Triggs model, however,
+ // remains quadratic.
+ //
+ // The reason the Triggs approximation becomes so poor is that the
+ // curvature correction it applies to the Gauss-Newton Hessian goes
+ // from being a full-rank correction to a rank-deficient one, making
+ // the inversion of the Hessian fraught with all sorts of misery and
+ // suffering.
+ //
+ // The clamped correction retains its quadratic nature, and
+ // inverting it is always well behaved.
+ if ((sq_norm == 0.0) || (rho[2] <= 0.0)) {
+ residual_scaling_ = sqrt_rho1_;
+ alpha_sq_norm_ = 0.0;
+ return;
+ }
+
+ // Calculate the smaller of the two solutions to the equation
+ //
+ // 0.5 * alpha^2 - alpha - rho'' / rho' * z'z = 0.
+ //
+ // Start by calculating the discriminant D.
+ const double D = 1.0 + 2.0 * sq_norm * rho[2] / rho[1];
+
+ // Since both rho[1] and rho[2] are guaranteed to be positive at
+ // this point, we know that D > 1.0.
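+ //
+ // For example (illustrative numbers only): sq_norm = 1.0, rho[1] = 1.0
+ // and rho[2] = 0.5 give D = 2.0, alpha = 1 - sqrt(2) ~ -0.414,
+ // residual_scaling_ ~ 0.707 and alpha_sq_norm_ ~ -0.414.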
+
+ const double alpha = 1.0 - sqrt(D);
+
+ // Calculate the constants needed by the correction routines.
+ residual_scaling_ = sqrt_rho1_ / (1 - alpha);
+ alpha_sq_norm_ = alpha / sq_norm;
+}
+
+void Corrector::CorrectResiduals(int nrow, double* residuals) {
+ DCHECK(residuals != NULL);
+ VectorRef r_ref(residuals, nrow);
+ // Equation 11 in BANS.
+ r_ref *= residual_scaling_;
+}
+
+void Corrector::CorrectJacobian(int nrow, int ncol,
+ double* residuals, double* jacobian) {
+ DCHECK(residuals != NULL);
+ DCHECK(jacobian != NULL);
+ ConstVectorRef r_ref(residuals, nrow);
+ MatrixRef j_ref(jacobian, nrow, ncol);
+
+ // Equation 11 in BANS.
+ j_ref = sqrt_rho1_ * (j_ref - alpha_sq_norm_ *
+ r_ref * (r_ref.transpose() * j_ref));
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/corrector.h b/internal/ceres/corrector.h
new file mode 100644
index 0000000..9914641
--- /dev/null
+++ b/internal/ceres/corrector.h
@@ -0,0 +1,88 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Class definition for the object that is responsible for applying a
+// second order correction to the Gauss-Newton approximation, based on
+// the ideas in BANS by Triggs et al.
+
+#ifndef CERES_INTERNAL_CORRECTOR_H_
+#define CERES_INTERNAL_CORRECTOR_H_
+
+namespace ceres {
+namespace internal {
+
+// Corrector is responsible for applying the second order correction
+// to the residual and jacobian of a least squares problem based on a
+// radial robust loss.
+//
+// The key idea here is to look at the expressions for the robustified
+// Gauss-Newton approximation and then take its square root to get the
+// corresponding corrections to the residual and jacobian. For the
+// full expressions see Eq. 10 and 11 in BANS by Triggs et al.
+class Corrector {
+ public:
+ // The constructor takes the squared norm, the value, the first and
+ // second derivatives of the LossFunction. It precalculates some of
+ // the constants that are needed to apply the correction. The
+ // correction constant alpha is constrained to be smaller than 1; if
+ // it becomes larger than 1, it will reverse the sign of the
+ // residual and the correction. If alpha is equal to 1, it will
+ // result in a divide by zero error. Thus we constrain alpha to be
+ // bounded above by 1 - epsilon_.
+ //
+ // rho[1] needs to be positive. The constructor will crash if this
+ // condition is not met.
+ //
+ // In practical use, CorrectJacobian should always be called before
+ // CorrectResiduals, because the jacobian correction depends on the
+ // values of the uncorrected residuals.
+ explicit Corrector(double sq_norm, const double rho[3]);
+
+ // residuals *= sqrt(rho[1]) / (1 - alpha)
+ void CorrectResiduals(int nrow, double* residuals);
+
+ // jacobian = sqrt(rho[1]) * jacobian -
+ // sqrt(rho[1]) * alpha / sq_norm * residuals * residuals' * jacobian.
+ //
+ // The method assumes that the jacobian has row-major storage. It is
+ // the caller's responsibility to ensure that the pointer to
+ // jacobian is not null.
+ void CorrectJacobian(int nrow, int ncol,
+ double* residuals, double* jacobian);
+
+ private:
+ double sqrt_rho1_;
+ double residual_scaling_;
+ double alpha_sq_norm_;
+};
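+
+// A minimal usage sketch (illustrative only; rho is assumed to come from a
+// LossFunction::Evaluate(sq_norm, rho) call on the caller's side):
+//
+// double rho[3];
+// loss_function->Evaluate(sq_norm, rho);
+// Corrector corrector(sq_norm, rho);
+// corrector.CorrectJacobian(num_rows, num_cols, residuals, jacobian);
+// corrector.CorrectResiduals(num_rows, residuals);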
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_CORRECTOR_H_
diff --git a/internal/ceres/corrector_test.cc b/internal/ceres/corrector_test.cc
new file mode 100644
index 0000000..55e7d6b
--- /dev/null
+++ b/internal/ceres/corrector_test.cc
@@ -0,0 +1,276 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/corrector.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+#include <cstdlib>
+#include "gtest/gtest.h"
+#include "ceres/random.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+// If rho[1] is zero, the Corrector constructor should crash.
+TEST(Corrector, ZeroGradientDeathTest) {
+ const double kRho[] = {0.0, 0.0, 0.0};
+ EXPECT_DEATH_IF_SUPPORTED({Corrector c(1.0, kRho);},
+ ".*");
+}
+
+// If rho[1] is negative, the Corrector constructor should crash.
+TEST(Corrector, NegativeGradientDeathTest) {
+ const double kRho[] = {0.0, -0.1, 0.0};
+ EXPECT_DEATH_IF_SUPPORTED({Corrector c(1.0, kRho);},
+ ".*");
+}
+
+TEST(Corrector, ScalarCorrection) {
+ double residuals = sqrt(3.0);
+ double jacobian = 10.0;
+ double sq_norm = residuals * residuals;
+
+ const double kRho[] = {sq_norm, 0.1, -0.01};
+
+ // In light of the rho'' < 0 clamping now implemented in
+ // corrector.cc, alpha = 0 whenever rho'' < 0.
+ const double kAlpha = 0.0;
+
+ // Thus the expected value of the residual is
+ // residual[i] * sqrt(kRho[1]) / (1.0 - kAlpha).
+ const double kExpectedResidual =
+ residuals * sqrt(kRho[1]) / (1 - kAlpha);
+
+ // The jacobian in this case will be
+ // sqrt(kRho[1]) * (1 - kAlpha) * jacobian.
+ const double kExpectedJacobian = sqrt(kRho[1]) * (1 - kAlpha) * jacobian;
+
+ Corrector c(sq_norm, kRho);
+ c.CorrectJacobian(1, 1, &residuals, &jacobian);
+ c.CorrectResiduals(1, &residuals);
+
+ ASSERT_NEAR(residuals, kExpectedResidual, 1e-6);
+ ASSERT_NEAR(kExpectedJacobian, jacobian, 1e-6);
+}
+
+TEST(Corrector, ScalarCorrectionZeroResidual) {
+ double residuals = 0.0;
+ double jacobian = 10.0;
+ double sq_norm = residuals * residuals;
+
+ const double kRho[] = {0.0, 0.1, -0.01};
+ Corrector c(sq_norm, kRho);
+
+ // The alpha equation is
+ // 1/2 alpha^2 - alpha + 0.0 = 0.
+ // i.e. alpha = 1.0 - sqrt(1.0).
+ // alpha = 0.0.
+ // Thus the expected value of the residual is
+ // residual[i] * sqrt(kRho[1])
+ const double kExpectedResidual = residuals * sqrt(kRho[1]);
+
+ // The jacobian in this case will be
+ // sqrt(kRho[1]) * jacobian.
+ const double kExpectedJacobian = sqrt(kRho[1]) * jacobian;
+
+ c.CorrectJacobian(1, 1, &residuals, &jacobian);
+ c.CorrectResiduals(1, &residuals);
+
+ ASSERT_NEAR(residuals, kExpectedResidual, 1e-6);
+ ASSERT_NEAR(kExpectedJacobian, jacobian, 1e-6);
+}
+
+// Scaling behaviour for one dimensional functions.
+TEST(Corrector, ScalarCorrectionAlphaClamped) {
+ double residuals = sqrt(3.0);
+ double jacobian = 10.0;
+ double sq_norm = residuals * residuals;
+
+ const double kRho[] = {3, 0.1, -0.1};
+
+ // rho[2] < 0 -> alpha = 0.0
+ const double kAlpha = 0.0;
+
+ // Thus the expected value of the residual is
+ // residual[i] * sqrt(kRho[1]) / (1.0 - kAlpha).
+ const double kExpectedResidual =
+ residuals * sqrt(kRho[1]) / (1.0 - kAlpha);
+
+ // The jacobian in this case will be scaled by
+ // sqrt(rho[1]) * (1 - alpha) * J.
+ const double kExpectedJacobian = sqrt(kRho[1]) *
+ (1.0 - kAlpha) * jacobian;
+
+ Corrector c(sq_norm, kRho);
+ c.CorrectJacobian(1, 1, &residuals, &jacobian);
+ c.CorrectResiduals(1, &residuals);
+
+ ASSERT_NEAR(residuals, kExpectedResidual, 1e-6);
+ ASSERT_NEAR(kExpectedJacobian, jacobian, 1e-6);
+}
+
+// Test that the corrected multidimensional residual and jacobians
+// match the expected values and the resulting modified normal
+// equations match the robustified gauss newton approximation.
+TEST(Corrector, MultidimensionalGaussNewtonApproximation) {
+ double residuals[3];
+ double jacobian[2 * 3];
+ double rho[3];
+
+ // Eigen matrix references for linear algebra.
+ MatrixRef jac(jacobian, 3, 2);
+ VectorRef res(residuals, 3);
+
+ // Ground truth values of the modified jacobian and residuals.
+ Matrix g_jac(3, 2);
+ Vector g_res(3);
+
+ // Ground truth values of the robustified Gauss-Newton
+ // approximation.
+ Matrix g_hess(2, 2);
+ Vector g_grad(2);
+
+ // Corrected hessian and gradient implied by the modified jacobian
+ // and hessians.
+ Matrix c_hess(2, 2);
+ Vector c_grad(2);
+
+ srand(5);
+ for (int iter = 0; iter < 10000; ++iter) {
+ // Initialize the jacobian and residual.
+ for (int i = 0; i < 2 * 3; ++i)
+ jacobian[i] = RandDouble();
+ for (int i = 0; i < 3; ++i)
+ residuals[i] = RandDouble();
+
+ const double sq_norm = res.dot(res);
+
+ rho[0] = sq_norm;
+ rho[1] = RandDouble();
+ rho[2] = 2.0 * RandDouble() - 1.0;
+
+ // If rho[2] > 0, then the curvature correction to the correction
+ // and the gauss newton approximation will match. Otherwise, we
+ // will clamp alpha to 0.
+
+ const double kD = 1 + 2 * rho[2] / rho[1] * sq_norm;
+ const double kAlpha = (rho[2] > 0.0) ? 1 - sqrt(kD) : 0.0;
+
+ // Ground truth values.
+ g_res = sqrt(rho[1]) / (1.0 - kAlpha) * res;
+ g_jac = sqrt(rho[1]) * (jac - kAlpha / sq_norm *
+ res * res.transpose() * jac);
+
+ g_grad = rho[1] * jac.transpose() * res;
+ g_hess = rho[1] * jac.transpose() * jac +
+ 2.0 * rho[2] * jac.transpose() * res * res.transpose() * jac;
+
+ Corrector c(sq_norm, rho);
+ c.CorrectJacobian(3, 2, residuals, jacobian);
+ c.CorrectResiduals(3, residuals);
+
+ // Corrected gradient and hessian.
+ c_grad = jac.transpose() * res;
+ c_hess = jac.transpose() * jac;
+
+ ASSERT_NEAR((g_res - res).norm(), 0.0, 1e-10);
+ ASSERT_NEAR((g_jac - jac).norm(), 0.0, 1e-10);
+
+ ASSERT_NEAR((g_grad - c_grad).norm(), 0.0, 1e-10);
+ }
+}
+
+TEST(Corrector, MultidimensionalGaussNewtonApproximationZeroResidual) {
+ double residuals[3];
+ double jacobian[2 * 3];
+ double rho[3];
+
+ // Eigen matrix references for linear algebra.
+ MatrixRef jac(jacobian, 3, 2);
+ VectorRef res(residuals, 3);
+
+ // Ground truth values of the modified jacobian and residuals.
+ Matrix g_jac(3, 2);
+ Vector g_res(3);
+
+ // Ground truth values of the robustified Gauss-Newton
+ // approximation.
+ Matrix g_hess(2, 2);
+ Vector g_grad(2);
+
+ // Corrected hessian and gradient implied by the modified jacobian
+ // and hessians.
+ Matrix c_hess(2, 2);
+ Vector c_grad(2);
+
+ srand(5);
+ for (int iter = 0; iter < 10000; ++iter) {
+ // Initialize the jacobian.
+ for (int i = 0; i < 2 * 3; ++i)
+ jacobian[i] = RandDouble();
+
+ // Zero residuals
+ res.setZero();
+
+ const double sq_norm = res.dot(res);
+
+ rho[0] = sq_norm;
+ rho[1] = RandDouble();
+ rho[2] = 2 * RandDouble() - 1.0;
+
+ // Ground truth values.
+ g_res = sqrt(rho[1]) * res;
+ g_jac = sqrt(rho[1]) * jac;
+
+ g_grad = rho[1] * jac.transpose() * res;
+ g_hess = rho[1] * jac.transpose() * jac +
+ 2.0 * rho[2] * jac.transpose() * res * res.transpose() * jac;
+
+ Corrector c(sq_norm, rho);
+ c.CorrectJacobian(3, 2, residuals, jacobian);
+ c.CorrectResiduals(3, residuals);
+
+ // Corrected gradient and hessian.
+ c_grad = jac.transpose() * res;
+ c_hess = jac.transpose() * jac;
+
+ ASSERT_NEAR((g_res - res).norm(), 0.0, 1e-10);
+ ASSERT_NEAR((g_jac - jac).norm(), 0.0, 1e-10);
+
+ ASSERT_NEAR((g_grad - c_grad).norm(), 0.0, 1e-10);
+ ASSERT_NEAR((g_hess - c_hess).norm(), 0.0, 1e-10);
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/cxsparse.cc b/internal/ceres/cxsparse.cc
new file mode 100644
index 0000000..21c98e0
--- /dev/null
+++ b/internal/ceres/cxsparse.cc
@@ -0,0 +1,130 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: strandmark@google.com (Petter Strandmark)
+
+#ifndef CERES_NO_CXSPARSE
+
+#include "ceres/cxsparse.h"
+
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+CXSparse::CXSparse() : scratch_(NULL), scratch_size_(0) {
+}
+
+CXSparse::~CXSparse() {
+ if (scratch_size_ > 0) {
+ cs_free(scratch_);
+ }
+}
+
+bool CXSparse::SolveCholesky(cs_di* A,
+ cs_dis* symbolic_factorization,
+ double* b) {
+ // Make sure we have enough scratch space available.
+ if (scratch_size_ < A->n) {
+ if (scratch_size_ > 0) {
+ cs_free(scratch_);
+ }
+ scratch_ = reinterpret_cast<CS_ENTRY*>(cs_malloc(A->n, sizeof(CS_ENTRY)));
+ scratch_size_ = A->n; // Remember the new size so scratch_ is freed later.
+ }
+
+ // Solve using Cholesky factorization
+ csn* numeric_factorization = cs_chol(A, symbolic_factorization);
+ if (numeric_factorization == NULL) {
+ LOG(WARNING) << "Cholesky factorization failed.";
+ return false;
+ }
+
+ // When the Cholesky factorization succeeds, these methods are guaranteed to
+ // succeed as well. In the comments below, "x" refers to the scratch space.
+ //
+ // Set x = P * b.
+ cs_ipvec(symbolic_factorization->pinv, b, scratch_, A->n);
+
+ // Set x = L \ x.
+ cs_lsolve(numeric_factorization->L, scratch_);
+
+ // Set x = L' \ x.
+ cs_ltsolve(numeric_factorization->L, scratch_);
+
+ // Set b = P' * x.
+ cs_pvec(symbolic_factorization->pinv, scratch_, b, A->n);
+
+ // Free Cholesky factorization.
+ cs_nfree(numeric_factorization);
+ return true;
+}
+
+cs_dis* CXSparse::AnalyzeCholesky(cs_di* A) {
+ // order = 1 for Cholesky factorization.
+ return cs_schol(1, A);
+}
+
+cs_di CXSparse::CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A) {
+ cs_di At;
+ At.m = A->num_cols();
+ At.n = A->num_rows();
+ At.nz = -1;
+ At.nzmax = A->num_nonzeros();
+ At.p = A->mutable_rows();
+ At.i = A->mutable_cols();
+ At.x = A->mutable_values();
+ return At;
+}
+
+cs_di* CXSparse::CreateSparseMatrix(TripletSparseMatrix* tsm) {
+ cs_di_sparse tsm_wrapper;
+ tsm_wrapper.nzmax = tsm->num_nonzeros();
+ tsm_wrapper.nz = tsm->num_nonzeros();
+ tsm_wrapper.m = tsm->num_rows();
+ tsm_wrapper.n = tsm->num_cols();
+ tsm_wrapper.p = tsm->mutable_cols();
+ tsm_wrapper.i = tsm->mutable_rows();
+ tsm_wrapper.x = tsm->mutable_values();
+
+ return cs_compress(&tsm_wrapper);
+}
+
+void CXSparse::Free(cs_di* factor) {
+ cs_free(factor);
+}
+
+void CXSparse::Free(cs_dis* factor) {
+ cs_sfree(factor);
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_NO_CXSPARSE
diff --git a/internal/ceres/cxsparse.h b/internal/ceres/cxsparse.h
new file mode 100644
index 0000000..d3b64fc
--- /dev/null
+++ b/internal/ceres/cxsparse.h
@@ -0,0 +1,90 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: strandmark@google.com (Petter Strandmark)
+
+#ifndef CERES_INTERNAL_CXSPARSE_H_
+#define CERES_INTERNAL_CXSPARSE_H_
+
+#ifndef CERES_NO_CXSPARSE
+
+#include "cs.h"
+
+namespace ceres {
+namespace internal {
+
+class CompressedRowSparseMatrix;
+class TripletSparseMatrix;
+
+// This object provides access to solving linear systems using Cholesky
+// factorization with a known symbolic factorization. This feature does not
+// explicitly exist in CXSparse. The methods in the class are nonstatic because
+// the class manages internal scratch space.
+class CXSparse {
+ public:
+ CXSparse();
+ ~CXSparse();
+
+ // Solves a symmetric linear system A * x = b using Cholesky factorization.
+ // A - The system matrix.
+ // symbolic_factorization - The symbolic factorization of A. This is obtained
+ // from AnalyzeCholesky.
+ // b - The right hand side of the linear equation. This
+ // array will also receive the solution.
+ // Returns false if Cholesky factorization of A fails.
+ bool SolveCholesky(cs_di* A, cs_dis* symbolic_factorization, double* b);
+
+ // Creates a sparse matrix from a compressed-column form. No memory is
+ // allocated or copied; the structure A is filled out with info from the
+ // argument.
+ cs_di CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A);
+
+ // Creates a new matrix from a triplet form. Deallocate the returned matrix
+ // with Free. May return NULL if the compression or allocation fails.
+ cs_di* CreateSparseMatrix(TripletSparseMatrix* A);
+
+ // Computes a symbolic factorization of A that can be used in SolveCholesky.
+ // The returned matrix should be deallocated with Free when not used anymore.
+ cs_dis* AnalyzeCholesky(cs_di* A);
+
+ // Deallocates the memory of a matrix obtained from AnalyzeCholesky.
+ void Free(cs_di* factor);
+ void Free(cs_dis* factor);
+
+ private:
+ // Cached scratch space
+ CS_ENTRY* scratch_;
+ int scratch_size_;
+};
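+
+// A minimal usage sketch (illustrative only; error handling omitted and the
+// triplet matrix and right hand side b are assumed to exist):
+//
+// CXSparse cxsparse;
+// cs_di* A = cxsparse.CreateSparseMatrix(triplet_matrix);
+// cs_dis* symbolic_factorization = cxsparse.AnalyzeCholesky(A);
+// if (cxsparse.SolveCholesky(A, symbolic_factorization, b)) {
+// // b now contains the solution x.
+// }
+// cxsparse.Free(symbolic_factorization);
+// cxsparse.Free(A);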
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_NO_CXSPARSE
+
+#endif // CERES_INTERNAL_CXSPARSE_H_
diff --git a/internal/ceres/dense_jacobian_writer.h b/internal/ceres/dense_jacobian_writer.h
new file mode 100644
index 0000000..be743a8
--- /dev/null
+++ b/internal/ceres/dense_jacobian_writer.h
@@ -0,0 +1,111 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// A jacobian writer that writes to dense Eigen matrices.
+
+#ifndef CERES_INTERNAL_DENSE_JACOBIAN_WRITER_H_
+#define CERES_INTERNAL_DENSE_JACOBIAN_WRITER_H_
+
+#include "ceres/casts.h"
+#include "ceres/dense_sparse_matrix.h"
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/scratch_evaluate_preparer.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+class DenseJacobianWriter {
+ public:
+ DenseJacobianWriter(Evaluator::Options /* ignored */,
+ Program* program)
+ : program_(program) {
+ }
+
+ // JacobianWriter interface.
+
+ // Since the dense matrix has a different layout than that assumed by the
+ // cost functions, use scratch space to store the jacobians temporarily and
+ // then copy them over to the larger jacobian later.
+ ScratchEvaluatePreparer* CreateEvaluatePreparers(int num_threads) {
+ return ScratchEvaluatePreparer::Create(*program_, num_threads);
+ }
+
+ SparseMatrix* CreateJacobian() const {
+ return new DenseSparseMatrix(program_->NumResiduals(),
+ program_->NumEffectiveParameters(),
+ true);
+ }
+
+ void Write(int residual_id,
+ int residual_offset,
+ double **jacobians,
+ SparseMatrix* jacobian) {
+ // There is nothing to do if there is no jacobian to write into. Bailing
+ // out here also avoids using an uninitialized pointer below.
+ if (jacobian == NULL) {
+ return;
+ }
+ DenseSparseMatrix* dense_jacobian = down_cast<DenseSparseMatrix*>(jacobian);
+ const ResidualBlock* residual_block =
+ program_->residual_blocks()[residual_id];
+ int num_parameter_blocks = residual_block->NumParameterBlocks();
+ int num_residuals = residual_block->NumResiduals();
+
+ // Now copy the jacobians for each parameter into the dense jacobian matrix.
+ for (int j = 0; j < num_parameter_blocks; ++j) {
+ ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
+
+ // If the parameter block is fixed, then there is nothing to do.
+ if (parameter_block->IsConstant()) {
+ continue;
+ }
+
+ const int parameter_block_size = parameter_block->LocalSize();
+ ConstMatrixRef parameter_jacobian(jacobians[j],
+ num_residuals,
+ parameter_block_size);
+
+ dense_jacobian->mutable_matrix().block(
+ residual_offset,
+ parameter_block->delta_offset(),
+ num_residuals,
+ parameter_block_size) = parameter_jacobian;
+ }
+ }
+
+ private:
+ Program* program_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_DENSE_JACOBIAN_WRITER_H_
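
The Write() method above boils down to one Eigen block assignment per parameter block. A minimal sketch with plain Eigen types; the sizes, offsets, and values below are made up purely for illustration and are not part of Ceres:

    #include "Eigen/Dense"

    int main() {
      // Full dense jacobian: 6 residuals by 9 effective parameters, all zero.
      Eigen::MatrixXd dense_jacobian = Eigen::MatrixXd::Zero(6, 9);

      // Jacobian of one 2-residual block w.r.t. one 3-parameter block, laid
      // out the way a cost function writes it into scratch space.
      Eigen::MatrixXd parameter_jacobian(2, 3);
      parameter_jacobian << 1, 2, 3,
                            4, 5, 6;

      const int residual_offset = 2;  // Row where this residual block starts.
      const int delta_offset = 3;     // Column of this parameter block's deltas.
      dense_jacobian.block(residual_offset, delta_offset, 2, 3) =
          parameter_jacobian;
      return 0;
    }
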
diff --git a/internal/ceres/dense_normal_cholesky_solver.cc b/internal/ceres/dense_normal_cholesky_solver.cc
new file mode 100644
index 0000000..f6bb99a
--- /dev/null
+++ b/internal/ceres/dense_normal_cholesky_solver.cc
@@ -0,0 +1,86 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/dense_normal_cholesky_solver.h"
+
+#include <cstddef>
+
+#include "Eigen/Dense"
+#include "ceres/dense_sparse_matrix.h"
+#include "ceres/linear_solver.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+DenseNormalCholeskySolver::DenseNormalCholeskySolver(
+ const LinearSolver::Options& options)
+ : options_(options) {}
+
+LinearSolver::Summary DenseNormalCholeskySolver::SolveImpl(
+ DenseSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x) {
+ const int num_rows = A->num_rows();
+ const int num_cols = A->num_cols();
+
+ ConstAlignedMatrixRef Aref = A->matrix();
+ Matrix lhs(num_cols, num_cols);
+ lhs.setZero();
+
+ // lhs += A'A
+ //
+  // Using rankUpdate instead of GEMM exposes the fact that it is the
+  // same matrix being multiplied with itself and that the product is
+  // symmetric.
+ lhs.selfadjointView<Eigen::Upper>().rankUpdate(Aref.transpose());
+
+ // rhs = A'b
+ Vector rhs = Aref.transpose() * ConstVectorRef(b, num_rows);
+
+ if (per_solve_options.D != NULL) {
+ ConstVectorRef D(per_solve_options.D, num_cols);
+ lhs += D.array().square().matrix().asDiagonal();
+ }
+
+ VectorRef(x, num_cols) =
+ lhs.selfadjointView<Eigen::Upper>().ldlt().solve(rhs);
+
+ LinearSolver::Summary summary;
+ summary.num_iterations = 1;
+ summary.termination_type = TOLERANCE;
+ return summary;
+}
+
+} // namespace internal
+} // namespace ceres
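
The same normal-equations pattern, sketched with plain Eigen types instead of the Ceres-internal typedefs; the function name and signature are illustrative only:

    #include "Eigen/Dense"

    // Solve min_x |Ax - b|^2 + |diag(D) x|^2 via the normal equations.
    Eigen::VectorXd SolveNormalEquations(const Eigen::MatrixXd& A,
                                         const Eigen::VectorXd& b,
                                         const Eigen::VectorXd& D) {
      Eigen::MatrixXd lhs = Eigen::MatrixXd::Zero(A.cols(), A.cols());
      // lhs += A'A; only the upper triangle is computed.
      lhs.selfadjointView<Eigen::Upper>().rankUpdate(A.transpose());
      // Regularizer: add the elementwise square of D to the diagonal.
      lhs += D.array().square().matrix().asDiagonal();
      const Eigen::VectorXd rhs = A.transpose() * b;  // rhs = A'b
      return lhs.selfadjointView<Eigen::Upper>().ldlt().solve(rhs);
    }

Because rankUpdate only fills the upper triangle, the final solve is also done through selfadjointView<Eigen::Upper>(), matching the implementation above.
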
diff --git a/internal/ceres/dense_normal_cholesky_solver.h b/internal/ceres/dense_normal_cholesky_solver.h
new file mode 100644
index 0000000..de47740
--- /dev/null
+++ b/internal/ceres/dense_normal_cholesky_solver.h
@@ -0,0 +1,95 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Solve dense rectangular systems Ax = b by forming the normal
+// equations and solving them using the Cholesky factorization.
+
+#ifndef CERES_INTERNAL_DENSE_NORMAL_CHOLESKY_SOLVER_H_
+#define CERES_INTERNAL_DENSE_NORMAL_CHOLESKY_SOLVER_H_
+
+#include "ceres/linear_solver.h"
+#include "ceres/internal/macros.h"
+
+namespace ceres {
+namespace internal {
+
+class DenseSparseMatrix;
+
+// This class implements the LinearSolver interface for solving
+// rectangular/unsymmetric (well constrained) linear systems of the
+// form
+//
+// Ax = b
+//
+// Since there does not usually exist a solution that satisfies these
+// equations, the solver instead solves the linear least squares
+// problem
+//
+// min_x |Ax - b|^2
+//
+// Setting the gradient of the above optimization problem to zero
+// gives us the normal equations
+//
+// A'Ax = A'b
+//
+// A'A is a positive definite matrix (hopefully), and the resulting
+// linear system can be solved using Cholesky factorization.
+//
+// If the PerSolveOptions struct has a non-null array D, then the
+// augmented/regularized linear system
+//
+// [ A ]x = [b]
+// [ diag(D) ] [0]
+//
+// is solved.
+//
+// This class uses the LDLT factorization routines from the Eigen
+// library. This solver always returns a solution; it is the user's
+// responsibility to judge if the solution is good enough for their
+// purposes.
+class DenseNormalCholeskySolver: public DenseSparseMatrixSolver {
+ public:
+ explicit DenseNormalCholeskySolver(const LinearSolver::Options& options);
+
+ private:
+ virtual LinearSolver::Summary SolveImpl(
+ DenseSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x);
+
+ const LinearSolver::Options options_;
+ CERES_DISALLOW_COPY_AND_ASSIGN(DenseNormalCholeskySolver);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_DENSE_NORMAL_CHOLESKY_SOLVER_H_
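
Writing out the normal equations of the augmented system makes the handling of D in SolveImpl explicit:

    [ A       ]' [ A       ] x = [ A       ]' [ b ]
    [ diag(D) ]  [ diag(D) ]     [ diag(D) ]  [ 0 ]

    (A'A + diag(D)^2) x = A'b

so nothing ever has to be appended to A; it suffices to add the elementwise square of D to the diagonal of A'A, which is exactly what the .cc file above does.
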
diff --git a/internal/ceres/dense_qr_solver.cc b/internal/ceres/dense_qr_solver.cc
new file mode 100644
index 0000000..7aec450
--- /dev/null
+++ b/internal/ceres/dense_qr_solver.cc
@@ -0,0 +1,90 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/dense_qr_solver.h"
+
+#include <cstddef>
+
+#include "Eigen/Dense"
+#include "ceres/dense_sparse_matrix.h"
+#include "ceres/linear_solver.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+DenseQRSolver::DenseQRSolver(const LinearSolver::Options& options)
+ : options_(options) {}
+
+LinearSolver::Summary DenseQRSolver::SolveImpl(
+ DenseSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x) {
+ const int num_rows = A->num_rows();
+ const int num_cols = A->num_cols();
+ VLOG(2) << "DenseQRSolver: "
+ << num_rows << " x " << num_cols << " system.";
+
+ if (per_solve_options.D != NULL) {
+ // Temporarily append a diagonal block to the A matrix, but undo
+ // it before returning the matrix to the user.
+ A->AppendDiagonal(per_solve_options.D);
+ }
+
+ // rhs = [b;0] to account for the additional rows in the lhs.
+  const int augmented_num_rows =
+      num_rows + ((per_solve_options.D != NULL) ? num_cols : 0);
+ if (rhs_.rows() != augmented_num_rows) {
+ rhs_.resize(augmented_num_rows);
+ rhs_.setZero();
+ }
+ rhs_.head(num_rows) = ConstVectorRef(b, num_rows);
+
+ // Solve the system.
+ VectorRef(x, num_cols) = A->matrix().colPivHouseholderQr().solve(rhs_);
+
+ if (per_solve_options.D != NULL) {
+ // Undo the modifications to the matrix A.
+ A->RemoveDiagonal();
+ }
+
+ // We always succeed, since the QR solver returns the best solution
+ // it can. It is the job of the caller to determine if the solution
+ // is good enough or not.
+ LinearSolver::Summary summary;
+ summary.num_iterations = 1;
+ summary.termination_type = TOLERANCE;
+ return summary;
+}
+
+} // namespace internal
+} // namespace ceres
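
For reference, the augmented solve with the diagonal rows stacked explicitly rather than via AppendDiagonal(); plain Eigen, names illustrative only:

    #include "Eigen/Dense"

    // Solve min_x |Ax - b|^2 + |diag(D) x|^2 with a rank-revealing QR.
    Eigen::VectorXd SolveAugmentedQR(const Eigen::MatrixXd& A,
                                     const Eigen::VectorXd& b,
                                     const Eigen::VectorXd& D) {
      const int num_rows = A.rows();
      const int num_cols = A.cols();

      // lhs = [A; diag(D)], rhs = [b; 0].
      Eigen::MatrixXd lhs(num_rows + num_cols, num_cols);
      lhs.topRows(num_rows) = A;
      lhs.bottomRows(num_cols) = D.asDiagonal();
      Eigen::VectorXd rhs = Eigen::VectorXd::Zero(num_rows + num_cols);
      rhs.head(num_rows) = b;

      return lhs.colPivHouseholderQr().solve(rhs);
    }
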
diff --git a/internal/ceres/dense_qr_solver.h b/internal/ceres/dense_qr_solver.h
new file mode 100644
index 0000000..f78fa72
--- /dev/null
+++ b/internal/ceres/dense_qr_solver.h
@@ -0,0 +1,101 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Solve dense rectangular systems Ax = b using the QR factorization.
+#ifndef CERES_INTERNAL_DENSE_QR_SOLVER_H_
+#define CERES_INTERNAL_DENSE_QR_SOLVER_H_
+
+#include "ceres/linear_solver.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/macros.h"
+
+namespace ceres {
+namespace internal {
+
+class DenseSparseMatrix;
+
+// This class implements the LinearSolver interface for solving
+// rectangular/unsymmetric (well constrained) linear systems of the
+// form
+//
+// Ax = b
+//
+// Since there does not usually exist a solution that satisfies these
+// equations, the solver instead solves the linear least squares
+// problem
+//
+// min_x |Ax - b|^2
+//
+// The solution strategy is based on computing the QR decomposition of
+// A, i.e.
+//
+// A = QR
+//
+// Where Q is an orthonormal matrix and R is an upper triangular
+// matrix. Then
+//
+// Ax = b
+// QRx = b
+// Q'QRx = Q'b
+// Rx = Q'b
+// x = R^{-1} Q'b
+//
+// If the PerSolveOptions struct has a non-null array D, then the
+// augmented/regularized linear system
+//
+// [ A ]x = [b]
+// [ diag(D) ] [0]
+//
+// is solved.
+//
+// This class uses the dense QR factorization routines from the Eigen
+// library. This solver always returns a solution; it is the user's
+// responsibility to judge if the solution is good enough for their
+// purposes.
+class DenseQRSolver: public DenseSparseMatrixSolver {
+ public:
+ explicit DenseQRSolver(const LinearSolver::Options& options);
+
+ private:
+ virtual LinearSolver::Summary SolveImpl(
+ DenseSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x);
+
+ const LinearSolver::Options options_;
+ Vector rhs_;
+ CERES_DISALLOW_COPY_AND_ASSIGN(DenseQRSolver);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_DENSE_QR_SOLVER_H_
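
Stacking diag(D) under A is equivalent to Tikhonov regularization, since

    || [ A; diag(D) ] x - [ b; 0 ] ||^2 = ||Ax - b||^2 + ||diag(D) x||^2,

which is the same objective that DenseNormalCholeskySolver minimizes by adding diag(D)^2 to A'A. The QR route avoids forming A'A (and thus squaring the condition number of A) at the cost of factoring the taller augmented matrix.
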
diff --git a/internal/ceres/dense_sparse_matrix.cc b/internal/ceres/dense_sparse_matrix.cc
new file mode 100644
index 0000000..7fb7b83
--- /dev/null
+++ b/internal/ceres/dense_sparse_matrix.cc
@@ -0,0 +1,209 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/dense_sparse_matrix.h"
+
+#include <algorithm>
+#include "ceres/matrix_proto.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+DenseSparseMatrix::DenseSparseMatrix(int num_rows, int num_cols)
+ : has_diagonal_appended_(false),
+ has_diagonal_reserved_(false) {
+  // This constructor does not reserve extra space for an appended diagonal.
+ m_.resize(num_rows, num_cols);
+ m_.setZero();
+}
+
+DenseSparseMatrix::DenseSparseMatrix(int num_rows,
+                                     int num_cols,
+                                     bool reserve_diagonal)
+ : has_diagonal_appended_(false),
+ has_diagonal_reserved_(reserve_diagonal) {
+  // If requested, reserve enough extra rows to append the diagonal later.
+ if (reserve_diagonal) {
+ m_.resize(num_rows + num_cols, num_cols);
+ } else {
+ m_.resize(num_rows, num_cols);
+ }
+ m_.setZero();
+}
+
+DenseSparseMatrix::DenseSparseMatrix(const TripletSparseMatrix& m)
+ : m_(Eigen::MatrixXd::Zero(m.num_rows(), m.num_cols())),
+ has_diagonal_appended_(false),
+ has_diagonal_reserved_(false) {
+ const double *values = m.values();
+ const int *rows = m.rows();
+ const int *cols = m.cols();
+ int num_nonzeros = m.num_nonzeros();
+
+ for (int i = 0; i < num_nonzeros; ++i) {
+ m_(rows[i], cols[i]) += values[i];
+ }
+}
+
+DenseSparseMatrix::DenseSparseMatrix(const Matrix& m)
+ : m_(m),
+ has_diagonal_appended_(false),
+ has_diagonal_reserved_(false) {
+}
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+DenseSparseMatrix::DenseSparseMatrix(const SparseMatrixProto& outer_proto)
+ : m_(Eigen::MatrixXd::Zero(
+ outer_proto.dense_matrix().num_rows(),
+ outer_proto.dense_matrix().num_cols())),
+ has_diagonal_appended_(false),
+ has_diagonal_reserved_(false) {
+ const DenseSparseMatrixProto& proto = outer_proto.dense_matrix();
+ for (int i = 0; i < m_.rows(); ++i) {
+ for (int j = 0; j < m_.cols(); ++j) {
+ m_(i, j) = proto.values(m_.cols() * i + j);
+ }
+ }
+}
+#endif
+
+void DenseSparseMatrix::SetZero() {
+ m_.setZero();
+}
+
+void DenseSparseMatrix::RightMultiply(const double* x, double* y) const {
+ VectorRef(y, num_rows()) += matrix() * ConstVectorRef(x, num_cols());
+}
+
+void DenseSparseMatrix::LeftMultiply(const double* x, double* y) const {
+ VectorRef(y, num_cols()) +=
+ matrix().transpose() * ConstVectorRef(x, num_rows());
+}
+
+void DenseSparseMatrix::SquaredColumnNorm(double* x) const {
+ VectorRef(x, num_cols()) = m_.colwise().squaredNorm();
+}
+
+void DenseSparseMatrix::ScaleColumns(const double* scale) {
+ m_ *= ConstVectorRef(scale, num_cols()).asDiagonal();
+}
+
+void DenseSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
+ *dense_matrix = m_;
+}
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+void DenseSparseMatrix::ToProto(SparseMatrixProto* outer_proto) const {
+ CHECK(!has_diagonal_appended_) << "Not supported.";
+ outer_proto->Clear();
+ DenseSparseMatrixProto* proto = outer_proto->mutable_dense_matrix();
+
+ proto->set_num_rows(num_rows());
+ proto->set_num_cols(num_cols());
+
+ int num_nnz = num_nonzeros();
+ for (int i = 0; i < num_nnz; ++i) {
+ proto->add_values(m_.data()[i]);
+ }
+}
+#endif
+
+void DenseSparseMatrix::AppendDiagonal(double *d) {
+ CHECK(!has_diagonal_appended_);
+ if (!has_diagonal_reserved_) {
+ Matrix tmp = m_;
+ m_.resize(m_.rows() + m_.cols(), m_.cols());
+ m_.setZero();
+ m_.block(0, 0, tmp.rows(), tmp.cols()) = tmp;
+ has_diagonal_reserved_ = true;
+ }
+
+ m_.bottomLeftCorner(m_.cols(), m_.cols()) =
+ ConstVectorRef(d, m_.cols()).asDiagonal();
+ has_diagonal_appended_ = true;
+}
+
+void DenseSparseMatrix::RemoveDiagonal() {
+ CHECK(has_diagonal_appended_);
+ has_diagonal_appended_ = false;
+ // Leave the diagonal reserved.
+}
+
+int DenseSparseMatrix::num_rows() const {
+ if (has_diagonal_reserved_ && !has_diagonal_appended_) {
+ return m_.rows() - m_.cols();
+ }
+ return m_.rows();
+}
+
+int DenseSparseMatrix::num_cols() const {
+ return m_.cols();
+}
+
+int DenseSparseMatrix::num_nonzeros() const {
+ if (has_diagonal_reserved_ && !has_diagonal_appended_) {
+ return (m_.rows() - m_.cols()) * m_.cols();
+ }
+ return m_.rows() * m_.cols();
+}
+
+ConstAlignedMatrixRef DenseSparseMatrix::matrix() const {
+ if (has_diagonal_reserved_ && !has_diagonal_appended_) {
+ return ConstAlignedMatrixRef(
+ m_.data(), m_.rows() - m_.cols(), m_.cols());
+ }
+ return ConstAlignedMatrixRef(m_.data(), m_.rows(), m_.cols());
+}
+
+AlignedMatrixRef DenseSparseMatrix::mutable_matrix() {
+ if (has_diagonal_reserved_ && !has_diagonal_appended_) {
+ return AlignedMatrixRef(
+ m_.data(), m_.rows() - m_.cols(), m_.cols());
+ }
+ return AlignedMatrixRef(m_.data(), m_.rows(), m_.cols());
+}
+
+void DenseSparseMatrix::ToTextFile(FILE* file) const {
+ CHECK_NOTNULL(file);
+ const int active_rows =
+ (has_diagonal_reserved_ && !has_diagonal_appended_)
+ ? (m_.rows() - m_.cols())
+ : m_.rows();
+
+ for (int r = 0; r < active_rows; ++r) {
+ for (int c = 0; c < m_.cols(); ++c) {
+ fprintf(file, "% 10d % 10d %17f\n", r, c, m_(r, c));
+ }
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/dense_sparse_matrix.h b/internal/ceres/dense_sparse_matrix.h
new file mode 100644
index 0000000..1e4d499
--- /dev/null
+++ b/internal/ceres/dense_sparse_matrix.h
@@ -0,0 +1,117 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// A dense matrix implemented under the SparseMatrix interface.
+
+#ifndef CERES_INTERNAL_DENSE_SPARSE_MATRIX_H_
+#define CERES_INTERNAL_DENSE_SPARSE_MATRIX_H_
+
+#include <glog/logging.h>
+#include "ceres/sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/macros.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class SparseMatrixProto;
+class TripletSparseMatrix;
+
+class DenseSparseMatrix : public SparseMatrix {
+ public:
+ // Build a matrix with the same content as the TripletSparseMatrix
+ // m. This assumes that m does not have any repeated entries.
+ explicit DenseSparseMatrix(const TripletSparseMatrix& m);
+ explicit DenseSparseMatrix(const Matrix& m);
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+ explicit DenseSparseMatrix(const SparseMatrixProto& proto);
+#endif
+
+ DenseSparseMatrix(int num_rows, int num_cols);
+ DenseSparseMatrix(int num_rows, int num_cols, bool reserve_diagonal);
+
+ virtual ~DenseSparseMatrix() {}
+
+ // SparseMatrix interface.
+ virtual void SetZero();
+ virtual void RightMultiply(const double* x, double* y) const;
+ virtual void LeftMultiply(const double* x, double* y) const;
+ virtual void SquaredColumnNorm(double* x) const;
+ virtual void ScaleColumns(const double* scale);
+ virtual void ToDenseMatrix(Matrix* dense_matrix) const;
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+ virtual void ToProto(SparseMatrixProto* proto) const;
+#endif
+ virtual void ToTextFile(FILE* file) const;
+ virtual int num_rows() const;
+ virtual int num_cols() const;
+ virtual int num_nonzeros() const;
+ virtual const double* values() const { return m_.data(); }
+ virtual double* mutable_values() { return m_.data(); }
+
+ ConstAlignedMatrixRef matrix() const;
+ AlignedMatrixRef mutable_matrix();
+
+  // Only one diagonal can be appended at a time. The diagonal is appended
+  // as a new set of rows, e.g.
+ //
+ // Original matrix:
+ //
+ // x x x
+ // x x x
+ // x x x
+ //
+ // After append diagonal (1, 2, 3):
+ //
+ // x x x
+ // x x x
+ // x x x
+ // 1 0 0
+ // 0 2 0
+ // 0 0 3
+ //
+ // Calling RemoveDiagonal removes the block. It is a fatal error to append a
+ // diagonal to a matrix that already has an appended diagonal, and it is also
+ // a fatal error to remove a diagonal from a matrix that has none.
+ void AppendDiagonal(double *d);
+ void RemoveDiagonal();
+
+ private:
+ Matrix m_;
+ bool has_diagonal_appended_;
+ bool has_diagonal_reserved_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_DENSE_SPARSE_MATRIX_H_
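
A short usage sketch of the append/remove bookkeeping, assuming only the interface declared above; the numbers are arbitrary:

    #include "ceres/dense_sparse_matrix.h"

    void AppendDiagonalSketch() {
      using ceres::internal::DenseSparseMatrix;

      // Reserve the extra rows up front so AppendDiagonal() does not reallocate.
      DenseSparseMatrix m(4, 3, /* reserve_diagonal = */ true);
      // m.num_rows() == 4; the reserved rows are hidden from matrix().

      double d[] = {1.0, 2.0, 3.0};
      m.AppendDiagonal(d);
      // m.num_rows() == 7; the bottom 3 x 3 block of m.matrix() is diag(1, 2, 3).

      m.RemoveDiagonal();
      // Back to a logical 4 x 3 matrix. The storage stays reserved, so a later
      // AppendDiagonal() reuses it without another allocation.
    }
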
diff --git a/internal/ceres/dense_sparse_matrix_test.cc b/internal/ceres/dense_sparse_matrix_test.cc
new file mode 100644
index 0000000..354357f
--- /dev/null
+++ b/internal/ceres/dense_sparse_matrix_test.cc
@@ -0,0 +1,232 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// TODO(keir): Implement a generic "compare sparse matrix implementations" test
+// suite that can compare all the implementations. Then this file would shrink
+// in size.
+
+#include "ceres/dense_sparse_matrix.h"
+
+#include "gtest/gtest.h"
+#include "ceres/casts.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/matrix_proto.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+
+namespace ceres {
+namespace internal {
+
+void CompareMatrices(const SparseMatrix* a, const SparseMatrix* b) {
+ EXPECT_EQ(a->num_rows(), b->num_rows());
+ EXPECT_EQ(a->num_cols(), b->num_cols());
+
+ int num_rows = a->num_rows();
+ int num_cols = a->num_cols();
+
+ for (int i = 0; i < num_cols; ++i) {
+ Vector x = Vector::Zero(num_cols);
+ x(i) = 1.0;
+
+ Vector y_a = Vector::Zero(num_rows);
+ Vector y_b = Vector::Zero(num_rows);
+
+ a->RightMultiply(x.data(), y_a.data());
+ b->RightMultiply(x.data(), y_b.data());
+
+ EXPECT_EQ((y_a - y_b).norm(), 0);
+ }
+}
+
+class DenseSparseMatrixTest : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ scoped_ptr<LinearLeastSquaresProblem> problem(
+ CreateLinearLeastSquaresProblemFromId(1));
+
+ CHECK_NOTNULL(problem.get());
+
+ tsm.reset(down_cast<TripletSparseMatrix*>(problem->A.release()));
+ dsm.reset(new DenseSparseMatrix(*tsm));
+
+ num_rows = tsm->num_rows();
+ num_cols = tsm->num_cols();
+ }
+
+ int num_rows;
+ int num_cols;
+
+ scoped_ptr<TripletSparseMatrix> tsm;
+ scoped_ptr<DenseSparseMatrix> dsm;
+};
+
+TEST_F(DenseSparseMatrixTest, RightMultiply) {
+ CompareMatrices(tsm.get(), dsm.get());
+
+  // Try with a vector that is not entirely zero to verify column
+  // interactions, which could be masked by a subtle bug when using only the
+  // elementary vectors.
+ Vector a(num_cols);
+ for (int i = 0; i < num_cols; i++) {
+ a(i) = i;
+ }
+ Vector b1 = Vector::Zero(num_rows);
+ Vector b2 = Vector::Zero(num_rows);
+
+ tsm->RightMultiply(a.data(), b1.data());
+ dsm->RightMultiply(a.data(), b2.data());
+
+ EXPECT_EQ((b1 - b2).norm(), 0);
+}
+
+TEST_F(DenseSparseMatrixTest, LeftMultiply) {
+ for (int i = 0; i < num_rows; ++i) {
+ Vector a = Vector::Zero(num_rows);
+ a(i) = 1.0;
+
+ Vector b1 = Vector::Zero(num_cols);
+ Vector b2 = Vector::Zero(num_cols);
+
+ tsm->LeftMultiply(a.data(), b1.data());
+ dsm->LeftMultiply(a.data(), b2.data());
+
+ EXPECT_EQ((b1 - b2).norm(), 0);
+ }
+
+  // Try with a vector that is not entirely zero to verify column
+  // interactions, which could be masked by a subtle bug when using only the
+  // elementary vectors.
+ Vector a(num_rows);
+ for (int i = 0; i < num_rows; i++) {
+ a(i) = i;
+ }
+ Vector b1 = Vector::Zero(num_cols);
+ Vector b2 = Vector::Zero(num_cols);
+
+ tsm->LeftMultiply(a.data(), b1.data());
+ dsm->LeftMultiply(a.data(), b2.data());
+
+ EXPECT_EQ((b1 - b2).norm(), 0);
+}
+
+TEST_F(DenseSparseMatrixTest, ColumnNorm) {
+ Vector b1 = Vector::Zero(num_cols);
+ Vector b2 = Vector::Zero(num_cols);
+
+ tsm->SquaredColumnNorm(b1.data());
+ dsm->SquaredColumnNorm(b2.data());
+
+ EXPECT_EQ((b1 - b2).norm(), 0);
+}
+
+TEST_F(DenseSparseMatrixTest, Scale) {
+ Vector scale(num_cols);
+ for (int i = 0; i < num_cols; ++i) {
+ scale(i) = i + 1;
+ }
+ tsm->ScaleColumns(scale.data());
+ dsm->ScaleColumns(scale.data());
+ CompareMatrices(tsm.get(), dsm.get());
+}
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+TEST_F(DenseSparseMatrixTest, Serialization) {
+ SparseMatrixProto proto;
+ dsm->ToProto(&proto);
+
+ DenseSparseMatrix n(proto);
+ ASSERT_EQ(dsm->num_rows(), n.num_rows());
+ ASSERT_EQ(dsm->num_cols(), n.num_cols());
+ ASSERT_EQ(dsm->num_nonzeros(), n.num_nonzeros());
+
+ for (int i = 0; i < n.num_rows() + 1; ++i) {
+ ASSERT_EQ(dsm->values()[i], proto.dense_matrix().values(i));
+ }
+}
+#endif
+
+TEST_F(DenseSparseMatrixTest, ToDenseMatrix) {
+ Matrix tsm_dense;
+ Matrix dsm_dense;
+
+ tsm->ToDenseMatrix(&tsm_dense);
+ dsm->ToDenseMatrix(&dsm_dense);
+
+ EXPECT_EQ((tsm_dense - dsm_dense).norm(), 0.0);
+}
+
+// TODO(keir): Make this work without protocol buffers.
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+TEST_F(DenseSparseMatrixTest, AppendDiagonal) {
+ DenseSparseMatrixProto proto;
+ proto.set_num_rows(3);
+ proto.set_num_cols(3);
+ for (int i = 0; i < 9; ++i) {
+ proto.add_values(i);
+ }
+ SparseMatrixProto outer_proto;
+ *outer_proto.mutable_dense_matrix() = proto;
+
+ DenseSparseMatrix dsm(outer_proto);
+
+ double diagonal[] = { 10, 11, 12 };
+ dsm.AppendDiagonal(diagonal);
+
+ // Verify the diagonal got added.
+ Matrix m = dsm.matrix();
+ EXPECT_EQ(6, m.rows());
+ EXPECT_EQ(3, m.cols());
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ EXPECT_EQ(3 * i + j, m(i, j));
+ if (i == j) {
+ EXPECT_EQ(10 + i, m(i + 3, j));
+ } else {
+ EXPECT_EQ(0, m(i + 3, j));
+ }
+ }
+ }
+
+ // Verify the diagonal gets removed.
+ dsm.RemoveDiagonal();
+ m = dsm.matrix();
+
+ EXPECT_EQ(3, m.rows());
+ EXPECT_EQ(3, m.cols());
+
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ EXPECT_EQ(3 * i + j, m(i, j));
+ }
+ }
+}
+#endif
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/detect_structure.cc b/internal/ceres/detect_structure.cc
new file mode 100644
index 0000000..ea5bf2e
--- /dev/null
+++ b/internal/ceres/detect_structure.cc
@@ -0,0 +1,114 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/detect_structure.h"
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+void DetectStructure(const CompressedRowBlockStructure& bs,
+ const int num_eliminate_blocks,
+ int* row_block_size,
+ int* e_block_size,
+ int* f_block_size) {
+ const int num_row_blocks = bs.rows.size();
+ *row_block_size = 0;
+ *e_block_size = 0;
+ *f_block_size = 0;
+
+ // Iterate over row blocks of the matrix, checking if row_block,
+ // e_block or f_block sizes remain constant.
+ for (int r = 0; r < num_row_blocks; ++r) {
+ const CompressedRow& row = bs.rows[r];
+ // We do not care about the sizes of the blocks in rows which do
+ // not contain e_blocks.
+ if (row.cells.front().block_id >= num_eliminate_blocks) {
+ break;
+ }
+ const int e_block_id = row.cells.front().block_id;
+
+ if (*row_block_size == 0) {
+ *row_block_size = row.block.size;
+ } else if (*row_block_size != Eigen::Dynamic &&
+ *row_block_size != row.block.size) {
+ VLOG(2) << "Dynamic row block size because the block size changed from "
+ << *row_block_size << " to "
+ << row.block.size;
+ *row_block_size = Eigen::Dynamic;
+ }
+
+ if (*e_block_size == 0) {
+ *e_block_size = bs.cols[e_block_id].size;
+ } else if (*e_block_size != Eigen::Dynamic &&
+ *e_block_size != bs.cols[e_block_id].size) {
+ VLOG(2) << "Dynamic e block size because the block size changed from "
+ << *e_block_size << " to "
+ << bs.cols[e_block_id].size;
+ *e_block_size = Eigen::Dynamic;
+ }
+
+ if (*f_block_size == 0) {
+ if (row.cells.size() > 1) {
+ const int f_block_id = row.cells[1].block_id;
+ *f_block_size = bs.cols[f_block_id].size;
+ }
+ } else if (*f_block_size != Eigen::Dynamic) {
+ for (int c = 1; c < row.cells.size(); ++c) {
+ if (*f_block_size != bs.cols[row.cells[c].block_id].size) {
+ VLOG(2) << "Dynamic f block size because the block size "
+ << "changed from " << *f_block_size << " to "
+ << bs.cols[row.cells[c].block_id].size;
+ *f_block_size = Eigen::Dynamic;
+ break;
+ }
+ }
+ }
+
+ const bool is_everything_dynamic = (*row_block_size == Eigen::Dynamic &&
+ *e_block_size == Eigen::Dynamic &&
+ *f_block_size == Eigen::Dynamic);
+ if (is_everything_dynamic) {
+ break;
+ }
+ }
+
+ CHECK_NE(*row_block_size, 0) << "No rows found";
+ CHECK_NE(*e_block_size, 0) << "No e type blocks found";
+ VLOG(1) << "Schur complement static structure <"
+ << *row_block_size << ","
+ << *e_block_size << ","
+ << *f_block_size << ">.";
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/detect_structure.h b/internal/ceres/detect_structure.h
new file mode 100644
index 0000000..5f8e1b4
--- /dev/null
+++ b/internal/ceres/detect_structure.h
@@ -0,0 +1,63 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_DETECT_STRUCTURE_H_
+#define CERES_INTERNAL_DETECT_STRUCTURE_H_
+
+#include "ceres/block_structure.h"
+
+namespace ceres {
+namespace internal {
+
+// Detect static blocks in the problem sparsity. For rows containing
+// e_blocks, we are interested in detecting if the size of the row
+// blocks, e_blocks and the f_blocks remain constant. If they do, then
+// we can use template specialization to improve the performance of
+// the block level linear algebra operations used by the
+// SchurEliminator.
+//
+// If a block size is not constant, we return Eigen::Dynamic as the
+// value. This just means that the eliminator uses dynamically sized
+// linear algebra operations rather than static operations whose size
+// is known at compile time.
+//
+// For more details about e_blocks and f_blocks, see
+// schur_eliminator.h. This information is used to initialize an
+// appropriate template specialization of SchurEliminator.
+void DetectStructure(const CompressedRowBlockStructure& bs,
+ const int num_eliminate_blocks,
+ int* row_block_size,
+ int* e_block_size,
+ int* f_block_size);
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_DETECT_STRUCTURE_H_
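
For example, in a typical bundle adjustment problem with 3-dimensional point blocks as e_blocks, 9-dimensional camera blocks as f_blocks, and 2-dimensional reprojection-error residual blocks, DetectStructure reports <2, 3, 9> and a correspondingly specialized SchurEliminator is used; if cameras with different parameter block sizes are mixed in, only f_block_size falls back to Eigen::Dynamic.
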
diff --git a/internal/ceres/dogleg_strategy.cc b/internal/ceres/dogleg_strategy.cc
new file mode 100644
index 0000000..668fa54
--- /dev/null
+++ b/internal/ceres/dogleg_strategy.cc
@@ -0,0 +1,691 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/dogleg_strategy.h"
+
+#include <cmath>
+#include <limits>
+#include "Eigen/Dense"
+#include "ceres/array_utils.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_solver.h"
+#include "ceres/polynomial_solver.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/trust_region_strategy.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+const double kMaxMu = 1.0;
+const double kMinMu = 1e-8;
+}
+
+DoglegStrategy::DoglegStrategy(const TrustRegionStrategy::Options& options)
+ : linear_solver_(options.linear_solver),
+ radius_(options.initial_radius),
+ max_radius_(options.max_radius),
+ min_diagonal_(options.lm_min_diagonal),
+ max_diagonal_(options.lm_max_diagonal),
+ mu_(kMinMu),
+ min_mu_(kMinMu),
+ max_mu_(kMaxMu),
+ mu_increase_factor_(10.0),
+ increase_threshold_(0.75),
+ decrease_threshold_(0.25),
+ dogleg_step_norm_(0.0),
+ reuse_(false),
+ dogleg_type_(options.dogleg_type) {
+ CHECK_NOTNULL(linear_solver_);
+ CHECK_GT(min_diagonal_, 0.0);
+ CHECK_LE(min_diagonal_, max_diagonal_);
+ CHECK_GT(max_radius_, 0.0);
+}
+
+// If the reuse_ flag is not set, then the Cauchy point (scaled
+// gradient) and the new Gauss-Newton step are computed from
+// scratch. The Dogleg step is then computed as an interpolation of these
+// two vectors.
+TrustRegionStrategy::Summary DoglegStrategy::ComputeStep(
+ const TrustRegionStrategy::PerSolveOptions& per_solve_options,
+ SparseMatrix* jacobian,
+ const double* residuals,
+ double* step) {
+ CHECK_NOTNULL(jacobian);
+ CHECK_NOTNULL(residuals);
+ CHECK_NOTNULL(step);
+
+ const int n = jacobian->num_cols();
+ if (reuse_) {
+    // The Gauss-Newton and gradient vectors are always available; only a
+    // new interpolant needs to be computed. For the subspace case,
+ // the subspace and the two-dimensional model are also still valid.
+    switch (dogleg_type_) {
+ case TRADITIONAL_DOGLEG:
+ ComputeTraditionalDoglegStep(step);
+ break;
+
+ case SUBSPACE_DOGLEG:
+ ComputeSubspaceDoglegStep(step);
+ break;
+ }
+ TrustRegionStrategy::Summary summary;
+ summary.num_iterations = 0;
+ summary.termination_type = TOLERANCE;
+ return summary;
+ }
+
+ reuse_ = true;
+ // Check that we have the storage needed to hold the various
+ // temporary vectors.
+ if (diagonal_.rows() != n) {
+ diagonal_.resize(n, 1);
+ gradient_.resize(n, 1);
+ gauss_newton_step_.resize(n, 1);
+ }
+
+ // Vector used to form the diagonal matrix that is used to
+ // regularize the Gauss-Newton solve and that defines the
+ // elliptical trust region
+ //
+ // || D * step || <= radius_ .
+ //
+ jacobian->SquaredColumnNorm(diagonal_.data());
+ for (int i = 0; i < n; ++i) {
+ diagonal_[i] = min(max(diagonal_[i], min_diagonal_), max_diagonal_);
+ }
+ diagonal_ = diagonal_.array().sqrt();
+
+ ComputeGradient(jacobian, residuals);
+ ComputeCauchyPoint(jacobian);
+
+ LinearSolver::Summary linear_solver_summary =
+ ComputeGaussNewtonStep(jacobian, residuals);
+
+ TrustRegionStrategy::Summary summary;
+ summary.residual_norm = linear_solver_summary.residual_norm;
+ summary.num_iterations = linear_solver_summary.num_iterations;
+ summary.termination_type = linear_solver_summary.termination_type;
+
+ if (linear_solver_summary.termination_type != FAILURE) {
+    switch (dogleg_type_) {
+ // Interpolate the Cauchy point and the Gauss-Newton step.
+ case TRADITIONAL_DOGLEG:
+ ComputeTraditionalDoglegStep(step);
+ break;
+
+ // Find the minimum in the subspace defined by the
+ // Cauchy point and the (Gauss-)Newton step.
+ case SUBSPACE_DOGLEG:
+ if (!ComputeSubspaceModel(jacobian)) {
+ summary.termination_type = FAILURE;
+ break;
+ }
+ ComputeSubspaceDoglegStep(step);
+ break;
+ }
+ }
+
+ return summary;
+}
+
+// The trust region is assumed to be elliptical with the
+// diagonal scaling matrix D defined by sqrt(diagonal_).
+// It is implemented by substituting step' = D * step.
+// The trust region for step' is spherical.
+// The gradient, the Gauss-Newton step, the Cauchy point,
+// and all calculations involving the Jacobian have to
+// be adjusted accordingly.
+void DoglegStrategy::ComputeGradient(
+ SparseMatrix* jacobian,
+ const double* residuals) {
+ gradient_.setZero();
+ jacobian->LeftMultiply(residuals, gradient_.data());
+ gradient_.array() /= diagonal_.array();
+}
+
+// The Cauchy point is the global minimizer of the quadratic model
+// along the one-dimensional subspace spanned by the gradient.
+void DoglegStrategy::ComputeCauchyPoint(SparseMatrix* jacobian) {
+ // alpha * -gradient is the Cauchy point.
+ Vector Jg(jacobian->num_rows());
+ Jg.setZero();
+ // The Jacobian is scaled implicitly by computing J * (D^-1 * (D^-1 * g))
+ // instead of (J * D^-1) * (D^-1 * g).
+ Vector scaled_gradient =
+ (gradient_.array() / diagonal_.array()).matrix();
+ jacobian->RightMultiply(scaled_gradient.data(), Jg.data());
+ alpha_ = gradient_.squaredNorm() / Jg.squaredNorm();
+}
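
To see where alpha_ comes from: along the scaled steepest-descent direction the quadratic model changes by

    m(-alpha g) - m(0) = -alpha |g|^2 + 1/2 alpha^2 |J D^-1 g|^2,

and setting the derivative with respect to alpha to zero gives alpha = |g|^2 / |J D^-1 g|^2, which is the ratio of squared norms computed above (with Jg = J D^-1 g).
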
+
+// The dogleg step is defined as the intersection of the trust region
+// boundary with the piecewise linear path from the origin to the Cauchy
+// point and then from there to the Gauss-Newton point (global minimizer
+// of the model function). The Gauss-Newton point is taken if it lies
+// within the trust region.
+void DoglegStrategy::ComputeTraditionalDoglegStep(double* dogleg) {
+ VectorRef dogleg_step(dogleg, gradient_.rows());
+
+ // Case 1. The Gauss-Newton step lies inside the trust region, and
+ // is therefore the optimal solution to the trust-region problem.
+ const double gradient_norm = gradient_.norm();
+ const double gauss_newton_norm = gauss_newton_step_.norm();
+ if (gauss_newton_norm <= radius_) {
+ dogleg_step = gauss_newton_step_;
+ dogleg_step_norm_ = gauss_newton_norm;
+ dogleg_step.array() /= diagonal_.array();
+ VLOG(3) << "GaussNewton step size: " << dogleg_step_norm_
+ << " radius: " << radius_;
+ return;
+ }
+
+ // Case 2. The Cauchy point and the Gauss-Newton steps lie outside
+ // the trust region. Rescale the Cauchy point to the trust region
+ // and return.
+ if (gradient_norm * alpha_ >= radius_) {
+ dogleg_step = -(radius_ / gradient_norm) * gradient_;
+ dogleg_step_norm_ = radius_;
+ dogleg_step.array() /= diagonal_.array();
+ VLOG(3) << "Cauchy step size: " << dogleg_step_norm_
+ << " radius: " << radius_;
+ return;
+ }
+
+ // Case 3. The Cauchy point is inside the trust region and the
+ // Gauss-Newton step is outside. Compute the line joining the two
+ // points and the point on it which intersects the trust region
+ // boundary.
+
+ // a = alpha * -gradient
+ // b = gauss_newton_step
+ const double b_dot_a = -alpha_ * gradient_.dot(gauss_newton_step_);
+ const double a_squared_norm = pow(alpha_ * gradient_norm, 2.0);
+ const double b_minus_a_squared_norm =
+ a_squared_norm - 2 * b_dot_a + pow(gauss_newton_norm, 2);
+
+ // c = a' (b - a)
+ // = alpha * -gradient' gauss_newton_step - alpha^2 |gradient|^2
+ const double c = b_dot_a - a_squared_norm;
+ const double d = sqrt(c * c + b_minus_a_squared_norm *
+ (pow(radius_, 2.0) - a_squared_norm));
+
+ double beta =
+ (c <= 0)
+ ? (d - c) / b_minus_a_squared_norm
+ : (radius_ * radius_ - a_squared_norm) / (d + c);
+ dogleg_step = (-alpha_ * (1.0 - beta)) * gradient_
+ + beta * gauss_newton_step_;
+ dogleg_step_norm_ = dogleg_step.norm();
+ dogleg_step.array() /= diagonal_.array();
+ VLOG(3) << "Dogleg step size: " << dogleg_step_norm_
+ << " radius: " << radius_;
+}
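
The two expressions for beta come from intersecting the segment a + beta (b - a) with the trust region boundary:

    || a + beta (b - a) ||^2 = radius^2
    |b - a|^2 beta^2 + 2 c beta + (|a|^2 - radius^2) = 0,    with c = a'(b - a),

whose positive root is beta = (d - c) / |b - a|^2 with d as defined in the code. Multiplying numerator and denominator by (d + c) and using d^2 - c^2 = |b - a|^2 (radius^2 - |a|^2) gives the algebraically equivalent (radius^2 - |a|^2) / (d + c), which avoids cancellation when c > 0.
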
+
+// The subspace method finds the minimum of the two-dimensional problem
+//
+// min. 1/2 x' B' H B x + g' B x
+// s.t. || B x ||^2 <= r^2
+//
+// where r is the trust region radius and B is the matrix with unit columns
+// spanning the subspace defined by the steepest descent and Newton direction.
+// This subspace by definition includes the Gauss-Newton point, which is
+// therefore taken if it lies within the trust region.
+void DoglegStrategy::ComputeSubspaceDoglegStep(double* dogleg) {
+ VectorRef dogleg_step(dogleg, gradient_.rows());
+
+ // The Gauss-Newton point is inside the trust region if |GN| <= radius_.
+ // This test is valid even though radius_ is a length in the two-dimensional
+ // subspace while gauss_newton_step_ is expressed in the (scaled)
+ // higher dimensional original space. This is because
+ //
+ // 1. gauss_newton_step_ by definition lies in the subspace, and
+ // 2. the subspace basis is orthonormal.
+ //
+ // As a consequence, the norm of the gauss_newton_step_ in the subspace is
+ // the same as its norm in the original space.
+ const double gauss_newton_norm = gauss_newton_step_.norm();
+ if (gauss_newton_norm <= radius_) {
+ dogleg_step = gauss_newton_step_;
+ dogleg_step_norm_ = gauss_newton_norm;
+ dogleg_step.array() /= diagonal_.array();
+ VLOG(3) << "GaussNewton step size: " << dogleg_step_norm_
+ << " radius: " << radius_;
+ return;
+ }
+
+ // The optimum lies on the boundary of the trust region. The above problem
+ // therefore becomes
+ //
+ // min. 1/2 x^T B^T H B x + g^T B x
+ // s.t. || B x ||^2 = r^2
+ //
+ // Notice the equality in the constraint.
+ //
+ // This can be solved by forming the Lagrangian, solving for x(y), where
+ // y is the Lagrange multiplier, using the gradient of the objective, and
+ // putting x(y) back into the constraint. This results in a fourth order
+ // polynomial in y, which can be solved using e.g. the companion matrix.
+ // See the description of MakePolynomialForBoundaryConstrainedProblem for
+ // details. The result is up to four real roots y*, not all of which
+ // correspond to feasible points. The feasible points x(y*) have to be
+ // tested for optimality.
+
+ if (subspace_is_one_dimensional_) {
+ // The subspace is one-dimensional, so both the gradient and
+ // the Gauss-Newton step point towards the same direction.
+ // In this case, we move along the gradient until we reach the trust
+ // region boundary.
+ dogleg_step = -(radius_ / gradient_.norm()) * gradient_;
+ dogleg_step_norm_ = radius_;
+ dogleg_step.array() /= diagonal_.array();
+ VLOG(3) << "Dogleg subspace step size (1D): " << dogleg_step_norm_
+ << " radius: " << radius_;
+ return;
+ }
+
+ Vector2d minimum(0.0, 0.0);
+ if (!FindMinimumOnTrustRegionBoundary(&minimum)) {
+ // For the positive semi-definite case, a traditional dogleg step
+ // is taken in this case.
+ LOG(WARNING) << "Failed to compute polynomial roots. "
+ << "Taking traditional dogleg step instead.";
+ ComputeTraditionalDoglegStep(dogleg);
+ return;
+ }
+
+ // Test first order optimality at the minimum.
+ // The first order KKT conditions state that the minimum x*
+ // has to satisfy either || x* ||^2 < r^2 (i.e. has to lie within
+ // the trust region), or
+ //
+ // (B x* + g) + y x* = 0
+ //
+ // for some positive scalar y.
+ // Here, as it is already known that the minimum lies on the boundary, the
+ // latter condition is tested. To allow for small imprecisions, we test if
+ // the angle between (B x* + g) and -x* is smaller than acos(0.99).
+ // The exact value of the cosine is arbitrary but should be close to 1.
+ //
+ // This condition should not be violated. If it is, the minimum was not
+ // correctly determined.
+ const double kCosineThreshold = 0.99;
+ const Vector2d grad_minimum = subspace_B_ * minimum + subspace_g_;
+ const double cosine_angle = -minimum.dot(grad_minimum) /
+ (minimum.norm() * grad_minimum.norm());
+ if (cosine_angle < kCosineThreshold) {
+ LOG(WARNING) << "First order optimality seems to be violated "
+ << "in the subspace method!\n"
+ << "Cosine of angle between x and B x + g is "
+ << cosine_angle << ".\n"
+ << "Taking a regular dogleg step instead.\n"
+ << "Please consider filing a bug report if this "
+ << "happens frequently or consistently.\n";
+ ComputeTraditionalDoglegStep(dogleg);
+ return;
+ }
+
+ // Create the full step from the optimal 2d solution.
+ dogleg_step = subspace_basis_ * minimum;
+ dogleg_step_norm_ = radius_;
+ dogleg_step.array() /= diagonal_.array();
+ VLOG(3) << "Dogleg subspace step size: " << dogleg_step_norm_
+ << " radius: " << radius_;
+}
+
+// Build the polynomial that defines the optimal Lagrange multipliers.
+// Let the Lagrangian be
+//
+// L(x, y) = 0.5 x^T B x + x^T g + y (0.5 x^T x - 0.5 r^2). (1)
+//
+// Stationary points of the Lagrangian are given by
+//
+// 0 = d L(x, y) / dx = Bx + g + y x (2)
+// 0 = d L(x, y) / dy = 0.5 x^T x - 0.5 r^2 (3)
+//
+// For any given y, we can solve (2) for x as
+//
+// x(y) = -(B + y I)^-1 g . (4)
+//
+// As B + y I is 2x2, we form the inverse explicitly:
+//
+// (B + y I)^-1 = (1 / det(B + y I)) adj(B + y I) (5)
+//
+// where adj() denotes adjugation. This should be safe, as B is positive
+// semi-definite and y is necessarily positive, so (B + y I) is indeed
+// invertible.
+// Plugging (5) into (4) and the result into (3), then dividing by 0.5 we
+// obtain
+//
+// 0 = (1 / det(B + y I))^2 g^T adj(B + y I)^T adj(B + y I) g - r^2
+// (6)
+//
+// or
+//
+// det(B + y I)^2 r^2 = g^T adj(B + y I)^T adj(B + y I) g (7a)
+// = g^T adj(B)^T adj(B) g
+// + 2 y g^T adj(B)^T g + y^2 g^T g (7b)
+//
+// as
+//
+// adj(B + y I) = adj(B) + y I = adj(B)^T + y I . (8)
+//
+// The left hand side can be expressed explicitly using
+//
+// det(B + y I) = det(B) + y tr(B) + y^2 . (9)
+//
+// So (7) is a polynomial in y of degree four.
+// Bringing everything back to the left hand side, the coefficients can
+// be read off as
+//
+// y^4 r^2
+// + y^3 2 r^2 tr(B)
+// + y^2 (r^2 tr(B)^2 + 2 r^2 det(B) - g^T g)
+// + y^1 (2 r^2 det(B) tr(B) - 2 g^T adj(B)^T g)
+// + y^0 (r^2 det(B)^2 - g^T adj(B)^T adj(B) g)
+//
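+// As a concrete check of these coefficients, take (purely for illustration)
+// B = diag(1, 2), g = (1, 1)^T and r = 1. Then det(B) = 2, tr(B) = 3,
+// adj(B) = diag(2, 1), and the coefficients above yield
+//
+//   y^4 + 6 y^3 + 11 y^2 + 6 y - 1 ,
+//
+// which is the same polynomial one obtains by eliminating x from
+// || (B + y I)^-1 g ||^2 = r^2 directly:
+//
+//   (2 + y)^2 + (1 + y)^2 = (1 + y)^2 (2 + y)^2 .
+//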
+Vector DoglegStrategy::MakePolynomialForBoundaryConstrainedProblem() const {
+ const double detB = subspace_B_.determinant();
+ const double trB = subspace_B_.trace();
+ const double r2 = radius_ * radius_;
+ Matrix2d B_adj;
+ B_adj << subspace_B_(1, 1), -subspace_B_(0, 1),
+ -subspace_B_(1, 0), subspace_B_(0, 0);
+
+ Vector polynomial(5);
+ polynomial(0) = r2;
+ polynomial(1) = 2.0 * r2 * trB;
+ polynomial(2) = r2 * ( trB * trB + 2.0 * detB ) - subspace_g_.squaredNorm();
+ polynomial(3) = -2.0 * ( subspace_g_.transpose() * B_adj * subspace_g_
+ - r2 * detB * trB );
+ polynomial(4) = r2 * detB * detB - (B_adj * subspace_g_).squaredNorm();
+
+ return polynomial;
+}
+
+// Given a Lagrange multiplier y that corresponds to a stationary point
+// of the Lagrangian L(x, y), compute the corresponding x from the
+// equation
+//
+// 0 = d L(x, y) / dx
+// = B * x + g + y * x
+// = (B + y * I) * x + g
+//
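+// Continuing the illustrative example above (B = diag(1, 2), g = (1, 1)^T,
+// r = 1), the single positive root is y* ~ 0.132, and the corresponding
+// step x(y*) = -(B + y* I)^-1 g ~ -(0.883, 0.469)^T has norm ~ 1 = r,
+// i.e. it lies on the trust region boundary as required.
+//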
+DoglegStrategy::Vector2d DoglegStrategy::ComputeSubspaceStepFromRoot(
+ double y) const {
+ const Matrix2d B_i = subspace_B_ + y * Matrix2d::Identity();
+ return -B_i.partialPivLu().solve(subspace_g_);
+}
+
+// This function evaluates the quadratic model at a point x in the
+// subspace spanned by subspace_basis_.
+double DoglegStrategy::EvaluateSubspaceModel(const Vector2d& x) const {
+ return 0.5 * x.dot(subspace_B_ * x) + subspace_g_.dot(x);
+}
+
+// This function attempts to solve the boundary-constrained subspace problem
+//
+// min. 1/2 x^T B^T H B x + g^T B x
+// s.t. || B x ||^2 = r^2
+//
+// where B is an orthonormal subspace basis and r is the trust-region radius.
+//
+// This is done by finding the roots of a fourth degree polynomial. If the
+// root finding fails, the function returns false and minimum will be set
+// to (0, 0). If it succeeds, true is returned.
+//
+// In the failure case, another step should be taken, such as the traditional
+// dogleg step.
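+//
+// Note that since the basis B is orthonormal, || B x || = || x ||, so the
+// constraint reduces to || x ||^2 = r^2 in the subspace coordinates.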
+bool DoglegStrategy::FindMinimumOnTrustRegionBoundary(Vector2d* minimum) const {
+ CHECK_NOTNULL(minimum);
+
+ // Return (0, 0) in all error cases.
+ minimum->setZero();
+
+ // Create the fourth-degree polynomial that is a necessary condition for
+ // optimality.
+ const Vector polynomial = MakePolynomialForBoundaryConstrainedProblem();
+
+ // Find the real parts y_i of its roots (not only the real roots).
+ Vector roots_real;
+ if (!FindPolynomialRoots(polynomial, &roots_real, NULL)) {
+ // Failed to find the roots of the polynomial, i.e. the candidate
+ // solutions of the constrained problem. Report this back to the caller.
+ return false;
+ }
+
+ // For each root y, compute B x(y) and check for feasibility.
+ // Notice that there should always be four roots, as the leading term of
+ // the polynomial is r^2 and therefore non-zero. However, as some roots
+ // may be complex, the real parts are not necessarily unique.
+ double minimum_value = std::numeric_limits<double>::max();
+ bool valid_root_found = false;
+ for (int i = 0; i < roots_real.size(); ++i) {
+ const Vector2d x_i = ComputeSubspaceStepFromRoot(roots_real(i));
+
+ // Not all roots correspond to points on the trust region boundary.
+ // There are at most four candidate solutions. As we are interested
+ // in the minimum, it is safe to consider all of them after projecting
+ // them onto the trust region boundary.
+ if (x_i.norm() > 0) {
+ const double f_i = EvaluateSubspaceModel((radius_ / x_i.norm()) * x_i);
+ valid_root_found = true;
+ if (f_i < minimum_value) {
+ minimum_value = f_i;
+ *minimum = x_i;
+ }
+ }
+ }
+
+ return valid_root_found;
+}
+
+LinearSolver::Summary DoglegStrategy::ComputeGaussNewtonStep(
+ SparseMatrix* jacobian,
+ const double* residuals) {
+ const int n = jacobian->num_cols();
+ LinearSolver::Summary linear_solver_summary;
+ linear_solver_summary.termination_type = FAILURE;
+
+ // The Jacobian matrix is often quite poorly conditioned. Thus it is
+ // necessary to add a diagonal matrix at the bottom to prevent the
+ // linear solver from failing.
+ //
+ // We do this by computing the same diagonal matrix as the one used
+ // by Levenberg-Marquardt (other choices are possible), and scaling
+ // it by a small constant (independent of the trust region radius).
+ //
+ // If the solve fails, the multiplier to the diagonal is increased
+ // up to max_mu_ by a factor of mu_increase_factor_ every time. If
+ // the linear solver is still not successful, the strategy returns
+ // with FAILURE.
+ //
+ // Next time when a new Gauss-Newton step is requested, the
+ // multiplier starts out from the last successful solve.
+ //
+ // When a step is declared successful, the multiplier is decreased
+ // by a factor of mu_increase_factor_ / 2.
+
+ while (mu_ < max_mu_) {
+ // Dogleg, as far as I (sameeragarwal) understand it, requires a
+ // reasonably good estimate of the Gauss-Newton step. This means
+ // that we need to solve the normal equations more or less
+ // exactly. This is reflected in the values of the tolerances set
+ // below.
+ //
+ // For now, this strategy should only be used with exact
+ // factorization based solvers, for which these tolerances are
+ // automatically satisfied.
+ //
+ // The right way to combine inexact solves with trust region
+ // methods is to use Steihaug's method.
+ LinearSolver::PerSolveOptions solve_options;
+ solve_options.q_tolerance = 0.0;
+ solve_options.r_tolerance = 0.0;
+
+ lm_diagonal_ = diagonal_ * std::sqrt(mu_);
+ solve_options.D = lm_diagonal_.data();
+
+ // As in the LevenbergMarquardtStrategy, solve Jy = r instead
+ // of Jx = -r and later set x = -y to avoid having to modify
+ // either jacobian or residuals.
+ InvalidateArray(n, gauss_newton_step_.data());
+ linear_solver_summary = linear_solver_->Solve(jacobian,
+ residuals,
+ solve_options,
+ gauss_newton_step_.data());
+
+ if (linear_solver_summary.termination_type == FAILURE ||
+ !IsArrayValid(n, gauss_newton_step_.data())) {
+ mu_ *= mu_increase_factor_;
+ VLOG(2) << "Increasing mu " << mu_;
+ linear_solver_summary.termination_type = FAILURE;
+ continue;
+ }
+ break;
+ }
+
+ if (linear_solver_summary.termination_type != FAILURE) {
+ // The scaled Gauss-Newton step is D * GN:
+ //
+ // - (D^-1 J^T J D^-1)^-1 (D^-1 g)
+ // = - D (J^T J)^-1 D D^-1 g
+ // = D (-(J^T J)^-1 g) = D * GN
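+ //
+ // The step stored in gauss_newton_step_ is therefore expressed in the
+ // scaled coordinates; the final dogleg step is mapped back to the
+ // original parameter space by dividing by D (the dogleg_step.array() /=
+ // diagonal_.array() statements above).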
+ //
+ gauss_newton_step_.array() *= -diagonal_.array();
+ }
+
+ return linear_solver_summary;
+}
+
+void DoglegStrategy::StepAccepted(double step_quality) {
+ CHECK_GT(step_quality, 0.0);
+
+ if (step_quality < decrease_threshold_) {
+ radius_ *= 0.5;
+ }
+
+ if (step_quality > increase_threshold_) {
+ radius_ = max(radius_, 3.0 * dogleg_step_norm_);
+ }
+
+ // Reduce the regularization multiplier, in the hope that whatever
+ // was causing the rank deficiency has gone away and we can return
+ // to doing a pure Gauss-Newton solve.
+ mu_ = max(min_mu_, 2.0 * mu_ / mu_increase_factor_);
+ reuse_ = false;
+}
+
+void DoglegStrategy::StepRejected(double step_quality) {
+ radius_ *= 0.5;
+ reuse_ = true;
+}
+
+void DoglegStrategy::StepIsInvalid() {
+ mu_ *= mu_increase_factor_;
+ reuse_ = false;
+}
+
+double DoglegStrategy::Radius() const {
+ return radius_;
+}
+
+bool DoglegStrategy::ComputeSubspaceModel(SparseMatrix* jacobian) {
+ // Compute an orthogonal basis for the subspace using QR decomposition.
+ Matrix basis_vectors(jacobian->num_cols(), 2);
+ basis_vectors.col(0) = gradient_;
+ basis_vectors.col(1) = gauss_newton_step_;
+ Eigen::ColPivHouseholderQR<Matrix> basis_qr(basis_vectors);
+
+ switch (basis_qr.rank()) {
+ case 0:
+ // This should never happen, as it implies that both the gradient
+ // and the Gauss-Newton step are zero. In this case, the minimizer should
+ // have stopped due to the gradient being too small.
+ LOG(ERROR) << "Rank of subspace basis is 0. "
+ << "This means that the gradient at the current iterate is "
+ << "zero but the optimization has not been terminated. "
+ << "You may have found a bug in Ceres.";
+ return false;
+
+ case 1:
+ // Gradient and Gauss-Newton step coincide, so we lie on one of the
+ // major axes of the quadratic problem. In this case, we simply move
+ // along the gradient until we reach the trust region boundary.
+ subspace_is_one_dimensional_ = true;
+ return true;
+
+ case 2:
+ subspace_is_one_dimensional_ = false;
+ break;
+
+ default:
+ LOG(ERROR) << "Rank of the subspace basis matrix is reported to be "
+ << "greater than 2. As the matrix contains only two "
+ << "columns this cannot be true and is indicative of "
+ << "a bug.";
+ return false;
+ }
+
+ // The subspace is two-dimensional, so compute the subspace model.
+ // Given the basis U, this is
+ //
+ // subspace_g_ = U^T g_scaled
+ //
+ // and
+ //
+ // subspace_B_ = U^T (J_scaled^T J_scaled) U
+ //
+ // As J_scaled = J * D^-1, the latter becomes
+ //
+ // subspace_B_ = ((U^T D^-1) J^T) (J (D^-1 U))
+ // = (J (D^-1 U))^T (J (D^-1 U))
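+ //
+ // which is what the code below computes: row i of Jb holds J (D^-1 u_i),
+ // so that subspace_B_ = Jb Jb^T.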
+
+ subspace_basis_ =
+ basis_qr.householderQ() * Matrix::Identity(jacobian->num_cols(), 2);
+
+ subspace_g_ = subspace_basis_.transpose() * gradient_;
+
+ Eigen::Matrix<double, 2, Eigen::Dynamic, Eigen::RowMajor>
+ Jb(2, jacobian->num_rows());
+ Jb.setZero();
+
+ Vector tmp;
+ tmp = (subspace_basis_.col(0).array() / diagonal_.array()).matrix();
+ jacobian->RightMultiply(tmp.data(), Jb.row(0).data());
+ tmp = (subspace_basis_.col(1).array() / diagonal_.array()).matrix();
+ jacobian->RightMultiply(tmp.data(), Jb.row(1).data());
+
+ subspace_B_ = Jb * Jb.transpose();
+
+ return true;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/dogleg_strategy.h b/internal/ceres/dogleg_strategy.h
new file mode 100644
index 0000000..bff1689
--- /dev/null
+++ b/internal/ceres/dogleg_strategy.h
@@ -0,0 +1,163 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_DOGLEG_STRATEGY_H_
+#define CERES_INTERNAL_DOGLEG_STRATEGY_H_
+
+#include "ceres/linear_solver.h"
+#include "ceres/trust_region_strategy.h"
+
+namespace ceres {
+namespace internal {
+
+// Dogleg step computation and trust region sizing strategy based on
+// on "Methods for Nonlinear Least Squares" by K. Madsen, H.B. Nielsen
+// and O. Tingleff. Available to download from
+//
+// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
+//
+// One minor modification is that instead of computing the pure
+// Gauss-Newton step, we compute a regularized version of it. This is
+// because the Jacobian is often rank-deficient and in such cases
+// using a direct solver leads to numerical failure.
+//
+// If SUBSPACE is passed as the type argument to the constructor, the
+// DoglegStrategy follows the approach by Schultz, Schnabel and Byrd.
+// This finds the exact optimum over the two-dimensional subspace
+// spanned by the two Dogleg vectors.
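+//
+// A rough usage sketch (the options and summary types come from
+// trust_region_strategy.h; see dogleg_strategy_test.cc for a complete
+// example):
+//
+//   TrustRegionStrategy::Options options;
+//   options.linear_solver = &linear_solver;
+//   options.dogleg_type = SUBSPACE_DOGLEG;
+//   DoglegStrategy strategy(options);
+//
+//   TrustRegionStrategy::PerSolveOptions pso;
+//   TrustRegionStrategy::Summary summary =
+//       strategy.ComputeStep(pso, jacobian, residuals, step);
+//   // ... evaluate the step, then call StepAccepted() or StepRejected()
+//   // to update the trust region radius.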
+class DoglegStrategy : public TrustRegionStrategy {
+ public:
+ DoglegStrategy(const TrustRegionStrategy::Options& options);
+ virtual ~DoglegStrategy() {}
+
+ // TrustRegionStrategy interface
+ virtual Summary ComputeStep(const PerSolveOptions& per_solve_options,
+ SparseMatrix* jacobian,
+ const double* residuals,
+ double* step);
+ virtual void StepAccepted(double step_quality);
+ virtual void StepRejected(double step_quality);
+ virtual void StepIsInvalid();
+
+ virtual double Radius() const;
+
+ // These functions are predominantly for testing.
+ Vector gradient() const { return gradient_; }
+ Vector gauss_newton_step() const { return gauss_newton_step_; }
+ Matrix subspace_basis() const { return subspace_basis_; }
+ Vector subspace_g() const { return subspace_g_; }
+ Matrix subspace_B() const { return subspace_B_; }
+
+ private:
+ typedef Eigen::Matrix<double, 2, 1, Eigen::DontAlign> Vector2d;
+ typedef Eigen::Matrix<double, 2, 2, Eigen::DontAlign> Matrix2d;
+
+ LinearSolver::Summary ComputeGaussNewtonStep(SparseMatrix* jacobian,
+ const double* residuals);
+ void ComputeCauchyPoint(SparseMatrix* jacobian);
+ void ComputeGradient(SparseMatrix* jacobian, const double* residuals);
+ void ComputeTraditionalDoglegStep(double* step);
+ bool ComputeSubspaceModel(SparseMatrix* jacobian);
+ void ComputeSubspaceDoglegStep(double* step);
+
+ bool FindMinimumOnTrustRegionBoundary(Vector2d* minimum) const;
+ Vector MakePolynomialForBoundaryConstrainedProblem() const;
+ Vector2d ComputeSubspaceStepFromRoot(double y) const;
+ double EvaluateSubspaceModel(const Vector2d& x) const;
+
+ LinearSolver* linear_solver_;
+ double radius_;
+ const double max_radius_;
+
+ const double min_diagonal_;
+ const double max_diagonal_;
+
+ // mu is used to scale the diagonal matrix used to make the
+ // Gauss-Newton solve full rank. In each solve, the strategy starts
+ // out with mu = min_mu, and tries values up to max_mu. If the user
+ // reports an invalid step, the value of mu_ is increased so that
+ // the next solve starts with a stronger regularization.
+ //
+ // If a successful step is reported, then the value of mu_ is
+ // decreased with a lower bound of min_mu_.
+ double mu_;
+ const double min_mu_;
+ const double max_mu_;
+ const double mu_increase_factor_;
+ const double increase_threshold_;
+ const double decrease_threshold_;
+
+ Vector diagonal_; // sqrt(diag(J^T J))
+ Vector lm_diagonal_;
+
+ Vector gradient_;
+ Vector gauss_newton_step_;
+
+ // cauchy_step = alpha * gradient
+ double alpha_;
+ double dogleg_step_norm_;
+
+ // When ComputeStep is called, reuse_ indicates whether the
+ // Gauss-Newton and Cauchy steps from the last call to ComputeStep
+ // can be reused or not.
+ //
+ // If the user called StepAccepted, then it is expected that the
+ // user has recomputed the Jacobian matrix and a new Gauss-Newton
+ // solve is needed, so reuse_ is set to false.
+ //
+ // If the user called StepRejected, then it is expected that the
+ // user wants to solve the trust region problem with the same matrix
+ // but a different trust region radius and the Gauss-Newton and
+ // Cauchy steps can be reused to compute the Dogleg; thus reuse_ is
+ // set to true.
+ //
+ // If the user called StepIsInvalid, then there was a numerical
+ // problem with the step computed in the last call to ComputeStep,
+ // and the regularization used to do the Gauss-Newton solve is
+ // increased and a new solve should be done when ComputeStep is
+ // called again; thus reuse_ is set to false.
+ bool reuse_;
+
+ // The dogleg type determines how the minimum of the local
+ // quadratic model is found.
+ DoglegType dogleg_type_;
+
+ // If the type is SUBSPACE_DOGLEG, the two-dimensional
+ // model 1/2 x^T B x + g^T x has to be computed and stored.
+ bool subspace_is_one_dimensional_;
+ Matrix subspace_basis_;
+ Vector2d subspace_g_;
+ Matrix2d subspace_B_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_DOGLEG_STRATEGY_H_
diff --git a/internal/ceres/dogleg_strategy_test.cc b/internal/ceres/dogleg_strategy_test.cc
new file mode 100644
index 0000000..9e2ed9f
--- /dev/null
+++ b/internal/ceres/dogleg_strategy_test.cc
@@ -0,0 +1,291 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: moll.markus@arcor.de (Markus Moll)
+
+#include <limits>
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/dense_qr_solver.h"
+#include "ceres/dogleg_strategy.h"
+#include "ceres/linear_solver.h"
+#include "ceres/trust_region_strategy.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
+class Fixture : public testing::Test {
+ protected:
+ scoped_ptr<DenseSparseMatrix> jacobian_;
+ Vector residual_;
+ Vector x_;
+ TrustRegionStrategy::Options options_;
+};
+
+// A test problem where
+//
+// J^T J = Q diag([1 2 4 8 16 32]) Q^T
+//
+// where Q is a randomly chosen orthonormal basis of R^6.
+// The residual is chosen so that the minimum of the quadratic function is
+// at (1, 1, 1, 1, 1, 1). It is therefore at a distance of sqrt(6) ~ 2.45
+// from the origin.
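+// With residual r = -J m for the desired minimum m, the gradient
+// J^T (J x + r) vanishes at x = m, which is how SetUp() below places the
+// minimum.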
+class DoglegStrategyFixtureEllipse : public Fixture {
+ protected:
+ virtual void SetUp() {
+ Matrix basis(6, 6);
+ // The following lines exceed 80 characters for better readability.
+ basis << -0.1046920933796121, -0.7449367449921986, -0.4190744502875876, -0.4480450716142566, 0.2375351607929440, -0.0363053418882862,
+ 0.4064975684355914, 0.2681113508511354, -0.7463625494601520, -0.0803264850508117, -0.4463149623021321, 0.0130224954867195,
+ -0.5514387729089798, 0.1026621026168657, -0.5008316122125011, 0.5738122212666414, 0.2974664724007106, 0.1296020877535158,
+ 0.5037835370947156, 0.2668479925183712, -0.1051754618492798, -0.0272739396578799, 0.7947481647088278, -0.1776623363955670,
+ -0.4005458426625444, 0.2939330589634109, -0.0682629380550051, -0.2895448882503687, -0.0457239396341685, -0.8139899477847840,
+ -0.3247764582762654, 0.4528151365941945, -0.0276683863102816, -0.6155994592510784, 0.1489240599972848, 0.5362574892189350;
+
+ Vector Ddiag(6);
+ Ddiag << 1.0, 2.0, 4.0, 8.0, 16.0, 32.0;
+
+ Matrix sqrtD = Ddiag.array().sqrt().matrix().asDiagonal();
+ Matrix jacobian = sqrtD * basis;
+ jacobian_.reset(new DenseSparseMatrix(jacobian));
+
+ Vector minimum(6);
+ minimum << 1.0, 1.0, 1.0, 1.0, 1.0, 1.0;
+ residual_ = -jacobian * minimum;
+
+ x_.resize(6);
+ x_.setZero();
+
+ options_.lm_min_diagonal = 1.0;
+ options_.lm_max_diagonal = 1.0;
+ }
+};
+
+// A test problem where
+//
+// J^T J = diag([1 2 4 8 16 32]) .
+//
+// The residual is chosen so that the minimum of the quadratic function is
+// at (0, 0, 1, 0, 0, 0). It is therefore at a distance of 1 from the origin.
+// The gradient at the origin points towards the global minimum.
+class DoglegStrategyFixtureValley : public Fixture {
+ protected:
+ virtual void SetUp() {
+ Vector Ddiag(6);
+ Ddiag << 1.0, 2.0, 4.0, 8.0, 16.0, 32.0;
+
+ Matrix jacobian = Ddiag.asDiagonal();
+ jacobian_.reset(new DenseSparseMatrix(jacobian));
+
+ Vector minimum(6);
+ minimum << 0.0, 0.0, 1.0, 0.0, 0.0, 0.0;
+ residual_ = -jacobian * minimum;
+
+ x_.resize(6);
+ x_.setZero();
+
+ options_.lm_min_diagonal = 1.0;
+ options_.lm_max_diagonal = 1.0;
+ }
+};
+
+const double kTolerance = 1e-14;
+const double kToleranceLoose = 1e-5;
+const double kEpsilon = std::numeric_limits<double>::epsilon();
+
+} // namespace
+
+// The DoglegStrategy must never return a step that is longer than the current
+// trust region radius.
+TEST_F(DoglegStrategyFixtureEllipse, TrustRegionObeyedTraditional) {
+ scoped_ptr<LinearSolver> linear_solver(
+ new DenseQRSolver(LinearSolver::Options()));
+ options_.linear_solver = linear_solver.get();
+ // The global minimum is at (1, 1, ..., 1), so the distance to it is sqrt(6.0).
+ // By restricting the trust region to a radius of 2.0, we test if the trust
+ // region is actually obeyed.
+ options_.dogleg_type = TRADITIONAL_DOGLEG;
+ options_.initial_radius = 2.0;
+ options_.max_radius = 2.0;
+
+ DoglegStrategy strategy(options_);
+ TrustRegionStrategy::PerSolveOptions pso;
+
+ TrustRegionStrategy::Summary summary = strategy.ComputeStep(pso,
+ jacobian_.get(),
+ residual_.data(),
+ x_.data());
+
+ EXPECT_NE(summary.termination_type, FAILURE);
+ EXPECT_LE(x_.norm(), options_.initial_radius * (1.0 + 4.0 * kEpsilon));
+}
+
+TEST_F(DoglegStrategyFixtureEllipse, TrustRegionObeyedSubspace) {
+ scoped_ptr<LinearSolver> linear_solver(
+ new DenseQRSolver(LinearSolver::Options()));
+ options_.linear_solver = linear_solver.get();
+ options_.dogleg_type = SUBSPACE_DOGLEG;
+ options_.initial_radius = 2.0;
+ options_.max_radius = 2.0;
+
+ DoglegStrategy strategy(options_);
+ TrustRegionStrategy::PerSolveOptions pso;
+
+ TrustRegionStrategy::Summary summary = strategy.ComputeStep(pso,
+ jacobian_.get(),
+ residual_.data(),
+ x_.data());
+
+ EXPECT_NE(summary.termination_type, FAILURE);
+ EXPECT_LE(x_.norm(), options_.initial_radius * (1.0 + 4.0 * kEpsilon));
+}
+
+TEST_F(DoglegStrategyFixtureEllipse, CorrectGaussNewtonStep) {
+ scoped_ptr<LinearSolver> linear_solver(
+ new DenseQRSolver(LinearSolver::Options()));
+ options_.linear_solver = linear_solver.get();
+ options_.dogleg_type = SUBSPACE_DOGLEG;
+ options_.initial_radius = 10.0;
+ options_.max_radius = 10.0;
+
+ DoglegStrategy strategy(options_);
+ TrustRegionStrategy::PerSolveOptions pso;
+
+ TrustRegionStrategy::Summary summary = strategy.ComputeStep(pso,
+ jacobian_.get(),
+ residual_.data(),
+ x_.data());
+
+ EXPECT_NE(summary.termination_type, FAILURE);
+ EXPECT_NEAR(x_(0), 1.0, kToleranceLoose);
+ EXPECT_NEAR(x_(1), 1.0, kToleranceLoose);
+ EXPECT_NEAR(x_(2), 1.0, kToleranceLoose);
+ EXPECT_NEAR(x_(3), 1.0, kToleranceLoose);
+ EXPECT_NEAR(x_(4), 1.0, kToleranceLoose);
+ EXPECT_NEAR(x_(5), 1.0, kToleranceLoose);
+}
+
+// Test if the subspace basis is a valid orthonormal basis of the space spanned
+// by the gradient and the Gauss-Newton point.
+TEST_F(DoglegStrategyFixtureEllipse, ValidSubspaceBasis) {
+ scoped_ptr<LinearSolver> linear_solver(
+ new DenseQRSolver(LinearSolver::Options()));
+ options_.linear_solver = linear_solver.get();
+ options_.dogleg_type = SUBSPACE_DOGLEG;
+ options_.initial_radius = 2.0;
+ options_.max_radius = 2.0;
+
+ DoglegStrategy strategy(options_);
+ TrustRegionStrategy::PerSolveOptions pso;
+
+ TrustRegionStrategy::Summary summary = strategy.ComputeStep(pso,
+ jacobian_.get(),
+ residual_.data(),
+ x_.data());
+
+ // Check if the basis is orthonormal.
+ const Matrix basis = strategy.subspace_basis();
+ EXPECT_NEAR(basis.col(0).norm(), 1.0, kTolerance);
+ EXPECT_NEAR(basis.col(1).norm(), 1.0, kTolerance);
+ EXPECT_NEAR(basis.col(0).dot(basis.col(1)), 0.0, kTolerance);
+
+ // Check if the gradient projects onto itself.
+ const Vector gradient = strategy.gradient();
+ EXPECT_NEAR((gradient - basis*(basis.transpose()*gradient)).norm(),
+ 0.0,
+ kTolerance);
+
+ // Check if the Gauss-Newton point projects onto itself.
+ const Vector gn = strategy.gauss_newton_step();
+ EXPECT_NEAR((gn - basis*(basis.transpose()*gn)).norm(),
+ 0.0,
+ kTolerance);
+}
+
+// Test if the step is correct if the gradient and the Gauss-Newton step point
+// in the same direction and the Gauss-Newton step is outside the trust region,
+// i.e. the trust region is active.
+TEST_F(DoglegStrategyFixtureValley, CorrectStepLocalOptimumAlongGradient) {
+ scoped_ptr<LinearSolver> linear_solver(
+ new DenseQRSolver(LinearSolver::Options()));
+ options_.linear_solver = linear_solver.get();
+ options_.dogleg_type = SUBSPACE_DOGLEG;
+ options_.initial_radius = 0.25;
+ options_.max_radius = 0.25;
+
+ DoglegStrategy strategy(options_);
+ TrustRegionStrategy::PerSolveOptions pso;
+
+ TrustRegionStrategy::Summary summary = strategy.ComputeStep(pso,
+ jacobian_.get(),
+ residual_.data(),
+ x_.data());
+
+ EXPECT_NE(summary.termination_type, FAILURE);
+ EXPECT_NEAR(x_(0), 0.0, kToleranceLoose);
+ EXPECT_NEAR(x_(1), 0.0, kToleranceLoose);
+ EXPECT_NEAR(x_(2), options_.initial_radius, kToleranceLoose);
+ EXPECT_NEAR(x_(3), 0.0, kToleranceLoose);
+ EXPECT_NEAR(x_(4), 0.0, kToleranceLoose);
+ EXPECT_NEAR(x_(5), 0.0, kToleranceLoose);
+}
+
+// Test if the step is correct if the gradient and the Gauss-Newton step point
+// in the same direction and the Gauss-Newton step is inside the trust region,
+// i.e. the trust region is inactive.
+TEST_F(DoglegStrategyFixtureValley, CorrectStepGlobalOptimumAlongGradient) {
+ scoped_ptr<LinearSolver> linear_solver(
+ new DenseQRSolver(LinearSolver::Options()));
+ options_.linear_solver = linear_solver.get();
+ options_.dogleg_type = SUBSPACE_DOGLEG;
+ options_.initial_radius = 2.0;
+ options_.max_radius = 2.0;
+
+ DoglegStrategy strategy(options_);
+ TrustRegionStrategy::PerSolveOptions pso;
+
+ TrustRegionStrategy::Summary summary = strategy.ComputeStep(pso,
+ jacobian_.get(),
+ residual_.data(),
+ x_.data());
+
+ EXPECT_NE(summary.termination_type, FAILURE);
+ EXPECT_NEAR(x_(0), 0.0, kToleranceLoose);
+ EXPECT_NEAR(x_(1), 0.0, kToleranceLoose);
+ EXPECT_NEAR(x_(2), 1.0, kToleranceLoose);
+ EXPECT_NEAR(x_(3), 0.0, kToleranceLoose);
+ EXPECT_NEAR(x_(4), 0.0, kToleranceLoose);
+ EXPECT_NEAR(x_(5), 0.0, kToleranceLoose);
+}
+
+} // namespace internal
+} // namespace ceres
+
diff --git a/internal/ceres/evaluator.cc b/internal/ceres/evaluator.cc
new file mode 100644
index 0000000..a3ce6f0
--- /dev/null
+++ b/internal/ceres/evaluator.cc
@@ -0,0 +1,147 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include <vector>
+#include "ceres/block_evaluate_preparer.h"
+#include "ceres/block_jacobian_writer.h"
+#include "ceres/compressed_row_jacobian_writer.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/crs_matrix.h"
+#include "ceres/dense_jacobian_writer.h"
+#include "ceres/evaluator.h"
+#include "ceres/internal/port.h"
+#include "ceres/program_evaluator.h"
+#include "ceres/scratch_evaluate_preparer.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+Evaluator::~Evaluator() {}
+
+Evaluator* Evaluator::Create(const Evaluator::Options& options,
+ Program* program,
+ string* error) {
+ switch (options.linear_solver_type) {
+ case DENSE_QR:
+ case DENSE_NORMAL_CHOLESKY:
+ return new ProgramEvaluator<ScratchEvaluatePreparer,
+ DenseJacobianWriter>(options,
+ program);
+ case DENSE_SCHUR:
+ case SPARSE_SCHUR:
+ case ITERATIVE_SCHUR:
+ case CGNR:
+ return new ProgramEvaluator<BlockEvaluatePreparer,
+ BlockJacobianWriter>(options,
+ program);
+ case SPARSE_NORMAL_CHOLESKY:
+ return new ProgramEvaluator<ScratchEvaluatePreparer,
+ CompressedRowJacobianWriter>(options,
+ program);
+ default:
+ *error = "Invalid Linear Solver Type. Unable to create evaluator.";
+ return NULL;
+ }
+}
+
+bool Evaluator::Evaluate(Program* program,
+ int num_threads,
+ double* cost,
+ vector<double>* residuals,
+ vector<double>* gradient,
+ CRSMatrix* output_jacobian) {
+ CHECK_GE(num_threads, 1)
+ << "This is a Ceres bug; please contact the developers!";
+ CHECK_NOTNULL(cost);
+
+ // Setup the Parameter indices and offsets before an evaluator can
+ // be constructed and used.
+ program->SetParameterOffsetsAndIndex();
+
+ Evaluator::Options evaluator_options;
+ evaluator_options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+ evaluator_options.num_threads = num_threads;
+
+ string error;
+ scoped_ptr<Evaluator> evaluator(
+ Evaluator::Create(evaluator_options, program, &error));
+ if (evaluator.get() == NULL) {
+ LOG(ERROR) << "Unable to create an Evaluator object. "
+ << "Error: " << error
+ << "This is a Ceres bug; please contact the developers!";
+ return false;
+ }
+
+ if (residuals != NULL) {
+ residuals->resize(evaluator->NumResiduals());
+ }
+
+ if (gradient != NULL) {
+ gradient->resize(evaluator->NumEffectiveParameters());
+ }
+
+ scoped_ptr<CompressedRowSparseMatrix> jacobian;
+ if (output_jacobian != NULL) {
+ jacobian.reset(
+ down_cast<CompressedRowSparseMatrix*>(evaluator->CreateJacobian()));
+ }
+
+ // Point the state pointers to the user state pointers. This is
+ // needed so that we can extract a parameter vector which is then
+ // passed to Evaluator::Evaluate.
+ program->SetParameterBlockStatePtrsToUserStatePtrs();
+
+ // Copy the value of the parameter blocks into a vector, since the
+ // Evaluator::Evaluate method needs its input as such. The previous
+ // call to SetParameterBlockStatePtrsToUserStatePtrs ensures that
+ // these values are the ones corresponding to the actual state of
+ // the parameter blocks, rather than the temporary state pointer
+ // used for evaluation.
+ Vector parameters(program->NumParameters());
+ program->ParameterBlocksToStateVector(parameters.data());
+
+ if (!evaluator->Evaluate(parameters.data(),
+ cost,
+ residuals != NULL ? &(*residuals)[0] : NULL,
+ gradient != NULL ? &(*gradient)[0] : NULL,
+ jacobian.get())) {
+ return false;
+ }
+
+ if (output_jacobian != NULL) {
+ jacobian->ToCRSMatrix(output_jacobian);
+ }
+
+ return true;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/evaluator.h b/internal/ceres/evaluator.h
new file mode 100644
index 0000000..3bbf50a
--- /dev/null
+++ b/internal/ceres/evaluator.h
@@ -0,0 +1,160 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+// keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_EVALUATOR_H_
+#define CERES_INTERNAL_EVALUATOR_H_
+
+#include <string>
+#include <vector>
+#include "ceres/internal/port.h"
+#include "ceres/types.h"
+
+namespace ceres {
+
+struct CRSMatrix;
+
+namespace internal {
+
+class Program;
+class SparseMatrix;
+
+// The Evaluator interface offers a way to interact with a least squares cost
+// function that is useful for an optimizer that wants to minimize the least
+// squares objective. This insulates the optimizer from issues like Jacobian
+// storage, parameterization, etc.
+class Evaluator {
+ public:
+ virtual ~Evaluator();
+
+ struct Options {
+ Options()
+ : num_threads(1),
+ num_eliminate_blocks(-1),
+ linear_solver_type(DENSE_QR) {}
+
+ int num_threads;
+ int num_eliminate_blocks;
+ LinearSolverType linear_solver_type;
+ };
+
+ static Evaluator* Create(const Options& options,
+ Program* program,
+ string* error);
+
+
+ // This is used for computing the cost, residual and Jacobian for
+ // returning to the user. For actually solving the optimization
+ // problem, the optimization algorithm uses the ProgramEvaluator
+ // objects directly.
+ //
+ // The residuals, gradient and jacobian pointers can be NULL, in
+ // which case they will not be evaluated. cost cannot be NULL.
+ //
+ // The parallelism of the evaluator is controlled by num_threads; it
+ // should be at least 1.
+ //
+ // Note that this function does not take a parameter vector as
+ // input. The parameter blocks are evaluated on the values contained
+ // in the arrays pointed to by their user_state pointers.
+ //
+ // Also worth noting is that this function mutates program by
+ // calling Program::SetParameterOffsetsAndIndex() on it so that an
+ // evaluator object can be constructed.
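+ //
+ // A rough usage sketch (error handling omitted):
+ //
+ //   double cost;
+ //   vector<double> residuals, gradient;
+ //   CRSMatrix jacobian;
+ //   Evaluator::Evaluate(program, 1, &cost, &residuals, &gradient, &jacobian);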
+ static bool Evaluate(Program* program,
+ int num_threads,
+ double* cost,
+ vector<double>* residuals,
+ vector<double>* gradient,
+ CRSMatrix* jacobian);
+
+ // Build and return a sparse matrix for storing and working with the Jacobian
+ // of the objective function. The jacobian has dimensions
+ // NumResiduals() by NumEffectiveParameters(), and is typically extremely
+ // sparse. Since the sparsity pattern of the Jacobian remains constant over
+ // the lifetime of the optimization problem, this method is used to
+ // instantiate a SparseMatrix object with the appropriate sparsity structure
+ // (which can be an expensive operation) and then reused by the optimization
+ // algorithm and the various linear solvers.
+ //
+ // It is expected that the classes implementing this interface will be aware
+ // of their client's requirements for the kind of sparse matrix storage and
+ // layout that is needed for an efficient implementation. For example
+ // CompressedRowOptimizationProblem creates a compressed row representation of
+ // the jacobian for use with CHOLMOD, whereas BlockOptimizationProblem
+ // creates a BlockSparseMatrix representation of the jacobian for use in the
+ // Schur complement based methods.
+ virtual SparseMatrix* CreateJacobian() const = 0;
+
+ // Evaluate the cost function for the given state. Returns the cost,
+ // residuals, and jacobian in the corresponding arguments. Both residuals and
+ // jacobian are optional; to avoid computing them, pass NULL.
+ //
+ // If non-NULL, the Jacobian must have a suitable sparsity pattern; only the
+ // values array of the jacobian is modified.
+ //
+ // state is an array of size NumParameters(), cost is a pointer to a single
+ // double, and residuals is an array of doubles of size NumResiduals().
+ virtual bool Evaluate(const double* state,
+ double* cost,
+ double* residuals,
+ double* gradient,
+ SparseMatrix* jacobian) = 0;
+
+ // Make a change delta (of size NumEffectiveParameters()) to state (of size
+ // NumParameters()) and store the result in state_plus_delta.
+ //
+ // In the case that no local parameterizations are used, this is
+ // equivalent to
+ //
+ //   state_plus_delta[i] = state[i] + delta[i];
+ //
+ // however, the mapping is more complicated in the case of parameterizations
+ // like quaternions. This is the same as the "Plus()" operation in
+ // local_parameterization.h, but operating over the entire state vector for a
+ // problem.
+ virtual bool Plus(const double* state,
+ const double* delta,
+ double* state_plus_delta) const = 0;
+
+ // The number of parameters in the optimization problem.
+ virtual int NumParameters() const = 0;
+
+ // This is the effective number of parameters that the optimizer may adjust.
+ // This applies when there are parameterizations on some of the parameters.
+ virtual int NumEffectiveParameters() const = 0;
+
+ // The number of residuals in the optimization problem.
+ virtual int NumResiduals() const = 0;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_EVALUATOR_H_
diff --git a/internal/ceres/evaluator_test.cc b/internal/ceres/evaluator_test.cc
new file mode 100644
index 0000000..a4e7b25
--- /dev/null
+++ b/internal/ceres/evaluator_test.cc
@@ -0,0 +1,959 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Tests shared across evaluators. The tests try all combinations of linear
+// solver and num_eliminate_blocks (for schur-based solvers).
+
+#include "ceres/evaluator.h"
+
+#include "ceres/casts.h"
+#include "ceres/cost_function.h"
+#include "ceres/crs_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/types.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// TODO(keir): Consider pushing this into a common test utils file.
+template<int kFactor, int kNumResiduals,
+ int N0 = 0, int N1 = 0, int N2 = 0, bool kSucceeds = true>
+class ParameterIgnoringCostFunction
+ : public SizedCostFunction<kNumResiduals, N0, N1, N2> {
+ typedef SizedCostFunction<kNumResiduals, N0, N1, N2> Base;
+ public:
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ for (int i = 0; i < Base::num_residuals(); ++i) {
+ residuals[i] = i + 1;
+ }
+ if (jacobians) {
+ for (int k = 0; k < Base::parameter_block_sizes().size(); ++k) {
+ // The jacobians here are full sized, but they are transformed in the
+ // evaluator into the "local" jacobian. In the tests, the "subset
+ // constant" parameterization is used, which should pick out columns
+ // from these jacobians. Put values in the jacobian that make this
+ // obvious; in particular, make the jacobians like this:
+ //
+ // 1 2 3 4 ...
+ // 1 2 3 4 ... .* kFactor
+ // 1 2 3 4 ...
+ //
+ // where the multiplication by kFactor makes it easier to distinguish
+ // between Jacobians of different residuals for the same parameter.
+ if (jacobians[k] != NULL) {
+ MatrixRef jacobian(jacobians[k],
+ Base::num_residuals(),
+ Base::parameter_block_sizes()[k]);
+ for (int j = 0; j < Base::parameter_block_sizes()[k]; ++j) {
+ jacobian.col(j).setConstant(kFactor * (j + 1));
+ }
+ }
+ }
+ }
+ return kSucceeds;
+ }
+};
+
+struct ExpectedEvaluation {
+ int num_rows;
+ int num_cols;
+ double cost;
+ const double residuals[50];
+ const double gradient[50];
+ const double jacobian[200];
+};
+
+void CompareEvaluations(int expected_num_rows,
+ int expected_num_cols,
+ double expected_cost,
+ const double* expected_residuals,
+ const double* expected_gradient,
+ const double* expected_jacobian,
+ const double actual_cost,
+ const double* actual_residuals,
+ const double* actual_gradient,
+ const double* actual_jacobian) {
+ EXPECT_EQ(expected_cost, actual_cost);
+
+ if (expected_residuals != NULL) {
+ ConstVectorRef expected_residuals_vector(expected_residuals,
+ expected_num_rows);
+ ConstVectorRef actual_residuals_vector(actual_residuals,
+ expected_num_rows);
+ EXPECT_TRUE((actual_residuals_vector.array() ==
+ expected_residuals_vector.array()).all())
+ << "Actual:\n" << actual_residuals_vector
+ << "\nExpected:\n" << expected_residuals_vector;
+ }
+
+ if (expected_gradient != NULL) {
+ ConstVectorRef expected_gradient_vector(expected_gradient,
+ expected_num_cols);
+ ConstVectorRef actual_gradient_vector(actual_gradient,
+ expected_num_cols);
+
+ EXPECT_TRUE((actual_gradient_vector.array() ==
+ expected_gradient_vector.array()).all())
+ << "Actual:\n" << actual_gradient_vector.transpose()
+ << "\nExpected:\n" << expected_gradient_vector.transpose();
+ }
+
+ if (expected_jacobian != NULL) {
+ ConstMatrixRef expected_jacobian_matrix(expected_jacobian,
+ expected_num_rows,
+ expected_num_cols);
+ ConstMatrixRef actual_jacobian_matrix(actual_jacobian,
+ expected_num_rows,
+ expected_num_cols);
+ EXPECT_TRUE((actual_jacobian_matrix.array() ==
+ expected_jacobian_matrix.array()).all())
+ << "Actual:\n" << actual_jacobian_matrix
+ << "\nExpected:\n" << expected_jacobian_matrix;
+ }
+}
+
+
+struct EvaluatorTest
+ : public ::testing::TestWithParam<pair<LinearSolverType, int> > {
+ Evaluator* CreateEvaluator(Program* program) {
+ // This program is straight from the ProblemImpl, and so has no index/offset
+ // yet; compute it here as required by the evaluator implementations.
+ program->SetParameterOffsetsAndIndex();
+
+ VLOG(1) << "Creating evaluator with type: " << GetParam().first
+ << " and num_eliminate_blocks: " << GetParam().second;
+ Evaluator::Options options;
+ options.linear_solver_type = GetParam().first;
+ options.num_eliminate_blocks = GetParam().second;
+ string error;
+ return Evaluator::Create(options, program, &error);
+ }
+
+ void EvaluateAndCompare(ProblemImpl *problem,
+ int expected_num_rows,
+ int expected_num_cols,
+ double expected_cost,
+ const double* expected_residuals,
+ const double* expected_gradient,
+ const double* expected_jacobian) {
+ scoped_ptr<Evaluator> evaluator(
+ CreateEvaluator(problem->mutable_program()));
+ int num_residuals = expected_num_rows;
+ int num_parameters = expected_num_cols;
+
+ double cost = -1;
+
+ Vector residuals(num_residuals);
+ residuals.setConstant(-2000);
+
+ Vector gradient(num_parameters);
+ gradient.setConstant(-3000);
+
+ scoped_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());
+
+ ASSERT_EQ(expected_num_rows, evaluator->NumResiduals());
+ ASSERT_EQ(expected_num_cols, evaluator->NumEffectiveParameters());
+ ASSERT_EQ(expected_num_rows, jacobian->num_rows());
+ ASSERT_EQ(expected_num_cols, jacobian->num_cols());
+
+ vector<double> state(evaluator->NumParameters());
+
+ ASSERT_TRUE(evaluator->Evaluate(
+ &state[0],
+ &cost,
+ expected_residuals != NULL ? &residuals[0] : NULL,
+ expected_gradient != NULL ? &gradient[0] : NULL,
+ expected_jacobian != NULL ? jacobian.get() : NULL));
+
+ Matrix actual_jacobian;
+ if (expected_jacobian != NULL) {
+ jacobian->ToDenseMatrix(&actual_jacobian);
+ }
+
+ CompareEvaluations(expected_num_rows,
+ expected_num_cols,
+ expected_cost,
+ expected_residuals,
+ expected_gradient,
+ expected_jacobian,
+ cost,
+ &residuals[0],
+ &gradient[0],
+ actual_jacobian.data());
+ }
+
+ // Try all combinations of parameters for the evaluator.
+ void CheckAllEvaluationCombinations(const ExpectedEvaluation &expected) {
+ for (int i = 0; i < 8; ++i) {
+ EvaluateAndCompare(&problem,
+ expected.num_rows,
+ expected.num_cols,
+ expected.cost,
+ (i & 1) ? expected.residuals : NULL,
+ (i & 2) ? expected.gradient : NULL,
+ (i & 4) ? expected.jacobian : NULL);
+ }
+ }
+
+ // The values are ignored completely by the cost function.
+ double x[2];
+ double y[3];
+ double z[4];
+
+ ProblemImpl problem;
+};
+
+void SetSparseMatrixConstant(SparseMatrix* sparse_matrix, double value) {
+ VectorRef(sparse_matrix->mutable_values(),
+ sparse_matrix->num_nonzeros()).setConstant(value);
+}
+
+TEST_P(EvaluatorTest, SingleResidualProblem) {
+ problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 3, 2, 3, 4>,
+ NULL,
+ x, y, z);
+
+ ExpectedEvaluation expected = {
+ // Rows/columns
+ 3, 9,
+ // Cost
+ 7.0,
+ // Residuals
+ { 1.0, 2.0, 3.0 },
+ // Gradient
+ { 6.0, 12.0, // x
+ 6.0, 12.0, 18.0, // y
+ 6.0, 12.0, 18.0, 24.0, // z
+ },
+ // Jacobian
+ // x y z
+ { 1, 2, 1, 2, 3, 1, 2, 3, 4,
+ 1, 2, 1, 2, 3, 1, 2, 3, 4,
+ 1, 2, 1, 2, 3, 1, 2, 3, 4
+ }
+ };
+ CheckAllEvaluationCombinations(expected);
+}
+
+TEST_P(EvaluatorTest, SingleResidualProblemWithPermutedParameters) {
+ // Add the parameters in explicit order to force the ordering in the program.
+ problem.AddParameterBlock(x, 2);
+ problem.AddParameterBlock(y, 3);
+ problem.AddParameterBlock(z, 4);
+
+ // Then use a cost function which is similar to the others, but swap around
+ // the ordering of the parameters to the cost function. This shouldn't affect
+ // the jacobian evaluation, but requires explicit handling in the evaluators.
+ // At one point the compressed row evaluator had a bug that went undetected
+ // for a long time, since by chance most users added parameters to the problem
+ // in the same order that they occurred as parameters to a cost function.
+ problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 3, 4, 3, 2>,
+ NULL,
+ z, y, x);
+
+ ExpectedEvaluation expected = {
+ // Rows/columns
+ 3, 9,
+ // Cost
+ 7.0,
+ // Residuals
+ { 1.0, 2.0, 3.0 },
+ // Gradient
+ { 6.0, 12.0, // x
+ 6.0, 12.0, 18.0, // y
+ 6.0, 12.0, 18.0, 24.0, // z
+ },
+ // Jacobian
+ // x y z
+ { 1, 2, 1, 2, 3, 1, 2, 3, 4,
+ 1, 2, 1, 2, 3, 1, 2, 3, 4,
+ 1, 2, 1, 2, 3, 1, 2, 3, 4
+ }
+ };
+ CheckAllEvaluationCombinations(expected);
+}
+
+TEST_P(EvaluatorTest, SingleResidualProblemWithNuisanceParameters) {
+ // These parameters are not used.
+ double a[2];
+ double b[1];
+ double c[1];
+ double d[3];
+
+ // Add the parameters in a mixed order so the Jacobian is "checkered" with the
+ // values from the other parameters.
+ problem.AddParameterBlock(a, 2);
+ problem.AddParameterBlock(x, 2);
+ problem.AddParameterBlock(b, 1);
+ problem.AddParameterBlock(y, 3);
+ problem.AddParameterBlock(c, 1);
+ problem.AddParameterBlock(z, 4);
+ problem.AddParameterBlock(d, 3);
+
+ problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 3, 2, 3, 4>,
+ NULL,
+ x, y, z);
+
+ ExpectedEvaluation expected = {
+ // Rows/columns
+ 3, 16,
+ // Cost
+ 7.0,
+ // Residuals
+ { 1.0, 2.0, 3.0 },
+ // Gradient
+ { 0.0, 0.0, // a
+ 6.0, 12.0, // x
+ 0.0, // b
+ 6.0, 12.0, 18.0, // y
+ 0.0, // c
+ 6.0, 12.0, 18.0, 24.0, // z
+ 0.0, 0.0, 0.0, // d
+ },
+ // Jacobian
+ // a x b y c z d
+ { 0, 0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 3, 4, 0, 0, 0,
+ 0, 0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 3, 4, 0, 0, 0,
+ 0, 0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 3, 4, 0, 0, 0
+ }
+ };
+ CheckAllEvaluationCombinations(expected);
+}
+
+TEST_P(EvaluatorTest, MultipleResidualProblem) {
+ // Add the parameters in explicit order to force the ordering in the program.
+ problem.AddParameterBlock(x, 2);
+ problem.AddParameterBlock(y, 3);
+ problem.AddParameterBlock(z, 4);
+
+ // f(x, y) in R^2
+ problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 2, 2, 3>,
+ NULL,
+ x, y);
+
+ // g(x, z) in R^3
+ problem.AddResidualBlock(new ParameterIgnoringCostFunction<2, 3, 2, 4>,
+ NULL,
+ x, z);
+
+ // h(y, z) in R^4
+ problem.AddResidualBlock(new ParameterIgnoringCostFunction<3, 4, 3, 4>,
+ NULL,
+ y, z);
+
+ ExpectedEvaluation expected = {
+ // Rows/columns
+ 9, 9,
+ // Cost
+ // f g h
+ ( 1 + 4 + 1 + 4 + 9 + 1 + 4 + 9 + 16) / 2.0,
+ // Residuals
+ { 1.0, 2.0, // f
+ 1.0, 2.0, 3.0, // g
+ 1.0, 2.0, 3.0, 4.0 // h
+ },
+ // Gradient
+ { 15.0, 30.0, // x
+ 33.0, 66.0, 99.0, // y
+ 42.0, 84.0, 126.0, 168.0 // z
+ },
+ // Jacobian
+ // x y z
+ { /* f(x, y) */ 1, 2, 1, 2, 3, 0, 0, 0, 0,
+ 1, 2, 1, 2, 3, 0, 0, 0, 0,
+
+ /* g(x, z) */ 2, 4, 0, 0, 0, 2, 4, 6, 8,
+ 2, 4, 0, 0, 0, 2, 4, 6, 8,
+ 2, 4, 0, 0, 0, 2, 4, 6, 8,
+
+ /* h(y, z) */ 0, 0, 3, 6, 9, 3, 6, 9, 12,
+ 0, 0, 3, 6, 9, 3, 6, 9, 12,
+ 0, 0, 3, 6, 9, 3, 6, 9, 12,
+ 0, 0, 3, 6, 9, 3, 6, 9, 12
+ }
+ };
+ CheckAllEvaluationCombinations(expected);
+}
+
+TEST_P(EvaluatorTest, MultipleResidualsWithLocalParameterizations) {
+ // Add the parameters in explicit order to force the ordering in the program.
+ problem.AddParameterBlock(x, 2);
+
+ // Fix y's first dimension.
+ vector<int> y_fixed;
+ y_fixed.push_back(0);
+ problem.AddParameterBlock(y, 3, new SubsetParameterization(3, y_fixed));
+
+ // Fix z's second dimension.
+ vector<int> z_fixed;
+ z_fixed.push_back(1);
+ problem.AddParameterBlock(z, 4, new SubsetParameterization(4, z_fixed));
+
+ // f(x, y) in R^2
+ problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 2, 2, 3>,
+ NULL,
+ x, y);
+
+ // g(x, z) in R^3
+ problem.AddResidualBlock(new ParameterIgnoringCostFunction<2, 3, 2, 4>,
+ NULL,
+ x, z);
+
+ // h(y, z) in R^4
+ problem.AddResidualBlock(new ParameterIgnoringCostFunction<3, 4, 3, 4>,
+ NULL,
+ y, z);
+
+ ExpectedEvaluation expected = {
+ // Rows/columns
+ 9, 7,
+ // Cost
+ // f g h
+ ( 1 + 4 + 1 + 4 + 9 + 1 + 4 + 9 + 16) / 2.0,
+ // Residuals
+ { 1.0, 2.0, // f
+ 1.0, 2.0, 3.0, // g
+ 1.0, 2.0, 3.0, 4.0 // h
+ },
+ // Gradient
+ { 15.0, 30.0, // x
+ 66.0, 99.0, // y
+ 42.0, 126.0, 168.0 // z
+ },
+ // Jacobian
+ // x y z
+ { /* f(x, y) */ 1, 2, 2, 3, 0, 0, 0,
+ 1, 2, 2, 3, 0, 0, 0,
+
+ /* g(x, z) */ 2, 4, 0, 0, 2, 6, 8,
+ 2, 4, 0, 0, 2, 6, 8,
+ 2, 4, 0, 0, 2, 6, 8,
+
+ /* h(y, z) */ 0, 0, 6, 9, 3, 9, 12,
+ 0, 0, 6, 9, 3, 9, 12,
+ 0, 0, 6, 9, 3, 9, 12,
+ 0, 0, 6, 9, 3, 9, 12
+ }
+ };
+ CheckAllEvaluationCombinations(expected);
+}
+
+TEST_P(EvaluatorTest, MultipleResidualProblemWithSomeConstantParameters) {
+ // The values are ignored completely by the cost function.
+ double x[2];
+ double y[3];
+ double z[4];
+
+ // Add the parameters in explicit order to force the ordering in the program.
+ problem.AddParameterBlock(x, 2);
+ problem.AddParameterBlock(y, 3);
+ problem.AddParameterBlock(z, 4);
+
+ // f(x, y) in R^2
+ problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 2, 2, 3>,
+ NULL,
+ x, y);
+
+ // g(x, z) in R^3
+ problem.AddResidualBlock(new ParameterIgnoringCostFunction<2, 3, 2, 4>,
+ NULL,
+ x, z);
+
+ // h(y, z) in R^4
+ problem.AddResidualBlock(new ParameterIgnoringCostFunction<3, 4, 3, 4>,
+ NULL,
+ y, z);
+
+ // For this test, "z" is constant.
+ problem.SetParameterBlockConstant(z);
+
+ // Create the reduced program which is missing the fixed "z" variable.
+ // Normally, the preprocessing of the program that happens in solver_impl
+ // takes care of this, but we don't want to invoke the solver here.
+ Program reduced_program;
+ vector<ParameterBlock*>* parameter_blocks =
+ problem.mutable_program()->mutable_parameter_blocks();
+
+ // "z" is the last parameter; save it for later and pop it off temporarily.
+ // Note that "z" will still get read during evaluation, so it cannot be
+ // deleted at this point.
+ ParameterBlock* parameter_block_z = parameter_blocks->back();
+ parameter_blocks->pop_back();
+
+ ExpectedEvaluation expected = {
+ // Rows/columns
+ 9, 5,
+ // Cost
+ // f g h
+ ( 1 + 4 + 1 + 4 + 9 + 1 + 4 + 9 + 16) / 2.0,
+ // Residuals
+ { 1.0, 2.0, // f
+ 1.0, 2.0, 3.0, // g
+ 1.0, 2.0, 3.0, 4.0 // h
+ },
+ // Gradient
+ { 15.0, 30.0, // x
+ 33.0, 66.0, 99.0, // y
+ },
+ // Jacobian
+ // x y
+ { /* f(x, y) */ 1, 2, 1, 2, 3,
+ 1, 2, 1, 2, 3,
+
+ /* g(x, z) */ 2, 4, 0, 0, 0,
+ 2, 4, 0, 0, 0,
+ 2, 4, 0, 0, 0,
+
+ /* h(y, z) */ 0, 0, 3, 6, 9,
+ 0, 0, 3, 6, 9,
+ 0, 0, 3, 6, 9,
+ 0, 0, 3, 6, 9
+ }
+ };
+ CheckAllEvaluationCombinations(expected);
+
+ // Restore parameter block z, so it will get freed in a consistent way.
+ parameter_blocks->push_back(parameter_block_z);
+}
+
+TEST_P(EvaluatorTest, EvaluatorAbortsForResidualsThatFailToEvaluate) {
+ // Switch the return value to failure.
+ problem.AddResidualBlock(
+ new ParameterIgnoringCostFunction<20, 3, 2, 3, 4, false>, NULL, x, y, z);
+
+ // The values are ignored.
+ double state[9];
+
+ scoped_ptr<Evaluator> evaluator(CreateEvaluator(problem.mutable_program()));
+ scoped_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());
+ double cost;
+ EXPECT_FALSE(evaluator->Evaluate(state, &cost, NULL, NULL, NULL));
+}
+
+// In the pairs, the first argument is the linear solver type, and the second
+// argument is num_eliminate_blocks. Changing the num_eliminate_blocks only
+// makes sense for the schur-based solvers.
+//
+// Try all values of num_eliminate_blocks that make sense given that in the
+// tests a maximum of 4 parameter blocks are present.
+INSTANTIATE_TEST_CASE_P(
+ LinearSolvers,
+ EvaluatorTest,
+ ::testing::Values(make_pair(DENSE_QR, 0),
+ make_pair(DENSE_SCHUR, 0),
+ make_pair(DENSE_SCHUR, 1),
+ make_pair(DENSE_SCHUR, 2),
+ make_pair(DENSE_SCHUR, 3),
+ make_pair(DENSE_SCHUR, 4),
+ make_pair(SPARSE_SCHUR, 0),
+ make_pair(SPARSE_SCHUR, 1),
+ make_pair(SPARSE_SCHUR, 2),
+ make_pair(SPARSE_SCHUR, 3),
+ make_pair(SPARSE_SCHUR, 4),
+ make_pair(ITERATIVE_SCHUR, 0),
+ make_pair(ITERATIVE_SCHUR, 1),
+ make_pair(ITERATIVE_SCHUR, 2),
+ make_pair(ITERATIVE_SCHUR, 3),
+ make_pair(ITERATIVE_SCHUR, 4),
+ make_pair(SPARSE_NORMAL_CHOLESKY, 0)));
+
+// Simple cost function used to check if the evaluator is sensitive to
+// state changes.
+class ParameterSensitiveCostFunction : public SizedCostFunction<2, 2> {
+ public:
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ double x1 = parameters[0][0];
+ double x2 = parameters[0][1];
+ residuals[0] = x1 * x1;
+ residuals[1] = x2 * x2;
+
+ if (jacobians != NULL) {
+ double* jacobian = jacobians[0];
+ if (jacobian != NULL) {
+ jacobian[0] = 2.0 * x1;
+ jacobian[1] = 0.0;
+ jacobian[2] = 0.0;
+ jacobian[3] = 2.0 * x2;
+ }
+ }
+ return true;
+ }
+};
+
+TEST(Evaluator, EvaluatorRespectsParameterChanges) {
+ ProblemImpl problem;
+
+ double x[2];
+ x[0] = 1.0;
+ x[1] = 1.0;
+
+ problem.AddResidualBlock(new ParameterSensitiveCostFunction(), NULL, x);
+ Program* program = problem.mutable_program();
+ program->SetParameterOffsetsAndIndex();
+
+ Evaluator::Options options;
+ options.linear_solver_type = DENSE_QR;
+ options.num_eliminate_blocks = 0;
+ string error;
+ scoped_ptr<Evaluator> evaluator(Evaluator::Create(options, program, &error));
+ scoped_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());
+
+ ASSERT_EQ(2, jacobian->num_rows());
+ ASSERT_EQ(2, jacobian->num_cols());
+
+ double state[2];
+ state[0] = 2.0;
+ state[1] = 3.0;
+
+  // The original state of a residual block comes from the user's
+  // state, which here is x = (1.0, 1.0). The only way the checks below
+  // can produce results based on (2.0, 3.0) is if the evaluator
+  // respects the values passed in via the state vector.
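+  //
+  // With state = (2.0, 3.0) the residuals are (2^2, 3^2) = (4, 9), so the
+  // expected cost in each case below is (4^2 + 9^2) / 2 = 48.5.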
+
+ // Cost only; no residuals and no jacobian.
+ {
+ double cost = -1;
+ ASSERT_TRUE(evaluator->Evaluate(state, &cost, NULL, NULL, NULL));
+ EXPECT_EQ(48.5, cost);
+ }
+
+ // Cost and residuals, no jacobian.
+ {
+ double cost = -1;
+ double residuals[2] = { -2, -2 };
+ ASSERT_TRUE(evaluator->Evaluate(state, &cost, residuals, NULL, NULL));
+ EXPECT_EQ(48.5, cost);
+ EXPECT_EQ(4, residuals[0]);
+ EXPECT_EQ(9, residuals[1]);
+ }
+
+ // Cost, residuals, and jacobian.
+ {
+ double cost = -1;
+ double residuals[2] = { -2, -2};
+ SetSparseMatrixConstant(jacobian.get(), -1);
+ ASSERT_TRUE(evaluator->Evaluate(state,
+ &cost,
+ residuals,
+ NULL,
+ jacobian.get()));
+ EXPECT_EQ(48.5, cost);
+ EXPECT_EQ(4, residuals[0]);
+ EXPECT_EQ(9, residuals[1]);
+ Matrix actual_jacobian;
+ jacobian->ToDenseMatrix(&actual_jacobian);
+
+ Matrix expected_jacobian(2, 2);
+ expected_jacobian
+ << 2 * state[0], 0,
+ 0, 2 * state[1];
+
+ EXPECT_TRUE((actual_jacobian.array() == expected_jacobian.array()).all())
+ << "Actual:\n" << actual_jacobian
+ << "\nExpected:\n" << expected_jacobian;
+ }
+}
+
+// Simple cost function used for testing Evaluator::Evaluate.
+//
+// r_i = i - sum_j (j + 1) * x_ij^2,  for j = 0 .. kNumParameterBlocks - 1
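+//
+// For example, in StaticEvaluateTest below parameters_ = {1, 2, 3, 4, 5, 6}
+// and every residual block is a QuadraticCostFunction<2, 2>, so for f(x, y)
+// with x = (1, 2) and y = (3, 4):
+//
+//   r_0 = 0 - 1 * 1^2 - 2 * 3^2 = -19
+//   r_1 = 1 - 1 * 2^2 - 2 * 4^2 = -35
+//
+// which is where the expected residuals, and the expected cost of
+// (19^2 + 35^2 + 59^2 + 87^2 + 27^2 + 43^2) / 2 = 7607, come from.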
+template <int kNumResiduals, int kNumParameterBlocks>
+class QuadraticCostFunction : public CostFunction {
+ public:
+ QuadraticCostFunction() {
+ CHECK_GT(kNumResiduals, 0);
+ CHECK_GT(kNumParameterBlocks, 0);
+ set_num_residuals(kNumResiduals);
+ for (int i = 0; i < kNumParameterBlocks; ++i) {
+ mutable_parameter_block_sizes()->push_back(kNumResiduals);
+ }
+ }
+
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ for (int i = 0; i < kNumResiduals; ++i) {
+ residuals[i] = i;
+ for (int j = 0; j < kNumParameterBlocks; ++j) {
+ residuals[i] -= (j + 1.0) * parameters[j][i] * parameters[j][i];
+ }
+ }
+
+ if (jacobians == NULL) {
+ return true;
+ }
+
+ for (int j = 0; j < kNumParameterBlocks; ++j) {
+ if (jacobians[j] != NULL) {
+ MatrixRef(jacobians[j], kNumResiduals, kNumResiduals) =
+ (-2.0 * (j + 1.0) *
+ ConstVectorRef(parameters[j], kNumResiduals)).asDiagonal();
+ }
+ }
+
+ return true;
+ }
+};
+
+// Convert a CRSMatrix to a dense Eigen matrix.
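+//
+// For example, a 2x2 CRSMatrix with
+//   rows   = {0, 1, 2}
+//   cols   = {0, 1}
+//   values = {4, 6}
+// has one entry in each row and converts to the diagonal matrix diag(4, 6).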
+void CRSToDenseMatrix(const CRSMatrix& input, Matrix* output) {
+ Matrix& m = *CHECK_NOTNULL(output);
+ m.resize(input.num_rows, input.num_cols);
+ m.setZero();
+ for (int row = 0; row < input.num_rows; ++row) {
+ for (int j = input.rows[row]; j < input.rows[row + 1]; ++j) {
+ const int col = input.cols[j];
+ m(row, col) = input.values[j];
+ }
+ }
+}
+
+
+class StaticEvaluateTest : public ::testing::Test {
+ protected:
+ void SetUp() {
+ for (int i = 0; i < 6; ++i) {
+ parameters_[i] = static_cast<double>(i + 1);
+ }
+
+ CostFunction* cost_function = new QuadraticCostFunction<2, 2>;
+
+ // f(x, y)
+ problem_.AddResidualBlock(cost_function,
+ NULL,
+ parameters_,
+ parameters_ + 2);
+ // g(y, z)
+ problem_.AddResidualBlock(cost_function,
+ NULL, parameters_ + 2,
+ parameters_ + 4);
+ // h(z, x)
+ problem_.AddResidualBlock(cost_function,
+ NULL,
+ parameters_ + 4,
+ parameters_);
+ }
+
+
+
+ void EvaluateAndCompare(const int expected_num_rows,
+ const int expected_num_cols,
+ const double expected_cost,
+ const double* expected_residuals,
+ const double* expected_gradient,
+ const double* expected_jacobian) {
+ double cost;
+ vector<double> residuals;
+ vector<double> gradient;
+ CRSMatrix jacobian;
+
+ EXPECT_TRUE(Evaluator::Evaluate(
+ problem_.mutable_program(),
+ 1,
+ &cost,
+ expected_residuals != NULL ? &residuals : NULL,
+ expected_gradient != NULL ? &gradient : NULL,
+ expected_jacobian != NULL ? &jacobian : NULL));
+
+ if (expected_residuals != NULL) {
+ EXPECT_EQ(residuals.size(), expected_num_rows);
+ }
+
+ if (expected_gradient != NULL) {
+ EXPECT_EQ(gradient.size(), expected_num_cols);
+ }
+
+ if (expected_jacobian != NULL) {
+ EXPECT_EQ(jacobian.num_rows, expected_num_rows);
+ EXPECT_EQ(jacobian.num_cols, expected_num_cols);
+ }
+
+ Matrix dense_jacobian;
+ if (expected_jacobian != NULL) {
+ CRSToDenseMatrix(jacobian, &dense_jacobian);
+ }
+
+ CompareEvaluations(expected_num_rows,
+ expected_num_cols,
+ expected_cost,
+ expected_residuals,
+ expected_gradient,
+ expected_jacobian,
+ cost,
+ residuals.size() > 0 ? &residuals[0] : NULL,
+ gradient.size() > 0 ? &gradient[0] : NULL,
+ dense_jacobian.data());
+ }
+
+  void CheckAllEvaluationCombinations(const ExpectedEvaluation& expected) {
+ for (int i = 0; i < 8; ++i) {
+ EvaluateAndCompare(expected.num_rows,
+ expected.num_cols,
+ expected.cost,
+ (i & 1) ? expected.residuals : NULL,
+ (i & 2) ? expected.gradient : NULL,
+ (i & 4) ? expected.jacobian : NULL);
+ }
+
+
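+    // Overwriting the parameter blocks' state with zeros below should leave
+    // the results unchanged; the expected values are the same ones checked
+    // above.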
+ double new_parameters[6];
+ for (int i = 0; i < 6; ++i) {
+ new_parameters[i] = 0.0;
+ }
+
+ problem_.mutable_program()->StateVectorToParameterBlocks(new_parameters);
+
+ for (int i = 0; i < 8; ++i) {
+ EvaluateAndCompare(expected.num_rows,
+ expected.num_cols,
+ expected.cost,
+ (i & 1) ? expected.residuals : NULL,
+ (i & 2) ? expected.gradient : NULL,
+ (i & 4) ? expected.jacobian : NULL);
+ }
+ }
+
+ ProblemImpl problem_;
+ double parameters_[6];
+};
+
+
+TEST_F(StaticEvaluateTest, MultipleParameterAndResidualBlocks) {
+ ExpectedEvaluation expected = {
+ // Rows/columns
+ 6, 6,
+ // Cost
+ 7607.0,
+ // Residuals
+ { -19.0, -35.0, // f
+ -59.0, -87.0, // g
+ -27.0, -43.0 // h
+ },
+ // Gradient
+ { 146.0, 484.0, // x
+ 582.0, 1256.0, // y
+ 1450.0, 2604.0, // z
+ },
+ // Jacobian
+ // x y z
+ { /* f(x, y) */ -2.0, 0.0, -12.0, 0.0, 0.0, 0.0,
+ 0.0, -4.0, 0.0, -16.0, 0.0, 0.0,
+ /* g(y, z) */ 0.0, 0.0, -6.0, 0.0, -20.0, 0.0,
+ 0.0, 0.0, 0.0, -8.0, 0.0, -24.0,
+ /* h(z, x) */ -4.0, 0.0, 0.0, 0.0, -10.0, 0.0,
+ 0.0, -8.0, 0.0, 0.0, 0.0, -12.0
+ }
+ };
+
+ CheckAllEvaluationCombinations(expected);
+}
+
+TEST_F(StaticEvaluateTest, ConstantParameterBlock) {
+ ExpectedEvaluation expected = {
+ // Rows/columns
+ 6, 6,
+ // Cost
+ 7607.0,
+ // Residuals
+ { -19.0, -35.0, // f
+ -59.0, -87.0, // g
+ -27.0, -43.0 // h
+ },
+
+ // Gradient
+ { 146.0, 484.0, // x
+ 0.0, 0.0, // y
+ 1450.0, 2604.0, // z
+ },
+
+ // Jacobian
+ // x y z
+ { /* f(x, y) */ -2.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, -4.0, 0.0, 0.0, 0.0, 0.0,
+ /* g(y, z) */ 0.0, 0.0, 0.0, 0.0, -20.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, -24.0,
+ /* h(z, x) */ -4.0, 0.0, 0.0, 0.0, -10.0, 0.0,
+ 0.0, -8.0, 0.0, 0.0, 0.0, -12.0
+ }
+ };
+
+ problem_.SetParameterBlockConstant(parameters_ + 2);
+ CheckAllEvaluationCombinations(expected);
+}
+
+TEST_F(StaticEvaluateTest, LocalParameterization) {
+ ExpectedEvaluation expected = {
+ // Rows/columns
+ 6, 5,
+ // Cost
+ 7607.0,
+ // Residuals
+ { -19.0, -35.0, // f
+ -59.0, -87.0, // g
+ -27.0, -43.0 // h
+ },
+ // Gradient
+ { 146.0, 484.0, // x
+ 1256.0, // y with SubsetParameterization
+ 1450.0, 2604.0, // z
+ },
+ // Jacobian
+ // x y z
+ { /* f(x, y) */ -2.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, -4.0, -16.0, 0.0, 0.0,
+ /* g(y, z) */ 0.0, 0.0, 0.0, -20.0, 0.0,
+ 0.0, 0.0, -8.0, 0.0, -24.0,
+ /* h(z, x) */ -4.0, 0.0, 0.0, -10.0, 0.0,
+ 0.0, -8.0, 0.0, 0.0, -12.0
+ }
+ };
+
+ vector<int> constant_parameters;
+ constant_parameters.push_back(0);
+ problem_.SetParameterization(parameters_ + 2,
+ new SubsetParameterization(2,
+ constant_parameters));
+
+ CheckAllEvaluationCombinations(expected);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/file.cc b/internal/ceres/file.cc
new file mode 100644
index 0000000..387f359
--- /dev/null
+++ b/internal/ceres/file.cc
@@ -0,0 +1,93 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Really simple file IO.
+
+#include "ceres/file.h"
+
+#include <cstdio>
+#include <string>
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::string;
+
+void WriteStringToFileOrDie(const string &data, const string &filename) {
+ FILE* file_descriptor = fopen(filename.c_str(), "wb");
+ if (!file_descriptor) {
+ LOG(FATAL) << "Couldn't write to file: " << filename;
+ }
+ fwrite(data.c_str(), 1, data.size(), file_descriptor);
+ fclose(file_descriptor);
+}
+
+void ReadFileToStringOrDie(const string &filename, string *data) {
+ FILE* file_descriptor = fopen(filename.c_str(), "r");
+
+ if (!file_descriptor) {
+ LOG(FATAL) << "Couldn't read file: " << filename;
+ }
+
+ // Resize the input buffer appropriately.
+ fseek(file_descriptor, 0L, SEEK_END);
+ int num_bytes = ftell(file_descriptor);
+ data->resize(num_bytes);
+
+ // Read the data.
+ fseek(file_descriptor, 0L, SEEK_SET);
+ int num_read = fread(&((*data)[0]),
+ sizeof((*data)[0]),
+ num_bytes,
+ file_descriptor);
+ if (num_read != num_bytes) {
+ LOG(FATAL) << "Couldn't read all of " << filename
+             << ": expected bytes: " << num_bytes * sizeof((*data)[0])
+             << ", actual bytes: " << num_read;
+ }
+ fclose(file_descriptor);
+}
+
+string JoinPath(const string& dirname, const string& basename) {
+#ifdef _WIN32
+ static const char separator = '\\';
+#else
+ static const char separator = '/';
+#endif // _WIN32
+
+ if ((!basename.empty() && basename[0] == separator) || dirname.empty()) {
+ return basename;
+ } else if (dirname[dirname.size() - 1] == separator) {
+ return dirname + basename;
+ } else {
+ return dirname + string(&separator, 1) + basename;
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/file.h b/internal/ceres/file.h
new file mode 100644
index 0000000..4741d65
--- /dev/null
+++ b/internal/ceres/file.h
@@ -0,0 +1,52 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Simple file IO support. This is a portability shim.
+
+#ifndef CERES_INTERNAL_FILE_H_
+#define CERES_INTERNAL_FILE_H_
+
+#include <string>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+void WriteStringToFileOrDie(const string &data, const string &filename);
+void ReadFileToStringOrDie(const string &filename, string *data);
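+//
+// A minimal usage sketch (the file name is purely illustrative):
+//
+//   string contents;
+//   WriteStringToFileOrDie("hello", "/tmp/scratch.txt");
+//   ReadFileToStringOrDie("/tmp/scratch.txt", &contents);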
+
+// Join two path components, adding a slash if necessary. If basename is an
+// absolute path then JoinPath ignores dirname and simply returns basename.
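+// For example, assuming the POSIX '/' separator:
+//   JoinPath("/data", "problem.txt")       returns "/data/problem.txt"
+//   JoinPath("/data/", "problem.txt")      returns "/data/problem.txt"
+//   JoinPath("/data", "/tmp/problem.txt")  returns "/tmp/problem.txt"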
+string JoinPath(const string& dirname, const string& basename);
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_FILE_H_
diff --git a/internal/ceres/generate_eliminator_specialization.py b/internal/ceres/generate_eliminator_specialization.py
new file mode 100644
index 0000000..af9873f
--- /dev/null
+++ b/internal/ceres/generate_eliminator_specialization.py
@@ -0,0 +1,186 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+# http://code.google.com/p/ceres-solver/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+#   used to endorse or promote products derived from this software without
+#   specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+#
+# Copyright 2011 Google Inc. All Rights Reserved.
+# Author: sameeragarwal@google.com (Sameer Agarwal)
+#
+# Script for explicitly generating template specialization of the
+# SchurEliminator class. It is a rather large class
+# and the number of explicit instantiations is also large. Explicitly
+# generating these instantiations in separate .cc files breaks the
+# compilation into separate compilation units rather than one large .cc
+# file, which takes 2+GB of RAM to compile.
+#
+# This script creates two sets of files.
+#
+# 1. schur_eliminator_x_x_x.cc
+# where the x indicates the template parameters, and
+#
+# 2. schur_eliminator.cc
+#
+# that contains a factory function for instantiating these classes
+# based on runtime parameters.
+#
+# The list of tuples, SPECIALIZATIONS, indicates the set of
+# specializations that is generated.
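+#
+# For example, for the (2, 3, "Dynamic") tuple below, schur_eliminator.cc
+# ends up containing a conditional of roughly this form:
+#
+#   if ((options.row_block_size == 2) &&
+#       (options.e_block_size == 3) &&
+#       (options.f_block_size == Dynamic)) {
+#     return new SchurEliminator<2, 3, Dynamic>(options);
+#   }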
+
+# Set of template specializations to generate
+SPECIALIZATIONS = [(2, 2, 2),
+ (2, 2, 3),
+ (2, 2, 4),
+ (2, 2, "Dynamic"),
+ (2, 3, 3),
+ (2, 3, 4),
+ (2, 3, 9),
+ (2, 3, "Dynamic"),
+ (2, 4, 3),
+ (2, 4, 4),
+ (2, 4, "Dynamic"),
+ (4, 4, 2),
+ (4, 4, 3),
+ (4, 4, 4),
+ (4, 4, "Dynamic"),
+ ("Dynamic", "Dynamic", "Dynamic")]
+
+SPECIALIZATION_FILE = """// Copyright 2011 Google Inc. All Rights Reserved.
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<%s, %s, %s>;
+
+} // namespace internal
+} // namespace ceres
+
+"""
+
+FACTORY_FILE_HEADER = """// Copyright 2011 Google Inc. All Rights Reserved.
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/linear_solver.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+SchurEliminatorBase*
+SchurEliminatorBase::Create(const LinearSolver::Options& options) {
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+"""
+
+FACTORY_CONDITIONAL = """ if ((options.row_block_size == %s) &&
+ (options.e_block_size == %s) &&
+ (options.f_block_size == %s)) {
+ return new SchurEliminator<%s, %s, %s>(options);
+ }
+"""
+
+FACTORY_FOOTER = """
+#endif
+ VLOG(1) << "Template specializations not found for <"
+ << options.row_block_size << ","
+ << options.e_block_size << ","
+ << options.f_block_size << ">";
+ return new SchurEliminator<Dynamic, Dynamic, Dynamic>(options);
+}
+
+} // namespace internal
+} // namespace ceres
+"""
+
+
+def SuffixForSize(size):
+ if size == "Dynamic":
+ return "d"
+ return str(size)
+
+
+def SpecializationFilename(prefix, row_block_size, e_block_size, f_block_size):
+ return "_".join([prefix] + map(SuffixForSize, (row_block_size,
+ e_block_size,
+ f_block_size)))
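+
+# For example,
+#   SpecializationFilename("generated/schur_eliminator", 2, 3, "Dynamic")
+# returns "generated/schur_eliminator_2_3_d"; Specialize() below appends
+# ".cc" to this to form the name of the generated file.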
+
+
+def Specialize():
+ """
+ Generate specialization code and the conditionals to instantiate it.
+ """
+ f = open("schur_eliminator.cc", "w")
+ f.write(FACTORY_FILE_HEADER)
+
+ for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS:
+ output = SpecializationFilename("generated/schur_eliminator",
+ row_block_size,
+ e_block_size,
+ f_block_size) + ".cc"
+ fptr = open(output, "w")
+ fptr.write(SPECIALIZATION_FILE % (row_block_size,
+ e_block_size,
+ f_block_size))
+ fptr.close()
+
+ f.write(FACTORY_CONDITIONAL % (row_block_size,
+ e_block_size,
+ f_block_size,
+ row_block_size,
+ e_block_size,
+ f_block_size))
+ f.write(FACTORY_FOOTER)
+ f.close()
+
+
+if __name__ == "__main__":
+ Specialize()
diff --git a/internal/ceres/generated/schur_eliminator_2_2_2.cc b/internal/ceres/generated/schur_eliminator_2_2_2.cc
new file mode 100644
index 0000000..5529386
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_2_2.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 2, 2>;
+
+} // namespace internal
+} // namespace ceres
+
diff --git a/internal/ceres/generated/schur_eliminator_2_2_3.cc b/internal/ceres/generated/schur_eliminator_2_2_3.cc
new file mode 100644
index 0000000..fd7af95
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_2_3.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 2, 3>;
+
+} // namespace internal
+} // namespace ceres
+
diff --git a/internal/ceres/generated/schur_eliminator_2_2_4.cc b/internal/ceres/generated/schur_eliminator_2_2_4.cc
new file mode 100644
index 0000000..109483e
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_2_4.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 2, 4>;
+
+} // namespace internal
+} // namespace ceres
+
diff --git a/internal/ceres/generated/schur_eliminator_2_2_d.cc b/internal/ceres/generated/schur_eliminator_2_2_d.cc
new file mode 100644
index 0000000..b93e82f
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_2_d.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 2, Dynamic>;
+
+} // namespace internal
+} // namespace ceres
+
diff --git a/internal/ceres/generated/schur_eliminator_2_3_3.cc b/internal/ceres/generated/schur_eliminator_2_3_3.cc
new file mode 100644
index 0000000..86352c0
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_3_3.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 3, 3>;
+
+} // namespace internal
+} // namespace ceres
+
diff --git a/internal/ceres/generated/schur_eliminator_2_3_4.cc b/internal/ceres/generated/schur_eliminator_2_3_4.cc
new file mode 100644
index 0000000..200df7f
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_3_4.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 3, 4>;
+
+} // namespace internal
+} // namespace ceres
+
diff --git a/internal/ceres/generated/schur_eliminator_2_3_9.cc b/internal/ceres/generated/schur_eliminator_2_3_9.cc
new file mode 100644
index 0000000..1fda343
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_3_9.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 3, 9>;
+
+} // namespace internal
+} // namespace ceres
+
diff --git a/internal/ceres/generated/schur_eliminator_2_3_d.cc b/internal/ceres/generated/schur_eliminator_2_3_d.cc
new file mode 100644
index 0000000..385cd2d
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_3_d.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 3, Dynamic>;
+
+} // namespace internal
+} // namespace ceres
+
diff --git a/internal/ceres/generated/schur_eliminator_2_4_3.cc b/internal/ceres/generated/schur_eliminator_2_4_3.cc
new file mode 100644
index 0000000..7b15d63
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_4_3.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 4, 3>;
+
+} // namespace internal
+} // namespace ceres
+
diff --git a/internal/ceres/generated/schur_eliminator_2_4_4.cc b/internal/ceres/generated/schur_eliminator_2_4_4.cc
new file mode 100644
index 0000000..29a610d
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_4_4.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 4, 4>;
+
+} // namespace internal
+} // namespace ceres
+
diff --git a/internal/ceres/generated/schur_eliminator_2_4_d.cc b/internal/ceres/generated/schur_eliminator_2_4_d.cc
new file mode 100644
index 0000000..a3bc4dc
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_2_4_d.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 4, Dynamic>;
+
+} // namespace internal
+} // namespace ceres
+
diff --git a/internal/ceres/generated/schur_eliminator_4_4_2.cc b/internal/ceres/generated/schur_eliminator_4_4_2.cc
new file mode 100644
index 0000000..f71a4f6
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_4_4_2.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<4, 4, 2>;
+
+} // namespace internal
+} // namespace ceres
+
diff --git a/internal/ceres/generated/schur_eliminator_4_4_3.cc b/internal/ceres/generated/schur_eliminator_4_4_3.cc
new file mode 100644
index 0000000..52259fb
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_4_4_3.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<4, 4, 3>;
+
+} // namespace internal
+} // namespace ceres
+
diff --git a/internal/ceres/generated/schur_eliminator_4_4_4.cc b/internal/ceres/generated/schur_eliminator_4_4_4.cc
new file mode 100644
index 0000000..775424e
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_4_4_4.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<4, 4, 4>;
+
+} // namespace internal
+} // namespace ceres
+
diff --git a/internal/ceres/generated/schur_eliminator_4_4_d.cc b/internal/ceres/generated/schur_eliminator_4_4_d.cc
new file mode 100644
index 0000000..97cde59
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_4_4_d.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<4, 4, Dynamic>;
+
+} // namespace internal
+} // namespace ceres
+
diff --git a/internal/ceres/generated/schur_eliminator_d_d_d.cc b/internal/ceres/generated/schur_eliminator_d_d_d.cc
new file mode 100644
index 0000000..4cba32e
--- /dev/null
+++ b/internal/ceres/generated/schur_eliminator_d_d_d.cc
@@ -0,0 +1,53 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<Dynamic, Dynamic, Dynamic>;
+
+} // namespace internal
+} // namespace ceres
+
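The generated files above differ only in the template arguments they instantiate: each one includes schur_eliminator_impl.h (where the templated implementation lives) and forces a single explicit instantiation for one block-size combination, with <Dynamic, Dynamic, Dynamic> as the catch-all. The following stand-alone sketch illustrates the explicit-instantiation pattern itself; ToyEliminator and its members are made up for illustration and are not the actual ceres classes.

// Illustrative only: the explicit-instantiation pattern used by the
// generated schur_eliminator_*.cc files, with made-up names.
#include <cstdio>

// In ceres, the declaration would live in a header such as schur_eliminator.h ...
template <int kEBlockSize, int kFBlockSize>
struct ToyEliminator {
  int Eliminate() const;
};

// ... and the templated definition in an *_impl.h header.
template <int kEBlockSize, int kFBlockSize>
int ToyEliminator<kEBlockSize, kFBlockSize>::Eliminate() const {
  return kEBlockSize * kFBlockSize;  // stand-in for the real elimination work
}

// Each generated .cc then contains a single explicit instantiation, so the
// template is compiled exactly once per block-size combination:
template struct ToyEliminator<4, 3>;

int main() {
  ToyEliminator<4, 3> eliminator;
  std::printf("%d\n", eliminator.Eliminate());
  return 0;
}
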
diff --git a/internal/ceres/gmock/gmock.h b/internal/ceres/gmock/gmock.h
new file mode 100644
index 0000000..2ce6dc1
--- /dev/null
+++ b/internal/ceres/gmock/gmock.h
@@ -0,0 +1,12822 @@
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This is the main header file a user should include.
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_H_
+
+// This file implements the following syntax:
+//
+// ON_CALL(mock_object.Method(...))
+// .With(...) ?
+// .WillByDefault(...);
+//
+// where With() is optional and WillByDefault() must appear exactly
+// once.
+//
+// EXPECT_CALL(mock_object.Method(...))
+// .With(...) ?
+// .Times(...) ?
+// .InSequence(...) *
+// .WillOnce(...) *
+// .WillRepeatedly(...) ?
+// .RetiresOnSaturation() ? ;
+//
+// where all clauses are optional and WillOnce() can be repeated.
+
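A minimal usage sketch of the two macros described above; Turtle, MockTurtle and the test body are hypothetical and serve only to illustrate the clause order.

#include "gmock/gmock.h"
#include "gtest/gtest.h"

// Hypothetical interface and mock, used only to illustrate the syntax.
class Turtle {
 public:
  virtual ~Turtle() {}
  virtual int GetX() const = 0;
};

class MockTurtle : public Turtle {
 public:
  MOCK_CONST_METHOD0(GetX, int());
};

TEST(TurtleTest, DefaultAndExpectedCalls) {
  MockTurtle turtle;
  // Default behaviour for calls not matched by any EXPECT_CALL.
  ON_CALL(turtle, GetX()).WillByDefault(::testing::Return(0));
  // Expectation: at least one call; the first returns 5, later ones 7.
  EXPECT_CALL(turtle, GetX())
      .Times(::testing::AtLeast(1))
      .WillOnce(::testing::Return(5))
      .WillRepeatedly(::testing::Return(7));
  EXPECT_EQ(5, turtle.GetX());
  EXPECT_EQ(7, turtle.GetX());
}
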
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements some commonly used actions.
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_
+
+#include <algorithm>
+#include <string>
+
+#ifndef _WIN32_WCE
+# include <errno.h>
+#endif
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file defines some utilities useful for implementing Google
+// Mock. They are subject to change without notice, so please DO NOT
+// USE THEM IN USER CODE.
+
+#ifndef GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_INTERNAL_UTILS_H_
+#define GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_INTERNAL_UTILS_H_
+
+#include <stdio.h>
+#include <ostream> // NOLINT
+#include <string>
+
+// This file was GENERATED by a script. DO NOT EDIT BY HAND!!!
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file contains template meta-programming utility classes needed
+// for implementing Google Mock.
+
+#ifndef GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_GENERATED_INTERNAL_UTILS_H_
+#define GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_GENERATED_INTERNAL_UTILS_H_
+
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vadimb@google.com (Vadim Berman)
+//
+// Low-level types and utilities for porting Google Mock to various
+// platforms. They are subject to change without notice. DO NOT USE
+// THEM IN USER CODE.
+
+#ifndef GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_PORT_H_
+#define GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_PORT_H_
+
+#include <assert.h>
+#include <stdlib.h>
+#include <iostream>
+
+// Most of the types needed for porting Google Mock are also required
+// for Google Test and are defined in gtest-port.h.
+#include "gtest/gtest.h"
+
+// To avoid conditional compilation everywhere, we make it
+// gmock-port.h's responsibility to #include the header implementing
+// tr1/tuple. gmock-port.h does this via gtest-port.h, which is
+// guaranteed to pull in the tuple header.
+
+// For MS Visual C++, check the compiler version. At least VS 2003 is
+// required to compile Google Mock.
+#if defined(_MSC_VER) && _MSC_VER < 1310
+# error "At least Visual C++ 2003 (7.1) is required to compile Google Mock."
+#endif
+
+// Macro for referencing flags. This is public as we want the user to
+// use this syntax to reference Google Mock flags.
+#define GMOCK_FLAG(name) FLAGS_gmock_##name
+
+// Macros for declaring flags.
+#define GMOCK_DECLARE_bool_(name) extern bool GMOCK_FLAG(name)
+#define GMOCK_DECLARE_int32_(name) \
+ extern ::testing::internal::Int32 GMOCK_FLAG(name)
+#define GMOCK_DECLARE_string_(name) \
+ extern ::testing::internal::String GMOCK_FLAG(name)
+
+// Macros for defining flags.
+#define GMOCK_DEFINE_bool_(name, default_val, doc) \
+ bool GMOCK_FLAG(name) = (default_val)
+#define GMOCK_DEFINE_int32_(name, default_val, doc) \
+ ::testing::internal::Int32 GMOCK_FLAG(name) = (default_val)
+#define GMOCK_DEFINE_string_(name, default_val, doc) \
+ ::testing::internal::String GMOCK_FLAG(name) = (default_val)
+
+#endif // GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_PORT_H_
+
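For concreteness, here is the textual expansion of the flag macros defined above, using a made-up flag name:

// GMOCK_DECLARE_int32_(max_foo);
//   expands to: extern ::testing::internal::Int32 FLAGS_gmock_max_foo
// GMOCK_DEFINE_int32_(max_foo, 10, "the doc string is ignored by the macro");
//   expands to: ::testing::internal::Int32 FLAGS_gmock_max_foo = (10)
// Code then reads the flag through GMOCK_FLAG(max_foo), i.e. FLAGS_gmock_max_foo.
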
+namespace testing {
+
+template <typename T>
+class Matcher;
+
+namespace internal {
+
+// An IgnoredValue object can be implicitly constructed from ANY value.
+// This is used in implementing the IgnoreResult(a) action.
+class IgnoredValue {
+ public:
+ // This constructor template allows any value to be implicitly
+ // converted to IgnoredValue. The object has no data member and
+ // doesn't try to remember anything about the argument. We
+ // deliberately omit the 'explicit' keyword in order to allow the
+ // conversion to be implicit.
+ template <typename T>
+ IgnoredValue(const T&) {}
+};
+
+// MatcherTuple<T>::type is a tuple type where each field is a Matcher
+// for the corresponding field in tuple type T.
+template <typename Tuple>
+struct MatcherTuple;
+
+template <>
+struct MatcherTuple< ::std::tr1::tuple<> > {
+ typedef ::std::tr1::tuple< > type;
+};
+
+template <typename A1>
+struct MatcherTuple< ::std::tr1::tuple<A1> > {
+ typedef ::std::tr1::tuple<Matcher<A1> > type;
+};
+
+template <typename A1, typename A2>
+struct MatcherTuple< ::std::tr1::tuple<A1, A2> > {
+ typedef ::std::tr1::tuple<Matcher<A1>, Matcher<A2> > type;
+};
+
+template <typename A1, typename A2, typename A3>
+struct MatcherTuple< ::std::tr1::tuple<A1, A2, A3> > {
+ typedef ::std::tr1::tuple<Matcher<A1>, Matcher<A2>, Matcher<A3> > type;
+};
+
+template <typename A1, typename A2, typename A3, typename A4>
+struct MatcherTuple< ::std::tr1::tuple<A1, A2, A3, A4> > {
+ typedef ::std::tr1::tuple<Matcher<A1>, Matcher<A2>, Matcher<A3>,
+ Matcher<A4> > type;
+};
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5>
+struct MatcherTuple< ::std::tr1::tuple<A1, A2, A3, A4, A5> > {
+ typedef ::std::tr1::tuple<Matcher<A1>, Matcher<A2>, Matcher<A3>, Matcher<A4>,
+ Matcher<A5> > type;
+};
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6>
+struct MatcherTuple< ::std::tr1::tuple<A1, A2, A3, A4, A5, A6> > {
+ typedef ::std::tr1::tuple<Matcher<A1>, Matcher<A2>, Matcher<A3>, Matcher<A4>,
+ Matcher<A5>, Matcher<A6> > type;
+};
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6, typename A7>
+struct MatcherTuple< ::std::tr1::tuple<A1, A2, A3, A4, A5, A6, A7> > {
+ typedef ::std::tr1::tuple<Matcher<A1>, Matcher<A2>, Matcher<A3>, Matcher<A4>,
+ Matcher<A5>, Matcher<A6>, Matcher<A7> > type;
+};
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6, typename A7, typename A8>
+struct MatcherTuple< ::std::tr1::tuple<A1, A2, A3, A4, A5, A6, A7, A8> > {
+ typedef ::std::tr1::tuple<Matcher<A1>, Matcher<A2>, Matcher<A3>, Matcher<A4>,
+ Matcher<A5>, Matcher<A6>, Matcher<A7>, Matcher<A8> > type;
+};
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6, typename A7, typename A8, typename A9>
+struct MatcherTuple< ::std::tr1::tuple<A1, A2, A3, A4, A5, A6, A7, A8, A9> > {
+ typedef ::std::tr1::tuple<Matcher<A1>, Matcher<A2>, Matcher<A3>, Matcher<A4>,
+ Matcher<A5>, Matcher<A6>, Matcher<A7>, Matcher<A8>, Matcher<A9> > type;
+};
+
+template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6, typename A7, typename A8, typename A9, typename A10>
+struct MatcherTuple< ::std::tr1::tuple<A1, A2, A3, A4, A5, A6, A7, A8, A9,
+ A10> > {
+ typedef ::std::tr1::tuple<Matcher<A1>, Matcher<A2>, Matcher<A3>, Matcher<A4>,
+ Matcher<A5>, Matcher<A6>, Matcher<A7>, Matcher<A8>, Matcher<A9>,
+ Matcher<A10> > type;
+};
+
+// Template struct Function<F>, where F must be a function type, contains
+// the following typedefs:
+//
+// Result: the function's return type.
+// ArgumentN: the type of the N-th argument, where N starts with 1.
+// ArgumentTuple: the tuple type consisting of all parameters of F.
+// ArgumentMatcherTuple: the tuple type consisting of Matchers for all
+// parameters of F.
+// MakeResultVoid: the function type obtained by substituting void
+// for the return type of F.
+// MakeResultIgnoredValue:
+// the function type obtained by substituting Something
+// for the return type of F.
+template <typename F>
+struct Function;
+
+template <typename R>
+struct Function<R()> {
+ typedef R Result;
+ typedef ::std::tr1::tuple<> ArgumentTuple;
+ typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+ typedef void MakeResultVoid();
+ typedef IgnoredValue MakeResultIgnoredValue();
+};
+
+template <typename R, typename A1>
+struct Function<R(A1)>
+ : Function<R()> {
+ typedef A1 Argument1;
+ typedef ::std::tr1::tuple<A1> ArgumentTuple;
+ typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+ typedef void MakeResultVoid(A1);
+ typedef IgnoredValue MakeResultIgnoredValue(A1);
+};
+
+template <typename R, typename A1, typename A2>
+struct Function<R(A1, A2)>
+ : Function<R(A1)> {
+ typedef A2 Argument2;
+ typedef ::std::tr1::tuple<A1, A2> ArgumentTuple;
+ typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+ typedef void MakeResultVoid(A1, A2);
+ typedef IgnoredValue MakeResultIgnoredValue(A1, A2);
+};
+
+template <typename R, typename A1, typename A2, typename A3>
+struct Function<R(A1, A2, A3)>
+ : Function<R(A1, A2)> {
+ typedef A3 Argument3;
+ typedef ::std::tr1::tuple<A1, A2, A3> ArgumentTuple;
+ typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+ typedef void MakeResultVoid(A1, A2, A3);
+ typedef IgnoredValue MakeResultIgnoredValue(A1, A2, A3);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4>
+struct Function<R(A1, A2, A3, A4)>
+ : Function<R(A1, A2, A3)> {
+ typedef A4 Argument4;
+ typedef ::std::tr1::tuple<A1, A2, A3, A4> ArgumentTuple;
+ typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+ typedef void MakeResultVoid(A1, A2, A3, A4);
+ typedef IgnoredValue MakeResultIgnoredValue(A1, A2, A3, A4);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5>
+struct Function<R(A1, A2, A3, A4, A5)>
+ : Function<R(A1, A2, A3, A4)> {
+ typedef A5 Argument5;
+ typedef ::std::tr1::tuple<A1, A2, A3, A4, A5> ArgumentTuple;
+ typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+ typedef void MakeResultVoid(A1, A2, A3, A4, A5);
+ typedef IgnoredValue MakeResultIgnoredValue(A1, A2, A3, A4, A5);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6>
+struct Function<R(A1, A2, A3, A4, A5, A6)>
+ : Function<R(A1, A2, A3, A4, A5)> {
+ typedef A6 Argument6;
+ typedef ::std::tr1::tuple<A1, A2, A3, A4, A5, A6> ArgumentTuple;
+ typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+ typedef void MakeResultVoid(A1, A2, A3, A4, A5, A6);
+ typedef IgnoredValue MakeResultIgnoredValue(A1, A2, A3, A4, A5, A6);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6, typename A7>
+struct Function<R(A1, A2, A3, A4, A5, A6, A7)>
+ : Function<R(A1, A2, A3, A4, A5, A6)> {
+ typedef A7 Argument7;
+ typedef ::std::tr1::tuple<A1, A2, A3, A4, A5, A6, A7> ArgumentTuple;
+ typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+ typedef void MakeResultVoid(A1, A2, A3, A4, A5, A6, A7);
+ typedef IgnoredValue MakeResultIgnoredValue(A1, A2, A3, A4, A5, A6, A7);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6, typename A7, typename A8>
+struct Function<R(A1, A2, A3, A4, A5, A6, A7, A8)>
+ : Function<R(A1, A2, A3, A4, A5, A6, A7)> {
+ typedef A8 Argument8;
+ typedef ::std::tr1::tuple<A1, A2, A3, A4, A5, A6, A7, A8> ArgumentTuple;
+ typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+ typedef void MakeResultVoid(A1, A2, A3, A4, A5, A6, A7, A8);
+ typedef IgnoredValue MakeResultIgnoredValue(A1, A2, A3, A4, A5, A6, A7, A8);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6, typename A7, typename A8, typename A9>
+struct Function<R(A1, A2, A3, A4, A5, A6, A7, A8, A9)>
+ : Function<R(A1, A2, A3, A4, A5, A6, A7, A8)> {
+ typedef A9 Argument9;
+ typedef ::std::tr1::tuple<A1, A2, A3, A4, A5, A6, A7, A8, A9> ArgumentTuple;
+ typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+ typedef void MakeResultVoid(A1, A2, A3, A4, A5, A6, A7, A8, A9);
+ typedef IgnoredValue MakeResultIgnoredValue(A1, A2, A3, A4, A5, A6, A7, A8,
+ A9);
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6, typename A7, typename A8, typename A9,
+ typename A10>
+struct Function<R(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)>
+ : Function<R(A1, A2, A3, A4, A5, A6, A7, A8, A9)> {
+ typedef A10 Argument10;
+ typedef ::std::tr1::tuple<A1, A2, A3, A4, A5, A6, A7, A8, A9,
+ A10> ArgumentTuple;
+ typedef typename MatcherTuple<ArgumentTuple>::type ArgumentMatcherTuple;
+ typedef void MakeResultVoid(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10);
+ typedef IgnoredValue MakeResultIgnoredValue(A1, A2, A3, A4, A5, A6, A7, A8,
+ A9, A10);
+};
+
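A small compile-time illustration of the typedefs documented above; it assumes gtest's StaticAssertTypeEq (declared in gtest.h, which this header includes) and is not part of gmock itself.

// Illustrative only: checking the Function<F> typedefs at compile time.
inline void FunctionTraitsSketch() {
  typedef ::testing::internal::Function<int(bool, double)> F;
  ::testing::StaticAssertTypeEq<int, F::Result>();
  ::testing::StaticAssertTypeEq<bool, F::Argument1>();
  ::testing::StaticAssertTypeEq<double, F::Argument2>();
  ::testing::StaticAssertTypeEq< ::std::tr1::tuple<bool, double>, F::ArgumentTuple>();
  ::testing::StaticAssertTypeEq<void(bool, double), F::MakeResultVoid>();
}
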
+} // namespace internal
+
+} // namespace testing
+
+#endif // GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_GENERATED_INTERNAL_UTILS_H_
+
+namespace testing {
+namespace internal {
+
+// Converts an identifier name to a space-separated list of lower-case
+// words. Each maximum substring of the form [A-Za-z][a-z]*|\d+ is
+// treated as one word. For example, both "FooBar123" and
+// "foo_bar_123" are converted to "foo bar 123".
+string ConvertIdentifierNameToWords(const char* id_name);
+
+// PointeeOf<Pointer>::type is the type of a value pointed to by a
+// Pointer, which can be either a smart pointer or a raw pointer. The
+// following default implementation is for the case where Pointer is a
+// smart pointer.
+template <typename Pointer>
+struct PointeeOf {
+ // Smart pointer classes define type element_type as the type of
+ // their pointees.
+ typedef typename Pointer::element_type type;
+};
+// This specialization is for the raw pointer case.
+template <typename T>
+struct PointeeOf<T*> { typedef T type; }; // NOLINT
+
+// GetRawPointer(p) returns the raw pointer underlying p when p is a
+// smart pointer, or returns p itself when p is already a raw pointer.
+// The following default implementation is for the smart pointer case.
+template <typename Pointer>
+inline typename Pointer::element_type* GetRawPointer(const Pointer& p) {
+ return p.get();
+}
+// This overloaded version is for the raw pointer case.
+template <typename Element>
+inline Element* GetRawPointer(Element* p) { return p; }
+
+// This comparator allows linked_ptr to be stored in sets.
+template <typename T>
+struct LinkedPtrLessThan {
+ bool operator()(const ::testing::internal::linked_ptr<T>& lhs,
+ const ::testing::internal::linked_ptr<T>& rhs) const {
+ return lhs.get() < rhs.get();
+ }
+};
+
+// Symbian compilation can be done with wchar_t being either a native
+// type or a typedef. Using Google Mock with OpenC without wchar_t
+// should require the definition of _STLP_NO_WCHAR_T.
+//
+// MSVC treats wchar_t as a native type usually, but treats it as the
+// same as unsigned short when the compiler option /Zc:wchar_t- is
+// specified. It defines _NATIVE_WCHAR_T_DEFINED symbol when wchar_t
+// is a native type.
+#if (GTEST_OS_SYMBIAN && defined(_STLP_NO_WCHAR_T)) || \
+ (defined(_MSC_VER) && !defined(_NATIVE_WCHAR_T_DEFINED))
+// wchar_t is a typedef.
+#else
+# define GMOCK_WCHAR_T_IS_NATIVE_ 1
+#endif
+
+// signed wchar_t and unsigned wchar_t are NOT in the C++ standard.
+// Using them is a bad practice and not portable. So DON'T use them.
+//
+// Still, Google Mock is designed to work even if the user uses signed
+// wchar_t or unsigned wchar_t (obviously, assuming the compiler
+// supports them).
+//
+// To gcc,
+// wchar_t == signed wchar_t != unsigned wchar_t == unsigned int
+#ifdef __GNUC__
+// signed/unsigned wchar_t are valid types.
+# define GMOCK_HAS_SIGNED_WCHAR_T_ 1
+#endif
+
+// In what follows, we use the term "kind" to indicate whether a type
+// is bool, an integer type (excluding bool), a floating-point type,
+// or none of them. This categorization is useful for determining
+// when a matcher argument type can be safely converted to another
+// type in the implementation of SafeMatcherCast.
+enum TypeKind {
+ kBool, kInteger, kFloatingPoint, kOther
+};
+
+// KindOf<T>::value is the kind of type T.
+template <typename T> struct KindOf {
+ enum { value = kOther }; // The default kind.
+};
+
+// This macro declares that the kind of 'type' is 'kind'.
+#define GMOCK_DECLARE_KIND_(type, kind) \
+ template <> struct KindOf<type> { enum { value = kind }; }
+
+GMOCK_DECLARE_KIND_(bool, kBool);
+
+// All standard integer types.
+GMOCK_DECLARE_KIND_(char, kInteger);
+GMOCK_DECLARE_KIND_(signed char, kInteger);
+GMOCK_DECLARE_KIND_(unsigned char, kInteger);
+GMOCK_DECLARE_KIND_(short, kInteger); // NOLINT
+GMOCK_DECLARE_KIND_(unsigned short, kInteger); // NOLINT
+GMOCK_DECLARE_KIND_(int, kInteger);
+GMOCK_DECLARE_KIND_(unsigned int, kInteger);
+GMOCK_DECLARE_KIND_(long, kInteger); // NOLINT
+GMOCK_DECLARE_KIND_(unsigned long, kInteger); // NOLINT
+
+#if GMOCK_WCHAR_T_IS_NATIVE_
+GMOCK_DECLARE_KIND_(wchar_t, kInteger);
+#endif
+
+// Non-standard integer types.
+GMOCK_DECLARE_KIND_(Int64, kInteger);
+GMOCK_DECLARE_KIND_(UInt64, kInteger);
+
+// All standard floating-point types.
+GMOCK_DECLARE_KIND_(float, kFloatingPoint);
+GMOCK_DECLARE_KIND_(double, kFloatingPoint);
+GMOCK_DECLARE_KIND_(long double, kFloatingPoint);
+
+#undef GMOCK_DECLARE_KIND_
+
+// Evaluates to the kind of 'type'.
+#define GMOCK_KIND_OF_(type) \
+ static_cast< ::testing::internal::TypeKind>( \
+ ::testing::internal::KindOf<type>::value)
+
+// Evaluates to true iff integer type T is signed.
+#define GMOCK_IS_SIGNED_(T) (static_cast<T>(-1) < 0)
+
+// LosslessArithmeticConvertibleImpl<kFromKind, From, kToKind, To>::value
+// is true iff arithmetic type From can be losslessly converted to
+// arithmetic type To.
+//
+// It's the user's responsibility to ensure that both From and To are
+// raw (i.e. has no CV modifier, is not a pointer, and is not a
+// reference) built-in arithmetic types, kFromKind is the kind of
+// From, and kToKind is the kind of To; the value is
+// implementation-defined when the above pre-condition is violated.
+template <TypeKind kFromKind, typename From, TypeKind kToKind, typename To>
+struct LosslessArithmeticConvertibleImpl : public false_type {};
+
+// Converting bool to bool is lossless.
+template <>
+struct LosslessArithmeticConvertibleImpl<kBool, bool, kBool, bool>
+ : public true_type {}; // NOLINT
+
+// Converting bool to any integer type is lossless.
+template <typename To>
+struct LosslessArithmeticConvertibleImpl<kBool, bool, kInteger, To>
+ : public true_type {}; // NOLINT
+
+// Converting bool to any floating-point type is lossless.
+template <typename To>
+struct LosslessArithmeticConvertibleImpl<kBool, bool, kFloatingPoint, To>
+ : public true_type {}; // NOLINT
+
+// Converting an integer to bool is lossy.
+template <typename From>
+struct LosslessArithmeticConvertibleImpl<kInteger, From, kBool, bool>
+ : public false_type {}; // NOLINT
+
+// Converting an integer to another non-bool integer is lossless iff
+// the target type's range encloses the source type's range.
+template <typename From, typename To>
+struct LosslessArithmeticConvertibleImpl<kInteger, From, kInteger, To>
+ : public bool_constant<
+ // When converting from a smaller size to a larger size, we are
+ // fine as long as we are not converting from signed to unsigned.
+ ((sizeof(From) < sizeof(To)) &&
+ (!GMOCK_IS_SIGNED_(From) || GMOCK_IS_SIGNED_(To))) ||
+ // When converting between the same size, the signedness must match.
+ ((sizeof(From) == sizeof(To)) &&
+ (GMOCK_IS_SIGNED_(From) == GMOCK_IS_SIGNED_(To)))> {}; // NOLINT
+
+#undef GMOCK_IS_SIGNED_
+
+// Converting an integer to a floating-point type may be lossy, since
+// the format of a floating-point number is implementation-defined.
+template <typename From, typename To>
+struct LosslessArithmeticConvertibleImpl<kInteger, From, kFloatingPoint, To>
+ : public false_type {}; // NOLINT
+
+// Converting a floating-point to bool is lossy.
+template <typename From>
+struct LosslessArithmeticConvertibleImpl<kFloatingPoint, From, kBool, bool>
+ : public false_type {}; // NOLINT
+
+// Converting a floating-point to an integer is lossy.
+template <typename From, typename To>
+struct LosslessArithmeticConvertibleImpl<kFloatingPoint, From, kInteger, To>
+ : public false_type {}; // NOLINT
+
+// Converting a floating-point to another floating-point is lossless
+// iff the target type is at least as big as the source type.
+template <typename From, typename To>
+struct LosslessArithmeticConvertibleImpl<
+ kFloatingPoint, From, kFloatingPoint, To>
+ : public bool_constant<sizeof(From) <= sizeof(To)> {}; // NOLINT
+
+// LosslessArithmeticConvertible<From, To>::value is true iff arithmetic
+// type From can be losslessly converted to arithmetic type To.
+//
+// It's the user's responsibility to ensure that both From and To are
+// raw (i.e. has no CV modifier, is not a pointer, and is not a
+// reference) built-in arithmetic types; the value is
+// implementation-defined when the above pre-condition is violated.
+template <typename From, typename To>
+struct LosslessArithmeticConvertible
+ : public LosslessArithmeticConvertibleImpl<
+ GMOCK_KIND_OF_(From), From, GMOCK_KIND_OF_(To), To> {}; // NOLINT
+
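Some concrete values implied by the rules above (the short/int line assumes the usual case sizeof(short) < sizeof(int)):

// LosslessArithmeticConvertible<bool,   int>::value      is true  (bool -> integer).
// LosslessArithmeticConvertible<int,    bool>::value     is false (integer -> bool).
// LosslessArithmeticConvertible<short,  int>::value      is true  when sizeof(short) < sizeof(int).
// LosslessArithmeticConvertible<int,    unsigned>::value is false (same size, signedness differs).
// LosslessArithmeticConvertible<int,    double>::value   is false (integer -> floating point is treated as lossy).
// LosslessArithmeticConvertible<float,  double>::value   is true  (target at least as wide).
// LosslessArithmeticConvertible<double, float>::value    is false.
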
+// This interface knows how to report a Google Mock failure (either
+// non-fatal or fatal).
+class FailureReporterInterface {
+ public:
+ // The type of a failure (either non-fatal or fatal).
+ enum FailureType {
+ NONFATAL, FATAL
+ };
+
+ virtual ~FailureReporterInterface() {}
+
+ // Reports a failure that occurred at the given source file location.
+ virtual void ReportFailure(FailureType type, const char* file, int line,
+ const string& message) = 0;
+};
+
+// Returns the failure reporter used by Google Mock.
+FailureReporterInterface* GetFailureReporter();
+
+// Asserts that condition is true; aborts the process with the given
+// message if condition is false. We cannot use LOG(FATAL) or CHECK()
+// as Google Mock might be used to mock the log sink itself. We
+// inline this function to prevent it from showing up in the stack
+// trace.
+inline void Assert(bool condition, const char* file, int line,
+ const string& msg) {
+ if (!condition) {
+ GetFailureReporter()->ReportFailure(FailureReporterInterface::FATAL,
+ file, line, msg);
+ }
+}
+inline void Assert(bool condition, const char* file, int line) {
+ Assert(condition, file, line, "Assertion failed.");
+}
+
+// Verifies that condition is true; generates a non-fatal failure if
+// condition is false.
+inline void Expect(bool condition, const char* file, int line,
+ const string& msg) {
+ if (!condition) {
+ GetFailureReporter()->ReportFailure(FailureReporterInterface::NONFATAL,
+ file, line, msg);
+ }
+}
+inline void Expect(bool condition, const char* file, int line) {
+ Expect(condition, file, line, "Expectation failed.");
+}
+
+// Severity level of a log.
+enum LogSeverity {
+ INFO = 0,
+ WARNING = 1
+};
+
+// Valid values for the --gmock_verbose flag.
+
+// All logs (informational and warnings) are printed.
+const char kInfoVerbosity[] = "info";
+// Only warnings are printed.
+const char kWarningVerbosity[] = "warning";
+// No logs are printed.
+const char kErrorVerbosity[] = "error";
+
+// Returns true iff a log with the given severity is visible according
+// to the --gmock_verbose flag.
+bool LogIsVisible(LogSeverity severity);
+
+// Prints the given message to stdout iff 'severity' >= the level
+// specified by the --gmock_verbose flag. If stack_frames_to_skip >=
+// 0, also prints the stack trace excluding the top
+// stack_frames_to_skip frames. In opt mode, any positive
+// stack_frames_to_skip is treated as 0, since we don't know which
+// function calls will be inlined by the compiler and need to be
+// conservative.
+void Log(LogSeverity severity, const string& message, int stack_frames_to_skip);
+
+// TODO(wan@google.com): group all type utilities together.
+
+// Type traits.
+
+// is_reference<T>::value is non-zero iff T is a reference type.
+template <typename T> struct is_reference : public false_type {};
+template <typename T> struct is_reference<T&> : public true_type {};
+
+// type_equals<T1, T2>::value is non-zero iff T1 and T2 are the same type.
+template <typename T1, typename T2> struct type_equals : public false_type {};
+template <typename T> struct type_equals<T, T> : public true_type {};
+
+// remove_reference<T>::type removes the reference from type T, if any.
+template <typename T> struct remove_reference { typedef T type; }; // NOLINT
+template <typename T> struct remove_reference<T&> { typedef T type; }; // NOLINT
+
+// Invalid<T>() returns an invalid value of type T. This is useful
+// when a value of type T is needed for compilation, but the statement
+// will not really be executed (or we don't care if the statement
+// crashes).
+template <typename T>
+inline T Invalid() {
+ return *static_cast<typename remove_reference<T>::type*>(NULL);
+}
+template <>
+inline void Invalid<void>() {}
+
+// Given a raw type (i.e. having no top-level reference or const
+// modifier) RawContainer that's either an STL-style container or a
+// native array, class StlContainerView<RawContainer> has the
+// following members:
+//
+// - type is a type that provides an STL-style container view to
+// (i.e. implements the STL container concept for) RawContainer;
+// - const_reference is a type that provides a reference to a const
+// RawContainer;
+// - ConstReference(raw_container) returns a const reference to an STL-style
+// container view to raw_container, which is a RawContainer.
+// - Copy(raw_container) returns an STL-style container view of a
+// copy of raw_container, which is a RawContainer.
+//
+// This generic version is used when RawContainer itself is already an
+// STL-style container.
+template <class RawContainer>
+class StlContainerView {
+ public:
+ typedef RawContainer type;
+ typedef const type& const_reference;
+
+ static const_reference ConstReference(const RawContainer& container) {
+ // Ensures that RawContainer is not a const type.
+ testing::StaticAssertTypeEq<RawContainer,
+ GTEST_REMOVE_CONST_(RawContainer)>();
+ return container;
+ }
+ static type Copy(const RawContainer& container) { return container; }
+};
+
+// This specialization is used when RawContainer is a native array type.
+template <typename Element, size_t N>
+class StlContainerView<Element[N]> {
+ public:
+ typedef GTEST_REMOVE_CONST_(Element) RawElement;
+ typedef internal::NativeArray<RawElement> type;
+ // NativeArray<T> can represent a native array either by value or by
+ // reference (selected by a constructor argument), so 'const type'
+ // can be used to reference a const native array. We cannot
+ // 'typedef const type& const_reference' here, as that would mean
+ // ConstReference() has to return a reference to a local variable.
+ typedef const type const_reference;
+
+ static const_reference ConstReference(const Element (&array)[N]) {
+ // Ensures that Element is not a const type.
+ testing::StaticAssertTypeEq<Element, RawElement>();
+#if GTEST_OS_SYMBIAN
+ // The Nokia Symbian compiler confuses itself in template instantiation
+ // for this call without the cast to Element*:
+ // function call '[testing::internal::NativeArray<char *>].NativeArray(
+ // {lval} const char *[4], long, testing::internal::RelationToSource)'
+ // does not match
+ // 'testing::internal::NativeArray<char *>::NativeArray(
+ // char *const *, unsigned int, testing::internal::RelationToSource)'
+ // (instantiating: 'testing::internal::ContainsMatcherImpl
+ // <const char * (&)[4]>::Matches(const char * (&)[4]) const')
+ // (instantiating: 'testing::internal::StlContainerView<char *[4]>::
+ // ConstReference(const char * (&)[4])')
+ // (and though the N parameter type is mismatched in the above explicit
+ // conversion of it doesn't help - only the conversion of the array).
+ return type(const_cast<Element*>(&array[0]), N, kReference);
+#else
+ return type(array, N, kReference);
+#endif // GTEST_OS_SYMBIAN
+ }
+ static type Copy(const Element (&array)[N]) {
+#if GTEST_OS_SYMBIAN
+ return type(const_cast<Element*>(&array[0]), N, kCopy);
+#else
+ return type(array, N, kCopy);
+#endif // GTEST_OS_SYMBIAN
+ }
+};
+
+// This specialization is used when RawContainer is a native array
+// represented as a (pointer, size) tuple.
+template <typename ElementPointer, typename Size>
+class StlContainerView< ::std::tr1::tuple<ElementPointer, Size> > {
+ public:
+ typedef GTEST_REMOVE_CONST_(
+ typename internal::PointeeOf<ElementPointer>::type) RawElement;
+ typedef internal::NativeArray<RawElement> type;
+ typedef const type const_reference;
+
+ static const_reference ConstReference(
+ const ::std::tr1::tuple<ElementPointer, Size>& array) {
+ using ::std::tr1::get;
+ return type(get<0>(array), get<1>(array), kReference);
+ }
+ static type Copy(const ::std::tr1::tuple<ElementPointer, Size>& array) {
+ using ::std::tr1::get;
+ return type(get<0>(array), get<1>(array), kCopy);
+ }
+};
+
+// The following specialization prevents the user from instantiating
+// StlContainer with a reference type.
+template <typename T> class StlContainerView<T&>;
+
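An illustrative use of the native-array specialization described above; it assumes NativeArray (from gtest) exposes size()/begin()/end(), which is how gmock's container matchers consume it.

// Illustrative only: viewing a native array as an STL-style container.
inline void StlContainerViewSketch() {
  int values[3] = {1, 2, 3};
  typedef ::testing::internal::StlContainerView<int[3]> View;
  View::const_reference view = View::ConstReference(values);  // no element copy
  // Assuming NativeArray's container interface: view.size() == 3 and
  // *view.begin() == 1, while the elements still live in 'values'.
  View::type copy = View::Copy(values);  // this one does copy the elements
  (void)view;
  (void)copy;
}
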
+} // namespace internal
+} // namespace testing
+
+#endif // GMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_INTERNAL_UTILS_H_
+
+namespace testing {
+
+// To implement an action Foo, define:
+// 1. a class FooAction that implements the ActionInterface interface, and
+// 2. a factory function that creates an Action object from a
+// const FooAction*.
+//
+// The two-level delegation design follows that of Matcher, providing
+// consistency for extension developers. It also eases ownership
+// management as Action objects can now be copied like plain values.
+
+namespace internal {
+
+template <typename F1, typename F2>
+class ActionAdaptor;
+
+// BuiltInDefaultValue<T>::Get() returns the "built-in" default
+// value for type T, which is NULL when T is a pointer type, 0 when T
+// is a numeric type, false when T is bool, or "" when T is string or
+// std::string. For any other type T, this value is undefined and the
+// function will abort the process.
+template <typename T>
+class BuiltInDefaultValue {
+ public:
+ // This function returns true iff type T has a built-in default value.
+ static bool Exists() { return false; }
+ static T Get() {
+ Assert(false, __FILE__, __LINE__,
+ "Default action undefined for the function return type.");
+ return internal::Invalid<T>();
+ // The above statement will never be reached, but is required in
+ // order for this function to compile.
+ }
+};
+
+// This partial specialization says that we use the same built-in
+// default value for T and const T.
+template <typename T>
+class BuiltInDefaultValue<const T> {
+ public:
+ static bool Exists() { return BuiltInDefaultValue<T>::Exists(); }
+ static T Get() { return BuiltInDefaultValue<T>::Get(); }
+};
+
+// This partial specialization defines the default values for pointer
+// types.
+template <typename T>
+class BuiltInDefaultValue<T*> {
+ public:
+ static bool Exists() { return true; }
+ static T* Get() { return NULL; }
+};
+
+// The following specializations define the default values for
+// specific types we care about.
+#define GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(type, value) \
+ template <> \
+ class BuiltInDefaultValue<type> { \
+ public: \
+ static bool Exists() { return true; } \
+ static type Get() { return value; } \
+ }
+
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(void, ); // NOLINT
+#if GTEST_HAS_GLOBAL_STRING
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(::string, "");
+#endif // GTEST_HAS_GLOBAL_STRING
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(::std::string, "");
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(bool, false);
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned char, '\0');
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed char, '\0');
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(char, '\0');
+
+// There's no need for a default action for signed wchar_t, as that
+// type is the same as wchar_t for gcc, and invalid for MSVC.
+//
+// There's also no need for a default action for unsigned wchar_t, as
+// that type is the same as unsigned int for gcc, and invalid for
+// MSVC.
+#if GMOCK_WCHAR_T_IS_NATIVE_
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(wchar_t, 0U); // NOLINT
+#endif
+
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned short, 0U); // NOLINT
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed short, 0); // NOLINT
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned int, 0U);
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed int, 0);
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned long, 0UL); // NOLINT
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed long, 0L); // NOLINT
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(UInt64, 0);
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(Int64, 0);
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(float, 0);
+GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(double, 0);
+
+#undef GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_
+
+} // namespace internal
+
+// When an unexpected function call is encountered, Google Mock will
+// let it return a default value if the user has specified one for its
+// return type, or if the return type has a built-in default value;
+// otherwise Google Mock won't know what value to return and will have
+// to abort the process.
+//
+// The DefaultValue<T> class allows a user to specify the
+// default value for a type T that is both copyable and publicly
+// destructible (i.e. anything that can be used as a function return
+// type). The usage is:
+//
+// // Sets the default value for type T to be foo.
+// DefaultValue<T>::Set(foo);
+template <typename T>
+class DefaultValue {
+ public:
+ // Sets the default value for type T; requires T to be
+  // copy-constructible and have a public destructor.
+ static void Set(T x) {
+ delete value_;
+ value_ = new T(x);
+ }
+
+ // Unsets the default value for type T.
+ static void Clear() {
+ delete value_;
+ value_ = NULL;
+ }
+
+ // Returns true iff the user has set the default value for type T.
+ static bool IsSet() { return value_ != NULL; }
+
+ // Returns true if T has a default return value set by the user or there
+ // exists a built-in default value.
+ static bool Exists() {
+ return IsSet() || internal::BuiltInDefaultValue<T>::Exists();
+ }
+
+ // Returns the default value for type T if the user has set one;
+ // otherwise returns the built-in default value if there is one;
+ // otherwise aborts the process.
+ static T Get() {
+ return value_ == NULL ?
+ internal::BuiltInDefaultValue<T>::Get() : *value_;
+ }
+ private:
+ static const T* value_;
+};
+
+// This partial specialization allows a user to set default values for
+// reference types.
+template <typename T>
+class DefaultValue<T&> {
+ public:
+ // Sets the default value for type T&.
+ static void Set(T& x) { // NOLINT
+ address_ = &x;
+ }
+
+ // Unsets the default value for type T&.
+ static void Clear() {
+ address_ = NULL;
+ }
+
+ // Returns true iff the user has set the default value for type T&.
+ static bool IsSet() { return address_ != NULL; }
+
+ // Returns true if T has a default return value set by the user or there
+ // exists a built-in default value.
+ static bool Exists() {
+ return IsSet() || internal::BuiltInDefaultValue<T&>::Exists();
+ }
+
+ // Returns the default value for type T& if the user has set one;
+ // otherwise returns the built-in default value if there is one;
+ // otherwise aborts the process.
+ static T& Get() {
+ return address_ == NULL ?
+ internal::BuiltInDefaultValue<T&>::Get() : *address_;
+ }
+ private:
+ static T* address_;
+};
+
+// This specialization allows DefaultValue<void>::Get() to
+// compile.
+template <>
+class DefaultValue<void> {
+ public:
+ static bool Exists() { return true; }
+ static void Get() {}
+};
+
+// Points to the user-set default value for type T.
+template <typename T>
+const T* DefaultValue<T>::value_ = NULL;
+
+// Points to the user-set default value for type T&.
+template <typename T>
+T* DefaultValue<T&>::address_ = NULL;
+
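A sketch of the user-facing workflow described above; ResultStruct and its value are made up.

// Illustrative only: installing a default return value for a custom type.
struct ResultStruct { int code; };

inline void DefaultValueSketch() {
  ResultStruct fallback;
  fallback.code = -1;
  ::testing::DefaultValue<ResultStruct>::Set(fallback);
  // From here on, an unstubbed mock call returning ResultStruct yields 'fallback'.
  bool is_set = ::testing::DefaultValue<ResultStruct>::IsSet();          // true
  ResultStruct returned = ::testing::DefaultValue<ResultStruct>::Get();  // returned.code == -1
  ::testing::DefaultValue<ResultStruct>::Clear();
  (void)is_set;
  (void)returned;
}
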
+// Implement this interface to define an action for function type F.
+template <typename F>
+class ActionInterface {
+ public:
+ typedef typename internal::Function<F>::Result Result;
+ typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+ ActionInterface() {}
+ virtual ~ActionInterface() {}
+
+ // Performs the action. This method is not const, as in general an
+ // action can have side effects and be stateful. For example, a
+ // get-the-next-element-from-the-collection action will need to
+ // remember the current element.
+ virtual Result Perform(const ArgumentTuple& args) = 0;
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ActionInterface);
+};
+
+// An Action<F> is a copyable and IMMUTABLE (except by assignment)
+// object that represents an action to be taken when a mock function
+// of type F is called. The implementation of Action<T> is just a
+// linked_ptr to const ActionInterface<T>, so copying is fairly cheap.
+// Don't inherit from Action!
+//
+// You can view an object implementing ActionInterface<F> as a
+// concrete action (including its current state), and an Action<F>
+// object as a handle to it.
+template <typename F>
+class Action {
+ public:
+ typedef typename internal::Function<F>::Result Result;
+ typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+ // Constructs a null Action. Needed for storing Action objects in
+ // STL containers.
+ Action() : impl_(NULL) {}
+
+ // Constructs an Action from its implementation. A NULL impl is
+ // used to represent the "do-default" action.
+ explicit Action(ActionInterface<F>* impl) : impl_(impl) {}
+
+ // Copy constructor.
+ Action(const Action& action) : impl_(action.impl_) {}
+
+ // This constructor allows us to turn an Action<Func> object into an
+ // Action<F>, as long as F's arguments can be implicitly converted
+ // to Func's and Func's return type can be implicitly converted to
+ // F's.
+ template <typename Func>
+ explicit Action(const Action<Func>& action);
+
+ // Returns true iff this is the DoDefault() action.
+ bool IsDoDefault() const { return impl_.get() == NULL; }
+
+ // Performs the action. Note that this method is const even though
+ // the corresponding method in ActionInterface is not. The reason
+ // is that a const Action<F> means that it cannot be re-bound to
+ // another concrete action, not that the concrete action it binds to
+ // cannot change state. (Think of the difference between a const
+ // pointer and a pointer to const.)
+ Result Perform(const ArgumentTuple& args) const {
+ internal::Assert(
+ !IsDoDefault(), __FILE__, __LINE__,
+ "You are using DoDefault() inside a composite action like "
+ "DoAll() or WithArgs(). This is not supported for technical "
+ "reasons. Please instead spell out the default action, or "
+ "assign the default action to an Action variable and use "
+ "the variable in various places.");
+ return impl_->Perform(args);
+ }
+
+ private:
+ template <typename F1, typename F2>
+ friend class internal::ActionAdaptor;
+
+ internal::linked_ptr<ActionInterface<F> > impl_;
+};
+
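The two-step recipe above, made concrete with a made-up AddOneAction for the signature int(int); the Action is constructed directly from the interface pointer here, though MakeAction (defined below) does the same thing.

// Illustrative only: a custom action that returns its argument plus one.
class AddOneAction : public ::testing::ActionInterface<int(int)> {
 public:
  virtual int Perform(const ArgumentTuple& args) {
    return ::std::tr1::get<0>(args) + 1;
  }
};

inline void AddOneSketch() {
  // Wrap the interface implementation in a copyable Action handle.
  ::testing::Action<int(int)> add_one(new AddOneAction);     // takes ownership
  int result = add_one.Perform(::std::tr1::make_tuple(4));   // result == 5
  (void)result;
}
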
+// The PolymorphicAction class template makes it easy to implement a
+// polymorphic action (i.e. an action that can be used in mock
+// functions of more than one type, e.g. Return()).
+//
+// To define a polymorphic action, a user first provides a COPYABLE
+// implementation class that has a Perform() method template:
+//
+// class FooAction {
+// public:
+// template <typename Result, typename ArgumentTuple>
+// Result Perform(const ArgumentTuple& args) const {
+// // Processes the arguments and returns a result, using
+// // tr1::get<N>(args) to get the N-th (0-based) argument in the tuple.
+// }
+// ...
+// };
+//
+// Then the user creates the polymorphic action using
+// MakePolymorphicAction(object) where object has type FooAction. See
+// the definition of Return(void) and SetArgumentPointee<N>(value) for
+// complete examples.
+template <typename Impl>
+class PolymorphicAction {
+ public:
+ explicit PolymorphicAction(const Impl& impl) : impl_(impl) {}
+
+ template <typename F>
+ operator Action<F>() const {
+ return Action<F>(new MonomorphicImpl<F>(impl_));
+ }
+
+ private:
+ template <typename F>
+ class MonomorphicImpl : public ActionInterface<F> {
+ public:
+ typedef typename internal::Function<F>::Result Result;
+ typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+ explicit MonomorphicImpl(const Impl& impl) : impl_(impl) {}
+
+ virtual Result Perform(const ArgumentTuple& args) {
+ return impl_.template Perform<Result>(args);
+ }
+
+ private:
+ Impl impl_;
+
+ GTEST_DISALLOW_ASSIGN_(MonomorphicImpl);
+ };
+
+ Impl impl_;
+
+ GTEST_DISALLOW_ASSIGN_(PolymorphicAction);
+};
+
+// Creates an Action from its implementation and returns it. The
+// created Action object owns the implementation.
+template <typename F>
+Action<F> MakeAction(ActionInterface<F>* impl) {
+ return Action<F>(impl);
+}
+
+// Creates a polymorphic action from its implementation. This is
+// easier to use than the PolymorphicAction<Impl> constructor as it
+// doesn't require you to explicitly write the template argument, e.g.
+//
+// MakePolymorphicAction(foo);
+// vs
+// PolymorphicAction<TypeOfFoo>(foo);
+template <typename Impl>
+inline PolymorphicAction<Impl> MakePolymorphicAction(const Impl& impl) {
+ return PolymorphicAction<Impl>(impl);
+}
+
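Completing the FooAction skeleton above with a made-up polymorphic action; it works in any mock function whose first argument is convertible to the return type.

// Illustrative only: a polymorphic action that returns the first argument.
class ReturnFirstArgumentAction {
 public:
  template <typename Result, typename ArgumentTuple>
  Result Perform(const ArgumentTuple& args) const {
    return ::std::tr1::get<0>(args);
  }
};

// Factory so tests can write .WillOnce(ReturnFirstArgument()).
inline ::testing::PolymorphicAction<ReturnFirstArgumentAction>
ReturnFirstArgument() {
  return ::testing::MakePolymorphicAction(ReturnFirstArgumentAction());
}
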
+namespace internal {
+
+// Allows an Action<F2> object to pose as an Action<F1>, as long as F2
+// and F1 are compatible.
+template <typename F1, typename F2>
+class ActionAdaptor : public ActionInterface<F1> {
+ public:
+ typedef typename internal::Function<F1>::Result Result;
+ typedef typename internal::Function<F1>::ArgumentTuple ArgumentTuple;
+
+ explicit ActionAdaptor(const Action<F2>& from) : impl_(from.impl_) {}
+
+ virtual Result Perform(const ArgumentTuple& args) {
+ return impl_->Perform(args);
+ }
+
+ private:
+ const internal::linked_ptr<ActionInterface<F2> > impl_;
+
+ GTEST_DISALLOW_ASSIGN_(ActionAdaptor);
+};
+
+// Implements the polymorphic Return(x) action, which can be used in
+// any function that returns the type of x, regardless of the argument
+// types.
+//
+// Note: The value passed into Return must be converted into
+// Function<F>::Result when this action is cast to Action<F> rather than
+// when that action is performed. This is important in scenarios like
+//
+// MOCK_METHOD1(Method, T(U));
+// ...
+// {
+// Foo foo;
+// X x(&foo);
+// EXPECT_CALL(mock, Method(_)).WillOnce(Return(x));
+// }
+//
+// In the example above, the variable x holds a reference to foo, which leaves
+// scope and gets destroyed. If copying X just copies a reference to foo,
+// that copy will be left with a hanging reference. If conversion to T
+// makes a copy of foo, the above code is safe. To support that scenario, we
+// need to make sure that the type conversion happens inside the EXPECT_CALL
+// statement, and conversion of the result of Return to Action<T(U)> is a
+// good place for that.
+//
+template <typename R>
+class ReturnAction {
+ public:
+ // Constructs a ReturnAction object from the value to be returned.
+ // 'value' is passed by value instead of by const reference in order
+ // to allow Return("string literal") to compile.
+ explicit ReturnAction(R value) : value_(value) {}
+
+ // This template type conversion operator allows Return(x) to be
+ // used in ANY function that returns x's type.
+ template <typename F>
+ operator Action<F>() const {
+ // Assert statement belongs here because this is the best place to verify
+ // conditions on F. It produces the clearest error messages
+ // in most compilers.
+ // Impl really belongs in this scope as a local class but can't
+ // because MSVC produces duplicate symbols in different translation units
+ // in this case. Until MS fixes that bug we put Impl into the class scope
+ // and put the typedef both here (for use in assert statement) and
+ // in the Impl class. But both definitions must be the same.
+ typedef typename Function<F>::Result Result;
+ GTEST_COMPILE_ASSERT_(
+ !internal::is_reference<Result>::value,
+ use_ReturnRef_instead_of_Return_to_return_a_reference);
+ return Action<F>(new Impl<F>(value_));
+ }
+
+ private:
+ // Implements the Return(x) action for a particular function type F.
+ template <typename F>
+ class Impl : public ActionInterface<F> {
+ public:
+ typedef typename Function<F>::Result Result;
+ typedef typename Function<F>::ArgumentTuple ArgumentTuple;
+
+ // The implicit cast is necessary when Result has more than one
+ // single-argument constructor (e.g. Result is std::vector<int>) and R
+ // has a type conversion operator template. In that case, value_(value)
+// won't compile as the compiler doesn't know which constructor of
+ // Result to call. ImplicitCast_ forces the compiler to convert R to
+ // Result without considering explicit constructors, thus resolving the
+ // ambiguity. value_ is then initialized using its copy constructor.
+ explicit Impl(R value)
+ : value_(::testing::internal::ImplicitCast_<Result>(value)) {}
+
+ virtual Result Perform(const ArgumentTuple&) { return value_; }
+
+ private:
+ GTEST_COMPILE_ASSERT_(!internal::is_reference<Result>::value,
+ Result_cannot_be_a_reference_type);
+ Result value_;
+
+ GTEST_DISALLOW_ASSIGN_(Impl);
+ };
+
+ R value_;
+
+ GTEST_DISALLOW_ASSIGN_(ReturnAction);
+};
+
+// Implements the ReturnNull() action.
+class ReturnNullAction {
+ public:
+ // Allows ReturnNull() to be used in any pointer-returning function.
+ template <typename Result, typename ArgumentTuple>
+ static Result Perform(const ArgumentTuple&) {
+ GTEST_COMPILE_ASSERT_(internal::is_pointer<Result>::value,
+ ReturnNull_can_be_used_to_return_a_pointer_only);
+ return NULL;
+ }
+};
+
+// Implements the Return() action.
+class ReturnVoidAction {
+ public:
+ // Allows Return() to be used in any void-returning function.
+ template <typename Result, typename ArgumentTuple>
+ static void Perform(const ArgumentTuple&) {
+ CompileAssertTypesEqual<void, Result>();
+ }
+};
+
+// Implements the polymorphic ReturnRef(x) action, which can be used
+// in any function that returns a reference to the type of x,
+// regardless of the argument types.
+template <typename T>
+class ReturnRefAction {
+ public:
+ // Constructs a ReturnRefAction object from the reference to be returned.
+ explicit ReturnRefAction(T& ref) : ref_(ref) {} // NOLINT
+
+ // This template type conversion operator allows ReturnRef(x) to be
+ // used in ANY function that returns a reference to x's type.
+ template <typename F>
+ operator Action<F>() const {
+ typedef typename Function<F>::Result Result;
+ // Asserts that the function return type is a reference. This
+ // catches the user error of using ReturnRef(x) when Return(x)
+ // should be used, and generates some helpful error message.
+ GTEST_COMPILE_ASSERT_(internal::is_reference<Result>::value,
+ use_Return_instead_of_ReturnRef_to_return_a_value);
+ return Action<F>(new Impl<F>(ref_));
+ }
+
+ private:
+ // Implements the ReturnRef(x) action for a particular function type F.
+ template <typename F>
+ class Impl : public ActionInterface<F> {
+ public:
+ typedef typename Function<F>::Result Result;
+ typedef typename Function<F>::ArgumentTuple ArgumentTuple;
+
+ explicit Impl(T& ref) : ref_(ref) {} // NOLINT
+
+ virtual Result Perform(const ArgumentTuple&) {
+ return ref_;
+ }
+
+ private:
+ T& ref_;
+
+ GTEST_DISALLOW_ASSIGN_(Impl);
+ };
+
+ T& ref_;
+
+ GTEST_DISALLOW_ASSIGN_(ReturnRefAction);
+};
+
+// Implements the polymorphic ReturnRefOfCopy(x) action, which can be
+// used in any function that returns a reference to the type of x,
+// regardless of the argument types.
+template <typename T>
+class ReturnRefOfCopyAction {
+ public:
+ // Constructs a ReturnRefOfCopyAction object from the reference to
+ // be returned.
+ explicit ReturnRefOfCopyAction(const T& value) : value_(value) {} // NOLINT
+
+ // This template type conversion operator allows ReturnRefOfCopy(x) to be
+ // used in ANY function that returns a reference to x's type.
+ template <typename F>
+ operator Action<F>() const {
+ typedef typename Function<F>::Result Result;
+ // Asserts that the function return type is a reference. This
+ // catches the user error of using ReturnRefOfCopy(x) when Return(x)
+ // should be used, and generates some helpful error message.
+ GTEST_COMPILE_ASSERT_(
+ internal::is_reference<Result>::value,
+ use_Return_instead_of_ReturnRefOfCopy_to_return_a_value);
+ return Action<F>(new Impl<F>(value_));
+ }
+
+ private:
+ // Implements the ReturnRefOfCopy(x) action for a particular function type F.
+ template <typename F>
+ class Impl : public ActionInterface<F> {
+ public:
+ typedef typename Function<F>::Result Result;
+ typedef typename Function<F>::ArgumentTuple ArgumentTuple;
+
+ explicit Impl(const T& value) : value_(value) {} // NOLINT
+
+ virtual Result Perform(const ArgumentTuple&) {
+ return value_;
+ }
+
+ private:
+ T value_;
+
+ GTEST_DISALLOW_ASSIGN_(Impl);
+ };
+
+ const T value_;
+
+ GTEST_DISALLOW_ASSIGN_(ReturnRefOfCopyAction);
+};
+
+// Implements the polymorphic DoDefault() action.
+class DoDefaultAction {
+ public:
+ // This template type conversion operator allows DoDefault() to be
+ // used in any function.
+ template <typename F>
+ operator Action<F>() const { return Action<F>(NULL); }
+};
+
+// Implements the Assign action to set a given pointer referent to a
+// particular value.
+template <typename T1, typename T2>
+class AssignAction {
+ public:
+ AssignAction(T1* ptr, T2 value) : ptr_(ptr), value_(value) {}
+
+ template <typename Result, typename ArgumentTuple>
+ void Perform(const ArgumentTuple& /* args */) const {
+ *ptr_ = value_;
+ }
+
+ private:
+ T1* const ptr_;
+ const T2 value_;
+
+ GTEST_DISALLOW_ASSIGN_(AssignAction);
+};
+
+#if !GTEST_OS_WINDOWS_MOBILE
+
+// Implements the SetErrnoAndReturn action to simulate return from
+// various system calls and libc functions.
+template <typename T>
+class SetErrnoAndReturnAction {
+ public:
+ SetErrnoAndReturnAction(int errno_value, T result)
+ : errno_(errno_value),
+ result_(result) {}
+ template <typename Result, typename ArgumentTuple>
+ Result Perform(const ArgumentTuple& /* args */) const {
+ errno = errno_;
+ return result_;
+ }
+
+ private:
+ const int errno_;
+ const T result_;
+
+ GTEST_DISALLOW_ASSIGN_(SetErrnoAndReturnAction);
+};
+
+#endif // !GTEST_OS_WINDOWS_MOBILE
+
+// Implements the SetArgumentPointee<N>(x) action for any function
+// whose N-th argument (0-based) is a pointer to x's type. The
+// template parameter kIsProto is true iff type A is ProtocolMessage,
+// proto2::Message, or a sub-class of those.
+template <size_t N, typename A, bool kIsProto>
+class SetArgumentPointeeAction {
+ public:
+ // Constructs an action that sets the variable pointed to by the
+ // N-th function argument to 'value'.
+ explicit SetArgumentPointeeAction(const A& value) : value_(value) {}
+
+ template <typename Result, typename ArgumentTuple>
+ void Perform(const ArgumentTuple& args) const {
+ CompileAssertTypesEqual<void, Result>();
+ *::std::tr1::get<N>(args) = value_;
+ }
+
+ private:
+ const A value_;
+
+ GTEST_DISALLOW_ASSIGN_(SetArgumentPointeeAction);
+};
+
+template <size_t N, typename Proto>
+class SetArgumentPointeeAction<N, Proto, true> {
+ public:
+ // Constructs an action that sets the variable pointed to by the
+ // N-th function argument to 'proto'. Both ProtocolMessage and
+ // proto2::Message have the CopyFrom() method, so the same
+ // implementation works for both.
+ explicit SetArgumentPointeeAction(const Proto& proto) : proto_(new Proto) {
+ proto_->CopyFrom(proto);
+ }
+
+ template <typename Result, typename ArgumentTuple>
+ void Perform(const ArgumentTuple& args) const {
+ CompileAssertTypesEqual<void, Result>();
+ ::std::tr1::get<N>(args)->CopyFrom(*proto_);
+ }
+
+ private:
+ const internal::linked_ptr<Proto> proto_;
+
+ GTEST_DISALLOW_ASSIGN_(SetArgumentPointeeAction);
+};
+
+// Implements the InvokeWithoutArgs(f) action. The template argument
+// FunctionImpl is the implementation type of f, which can be either a
+// function pointer or a functor. InvokeWithoutArgs(f) can be used as an
+// Action<F> as long as f's type is compatible with F (i.e. f can be
+// assigned to a tr1::function<F>).
+template <typename FunctionImpl>
+class InvokeWithoutArgsAction {
+ public:
+ // The c'tor makes a copy of function_impl (either a function
+ // pointer or a functor).
+ explicit InvokeWithoutArgsAction(FunctionImpl function_impl)
+ : function_impl_(function_impl) {}
+
+ // Allows InvokeWithoutArgs(f) to be used as any action whose type is
+ // compatible with f.
+ template <typename Result, typename ArgumentTuple>
+ Result Perform(const ArgumentTuple&) { return function_impl_(); }
+
+ private:
+ FunctionImpl function_impl_;
+
+ GTEST_DISALLOW_ASSIGN_(InvokeWithoutArgsAction);
+};
+
+// Implements the InvokeWithoutArgs(object_ptr, &Class::Method) action.
+template <class Class, typename MethodPtr>
+class InvokeMethodWithoutArgsAction {
+ public:
+ InvokeMethodWithoutArgsAction(Class* obj_ptr, MethodPtr method_ptr)
+ : obj_ptr_(obj_ptr), method_ptr_(method_ptr) {}
+
+ template <typename Result, typename ArgumentTuple>
+ Result Perform(const ArgumentTuple&) const {
+ return (obj_ptr_->*method_ptr_)();
+ }
+
+ private:
+ Class* const obj_ptr_;
+ const MethodPtr method_ptr_;
+
+ GTEST_DISALLOW_ASSIGN_(InvokeMethodWithoutArgsAction);
+};
+
+// Implements the IgnoreResult(action) action.
+template <typename A>
+class IgnoreResultAction {
+ public:
+ explicit IgnoreResultAction(const A& action) : action_(action) {}
+
+ template <typename F>
+ operator Action<F>() const {
+ // Assert statement belongs here because this is the best place to verify
+ // conditions on F. It produces the clearest error messages
+ // in most compilers.
+ // Impl really belongs in this scope as a local class but can't
+ // because MSVC produces duplicate symbols in different translation units
+ // in this case. Until MS fixes that bug we put Impl into the class scope
+ // and put the typedef both here (for use in assert statement) and
+ // in the Impl class. But both definitions must be the same.
+ typedef typename internal::Function<F>::Result Result;
+
+ // Asserts at compile time that F returns void.
+ CompileAssertTypesEqual<void, Result>();
+
+ return Action<F>(new Impl<F>(action_));
+ }
+
+ private:
+ template <typename F>
+ class Impl : public ActionInterface<F> {
+ public:
+ typedef typename internal::Function<F>::Result Result;
+ typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+ explicit Impl(const A& action) : action_(action) {}
+
+ virtual void Perform(const ArgumentTuple& args) {
+ // Performs the action and ignores its result.
+ action_.Perform(args);
+ }
+
+ private:
+ // Type OriginalFunction is the same as F except that its return
+ // type is IgnoredValue.
+ typedef typename internal::Function<F>::MakeResultIgnoredValue
+ OriginalFunction;
+
+ const Action<OriginalFunction> action_;
+
+ GTEST_DISALLOW_ASSIGN_(Impl);
+ };
+
+ const A action_;
+
+ GTEST_DISALLOW_ASSIGN_(IgnoreResultAction);
+};
+
+// A ReferenceWrapper<T> object represents a reference to type T,
+// which can be either const or not. It can be explicitly converted
+// from, and implicitly converted to, a T&. Unlike a reference,
+// ReferenceWrapper<T> can be copied and can survive template type
+// inference. This is used to support by-reference arguments in the
+// InvokeArgument<N>(...) action. The idea was from "reference
+// wrappers" in tr1, which we don't have in our source tree yet.
+template <typename T>
+class ReferenceWrapper {
+ public:
+ // Constructs a ReferenceWrapper<T> object from a T&.
+ explicit ReferenceWrapper(T& l_value) : pointer_(&l_value) {} // NOLINT
+
+ // Allows a ReferenceWrapper<T> object to be implicitly converted to
+ // a T&.
+ operator T&() const { return *pointer_; }
+ private:
+ T* pointer_;
+};
+
+// Allows the expression ByRef(x) to be printed as a reference to x.
+template <typename T>
+void PrintTo(const ReferenceWrapper<T>& ref, ::std::ostream* os) {
+ T& value = ref;
+ UniversalPrinter<T&>::Print(value, os);
+}
+
+// Does two actions sequentially. Used for implementing the DoAll(a1,
+// a2, ...) action.
+template <typename Action1, typename Action2>
+class DoBothAction {
+ public:
+ DoBothAction(Action1 action1, Action2 action2)
+ : action1_(action1), action2_(action2) {}
+
+ // This template type conversion operator allows DoAll(a1, ..., a_n)
+ // to be used in ANY function of compatible type.
+ template <typename F>
+ operator Action<F>() const {
+ return Action<F>(new Impl<F>(action1_, action2_));
+ }
+
+ private:
+ // Implements the DoAll(...) action for a particular function type F.
+ template <typename F>
+ class Impl : public ActionInterface<F> {
+ public:
+ typedef typename Function<F>::Result Result;
+ typedef typename Function<F>::ArgumentTuple ArgumentTuple;
+ typedef typename Function<F>::MakeResultVoid VoidResult;
+
+ Impl(const Action<VoidResult>& action1, const Action<F>& action2)
+ : action1_(action1), action2_(action2) {}
+
+ virtual Result Perform(const ArgumentTuple& args) {
+ action1_.Perform(args);
+ return action2_.Perform(args);
+ }
+
+ private:
+ const Action<VoidResult> action1_;
+ const Action<F> action2_;
+
+ GTEST_DISALLOW_ASSIGN_(Impl);
+ };
+
+ Action1 action1_;
+ Action2 action2_;
+
+ GTEST_DISALLOW_ASSIGN_(DoBothAction);
+};
+
+} // namespace internal
+
+// An Unused object can be implicitly constructed from ANY value.
+// This is handy when defining actions that ignore some or all of the
+// mock function arguments. For example, given
+//
+// MOCK_METHOD3(Foo, double(const string& label, double x, double y));
+// MOCK_METHOD3(Bar, double(int index, double x, double y));
+//
+// instead of
+//
+// double DistanceToOriginWithLabel(const string& label, double x, double y) {
+// return sqrt(x*x + y*y);
+// }
+// double DistanceToOriginWithIndex(int index, double x, double y) {
+// return sqrt(x*x + y*y);
+// }
+// ...
+// EXPECT_CALL(mock, Foo("abc", _, _))
+// .WillOnce(Invoke(DistanceToOriginWithLabel));
+// EXPECT_CALL(mock, Bar(5, _, _))
+// .WillOnce(Invoke(DistanceToOriginWithIndex));
+//
+// you could write
+//
+// // We can declare any uninteresting argument as Unused.
+// double DistanceToOrigin(Unused, double x, double y) {
+// return sqrt(x*x + y*y);
+// }
+// ...
+// EXPECT_CALL(mock, Foo("abc", _, _)).WillOnce(Invoke(DistanceToOrigin));
+// EXPECT_CALL(mock, Bar(5, _, _)).WillOnce(Invoke(DistanceToOrigin));
+typedef internal::IgnoredValue Unused;
+
+// This constructor allows us to turn an Action<From> object into an
+// Action<To>, as long as To's arguments can be implicitly converted
+// to From's and From's return type can be implicitly converted to
+// To's.
+template <typename To>
+template <typename From>
+Action<To>::Action(const Action<From>& from)
+ : impl_(new internal::ActionAdaptor<To, From>(from)) {}
+
+// Creates an action that returns 'value'. 'value' is passed by value
+// instead of const reference - otherwise Return("string literal")
+// will trigger a compiler error about using array as initializer.
+template <typename R>
+internal::ReturnAction<R> Return(R value) {
+ return internal::ReturnAction<R>(value);
+}
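+// A minimal usage sketch, assuming a hypothetical mock object 'turtle' with
+// an int-returning GetX() method:
+//
+//   EXPECT_CALL(turtle, GetX())
+//       .WillOnce(Return(100))       // first call returns 100
+//       .WillRepeatedly(Return(0));  // subsequent calls return 0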
+
+// Creates an action that returns NULL.
+inline PolymorphicAction<internal::ReturnNullAction> ReturnNull() {
+ return MakePolymorphicAction(internal::ReturnNullAction());
+}
+
+// Creates an action that returns from a void function.
+inline PolymorphicAction<internal::ReturnVoidAction> Return() {
+ return MakePolymorphicAction(internal::ReturnVoidAction());
+}
+
+// Creates an action that returns a reference to a variable.
+template <typename R>
+inline internal::ReturnRefAction<R> ReturnRef(R& x) { // NOLINT
+ return internal::ReturnRefAction<R>(x);
+}
+
+// Creates an action that returns a reference to a copy of the
+// argument. The copy is created when the action is constructed and
+// lives as long as the action.
+template <typename R>
+inline internal::ReturnRefOfCopyAction<R> ReturnRefOfCopy(const R& x) {
+ return internal::ReturnRefOfCopyAction<R>(x);
+}
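+// A sketch contrasting the two reference-returning actions, assuming a
+// hypothetical mock 'registry' whose GetConfig() returns a Config&:
+//
+//   Config config;
+//   // Returns a reference to 'config' itself; later changes to 'config'
+//   // are visible to the caller.
+//   EXPECT_CALL(registry, GetConfig()).WillRepeatedly(ReturnRef(config));
+//   // ReturnRefOfCopy(config) would instead return a reference to a copy
+//   // made when the action is created, so later changes to 'config' are
+//   // not visible to the caller.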
+
+// Creates an action that does the default action for the given mock function.
+inline internal::DoDefaultAction DoDefault() {
+ return internal::DoDefaultAction();
+}
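+// A usage sketch, assuming a hypothetical mock whose Open() returns bool:
+//
+//   ON_CALL(mock, Open(_)).WillByDefault(Return(true));
+//   // DoDefault() defers to the ON_CALL default above (or to the built-in
+//   // default value if no ON_CALL matches). As noted earlier, it must not
+//   // be nested inside composite actions such as DoAll().
+//   EXPECT_CALL(mock, Open(_)).WillOnce(DoDefault());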
+
+// Creates an action that sets the variable pointed to by the N-th
+// (0-based) function argument to 'value'.
+template <size_t N, typename T>
+PolymorphicAction<
+ internal::SetArgumentPointeeAction<
+ N, T, internal::IsAProtocolMessage<T>::value> >
+SetArgPointee(const T& x) {
+ return MakePolymorphicAction(internal::SetArgumentPointeeAction<
+ N, T, internal::IsAProtocolMessage<T>::value>(x));
+}
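+// For example, with a hypothetical mock whose Parse() has signature
+// bool(const string& input, int* out):
+//
+//   EXPECT_CALL(parser, Parse("42", _))
+//       .WillOnce(DoAll(SetArgPointee<1>(42),  // *out = 42
+//                       Return(true)));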
+
+#if !((GTEST_GCC_VER_ && GTEST_GCC_VER_ < 40000) || GTEST_OS_SYMBIAN)
+// This overload allows SetArgPointee() to accept a string literal.
+// GCC prior to version 4.0 and the Symbian C++ compiler cannot distinguish
+// this overload from the templated version and emit a compile error.
+template <size_t N>
+PolymorphicAction<
+ internal::SetArgumentPointeeAction<N, const char*, false> >
+SetArgPointee(const char* p) {
+ return MakePolymorphicAction(internal::SetArgumentPointeeAction<
+ N, const char*, false>(p));
+}
+
+template <size_t N>
+PolymorphicAction<
+ internal::SetArgumentPointeeAction<N, const wchar_t*, false> >
+SetArgPointee(const wchar_t* p) {
+ return MakePolymorphicAction(internal::SetArgumentPointeeAction<
+ N, const wchar_t*, false>(p));
+}
+#endif
+
+// The following version is DEPRECATED.
+template <size_t N, typename T>
+PolymorphicAction<
+ internal::SetArgumentPointeeAction<
+ N, T, internal::IsAProtocolMessage<T>::value> >
+SetArgumentPointee(const T& x) {
+ return MakePolymorphicAction(internal::SetArgumentPointeeAction<
+ N, T, internal::IsAProtocolMessage<T>::value>(x));
+}
+
+// Creates an action that sets a pointer referent to a given value.
+template <typename T1, typename T2>
+PolymorphicAction<internal::AssignAction<T1, T2> > Assign(T1* ptr, T2 val) {
+ return MakePolymorphicAction(internal::AssignAction<T1, T2>(ptr, val));
+}
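+// A sketch, assuming a hypothetical mock whose Notify() returns void
+// (Assign() itself returns nothing, so it fits a void mock function or a
+// non-last slot of DoAll()):
+//
+//   bool notified = false;
+//   EXPECT_CALL(mock, Notify()).WillOnce(Assign(&notified, true));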
+
+#if !GTEST_OS_WINDOWS_MOBILE
+
+// Creates an action that sets errno and returns the appropriate error.
+template <typename T>
+PolymorphicAction<internal::SetErrnoAndReturnAction<T> >
+SetErrnoAndReturn(int errval, T result) {
+ return MakePolymorphicAction(
+ internal::SetErrnoAndReturnAction<T>(errval, result));
+}
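+// A sketch, assuming a hypothetical mock file-system object whose Open()
+// returns int like the libc call it stands in for:
+//
+//   EXPECT_CALL(fs, Open("/secret", _))
+//       .WillOnce(SetErrnoAndReturn(EACCES, -1));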
+
+#endif // !GTEST_OS_WINDOWS_MOBILE
+
+// Various overloads for InvokeWithoutArgs().
+
+// Creates an action that invokes 'function_impl' with no arguments.
+template <typename FunctionImpl>
+PolymorphicAction<internal::InvokeWithoutArgsAction<FunctionImpl> >
+InvokeWithoutArgs(FunctionImpl function_impl) {
+ return MakePolymorphicAction(
+ internal::InvokeWithoutArgsAction<FunctionImpl>(function_impl));
+}
+
+// Creates an action that invokes the given method on the given object
+// with no argument.
+template <class Class, typename MethodPtr>
+PolymorphicAction<internal::InvokeMethodWithoutArgsAction<Class, MethodPtr> >
+InvokeWithoutArgs(Class* obj_ptr, MethodPtr method_ptr) {
+ return MakePolymorphicAction(
+ internal::InvokeMethodWithoutArgsAction<Class, MethodPtr>(
+ obj_ptr, method_ptr));
+}
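+// Usage sketches, assuming a hypothetical mock whose Done() returns bool, a
+// free function FlushLog() returning bool, and a 'helper' object with a
+// bool-returning Reset() method:
+//
+//   EXPECT_CALL(mock, Done(_, _))
+//       .WillOnce(InvokeWithoutArgs(FlushLog))             // calls FlushLog()
+//       .WillOnce(InvokeWithoutArgs(&helper, &Helper::Reset));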
+
+// Creates an action that performs an_action and throws away its
+// result. In other words, it changes the return type of an_action to
+// void. an_action MUST NOT return void, or the code won't compile.
+template <typename A>
+inline internal::IgnoreResultAction<A> IgnoreResult(const A& an_action) {
+ return internal::IgnoreResultAction<A>(an_action);
+}
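+// A sketch, assuming a hypothetical mock whose OnAccess() returns void and a
+// free function LogAccess() whose return value we don't care about:
+//
+//   int LogAccess(const string& user);
+//   ...
+//   EXPECT_CALL(mock, OnAccess(_))
+//       .WillOnce(IgnoreResult(Invoke(LogAccess)));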
+
+// Creates a reference wrapper for the given L-value. If necessary,
+// you can explicitly specify the type of the reference. For example,
+// if 'derived' is an object of type Derived, ByRef(derived)
+// would wrap a Derived&. If you want to wrap a const Base& instead,
+// where Base is a base class of Derived, just write:
+//
+// ByRef<const Base>(derived)
+template <typename T>
+inline internal::ReferenceWrapper<T> ByRef(T& l_value) { // NOLINT
+ return internal::ReferenceWrapper<T>(l_value);
+}
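+// A sketch, assuming a hypothetical mock whose Run() takes a callback that
+// accepts a Helper&; ByRef() keeps 'helper' from being copied when it is
+// forwarded by InvokeArgument<N>() (defined with the generated actions):
+//
+//   Helper helper;
+//   EXPECT_CALL(mock, Run(_))
+//       .WillOnce(InvokeArgument<0>(ByRef(helper)));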
+
+} // namespace testing
+
+#endif // GMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements some commonly used cardinalities. More
+// cardinalities can be defined by the user implementing the
+// CardinalityInterface interface if necessary.
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_
+
+#include <limits.h>
+#include <ostream> // NOLINT
+
+namespace testing {
+
+// To implement a cardinality Foo, define:
+// 1. a class FooCardinality that implements the
+// CardinalityInterface interface, and
+// 2. a factory function that creates a Cardinality object from a
+// const FooCardinality*.
+//
+// The two-level delegation design follows that of Matcher, providing
+// consistency for extension developers. It also eases ownership
+// management as Cardinality objects can now be copied like plain values.
+
+// The implementation of a cardinality.
+class CardinalityInterface {
+ public:
+ virtual ~CardinalityInterface() {}
+
+ // Conservative estimate on the lower/upper bound of the number of
+ // calls allowed.
+ virtual int ConservativeLowerBound() const { return 0; }
+ virtual int ConservativeUpperBound() const { return INT_MAX; }
+
+ // Returns true iff call_count calls will satisfy this cardinality.
+ virtual bool IsSatisfiedByCallCount(int call_count) const = 0;
+
+ // Returns true iff call_count calls will saturate this cardinality.
+ virtual bool IsSaturatedByCallCount(int call_count) const = 0;
+
+ // Describes self to an ostream.
+ virtual void DescribeTo(::std::ostream* os) const = 0;
+};
+
+// A Cardinality is a copyable and IMMUTABLE (except by assignment)
+// object that specifies how many times a mock function is expected to
+// be called. The implementation of Cardinality is just a linked_ptr
+// to const CardinalityInterface, so copying is fairly cheap.
+// Don't inherit from Cardinality!
+class Cardinality {
+ public:
+ // Constructs a null cardinality. Needed for storing Cardinality
+ // objects in STL containers.
+ Cardinality() {}
+
+ // Constructs a Cardinality from its implementation.
+ explicit Cardinality(const CardinalityInterface* impl) : impl_(impl) {}
+
+ // Conservative estimate on the lower/upper bound of the number of
+ // calls allowed.
+ int ConservativeLowerBound() const { return impl_->ConservativeLowerBound(); }
+ int ConservativeUpperBound() const { return impl_->ConservativeUpperBound(); }
+
+ // Returns true iff call_count calls will satisfy this cardinality.
+ bool IsSatisfiedByCallCount(int call_count) const {
+ return impl_->IsSatisfiedByCallCount(call_count);
+ }
+
+ // Returns true iff call_count calls will saturate this cardinality.
+ bool IsSaturatedByCallCount(int call_count) const {
+ return impl_->IsSaturatedByCallCount(call_count);
+ }
+
+ // Returns true iff call_count calls will over-saturate this
+ // cardinality, i.e. exceed the maximum number of allowed calls.
+ bool IsOverSaturatedByCallCount(int call_count) const {
+ return impl_->IsSaturatedByCallCount(call_count) &&
+ !impl_->IsSatisfiedByCallCount(call_count);
+ }
+
+// Describes self to an ostream.
+ void DescribeTo(::std::ostream* os) const { impl_->DescribeTo(os); }
+
+ // Describes the given actual call count to an ostream.
+ static void DescribeActualCallCountTo(int actual_call_count,
+ ::std::ostream* os);
+ private:
+ internal::linked_ptr<const CardinalityInterface> impl_;
+};
+
+// Creates a cardinality that allows at least n calls.
+Cardinality AtLeast(int n);
+
+// Creates a cardinality that allows at most n calls.
+Cardinality AtMost(int n);
+
+// Creates a cardinality that allows any number of calls.
+Cardinality AnyNumber();
+
+// Creates a cardinality that allows between min and max calls.
+Cardinality Between(int min, int max);
+
+// Creates a cardinality that allows exactly n calls.
+Cardinality Exactly(int n);
+
+// Creates a cardinality from its implementation.
+inline Cardinality MakeCardinality(const CardinalityInterface* c) {
+ return Cardinality(c);
+}
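+// A usage sketch, assuming a hypothetical mock with a Poll() method:
+//
+//   EXPECT_CALL(mock, Poll())
+//       .Times(Between(2, 5));  // AtLeast(), AtMost(), Exactly() and
+//                               // AnyNumber() are used the same way.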
+
+} // namespace testing
+
+#endif // GMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_
+// This file was GENERATED by a script. DO NOT EDIT BY HAND!!!
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements some commonly used variadic actions.
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_
+
+
+namespace testing {
+namespace internal {
+
+// InvokeHelper<F> knows how to unpack an N-tuple and invoke an N-ary
+// function or method with the unpacked values, where F is a function
+// type that takes N arguments.
+template <typename Result, typename ArgumentTuple>
+class InvokeHelper;
+
+template <typename R>
+class InvokeHelper<R, ::std::tr1::tuple<> > {
+ public:
+ template <typename Function>
+ static R Invoke(Function function, const ::std::tr1::tuple<>&) {
+ return function();
+ }
+
+ template <class Class, typename MethodPtr>
+ static R InvokeMethod(Class* obj_ptr,
+ MethodPtr method_ptr,
+ const ::std::tr1::tuple<>&) {
+ return (obj_ptr->*method_ptr)();
+ }
+};
+
+template <typename R, typename A1>
+class InvokeHelper<R, ::std::tr1::tuple<A1> > {
+ public:
+ template <typename Function>
+ static R Invoke(Function function, const ::std::tr1::tuple<A1>& args) {
+ using ::std::tr1::get;
+ return function(get<0>(args));
+ }
+
+ template <class Class, typename MethodPtr>
+ static R InvokeMethod(Class* obj_ptr,
+ MethodPtr method_ptr,
+ const ::std::tr1::tuple<A1>& args) {
+ using ::std::tr1::get;
+ return (obj_ptr->*method_ptr)(get<0>(args));
+ }
+};
+
+template <typename R, typename A1, typename A2>
+class InvokeHelper<R, ::std::tr1::tuple<A1, A2> > {
+ public:
+ template <typename Function>
+ static R Invoke(Function function, const ::std::tr1::tuple<A1, A2>& args) {
+ using ::std::tr1::get;
+ return function(get<0>(args), get<1>(args));
+ }
+
+ template <class Class, typename MethodPtr>
+ static R InvokeMethod(Class* obj_ptr,
+ MethodPtr method_ptr,
+ const ::std::tr1::tuple<A1, A2>& args) {
+ using ::std::tr1::get;
+ return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args));
+ }
+};
+
+template <typename R, typename A1, typename A2, typename A3>
+class InvokeHelper<R, ::std::tr1::tuple<A1, A2, A3> > {
+ public:
+ template <typename Function>
+ static R Invoke(Function function, const ::std::tr1::tuple<A1, A2,
+ A3>& args) {
+ using ::std::tr1::get;
+ return function(get<0>(args), get<1>(args), get<2>(args));
+ }
+
+ template <class Class, typename MethodPtr>
+ static R InvokeMethod(Class* obj_ptr,
+ MethodPtr method_ptr,
+ const ::std::tr1::tuple<A1, A2, A3>& args) {
+ using ::std::tr1::get;
+ return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args), get<2>(args));
+ }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4>
+class InvokeHelper<R, ::std::tr1::tuple<A1, A2, A3, A4> > {
+ public:
+ template <typename Function>
+ static R Invoke(Function function, const ::std::tr1::tuple<A1, A2, A3,
+ A4>& args) {
+ using ::std::tr1::get;
+ return function(get<0>(args), get<1>(args), get<2>(args), get<3>(args));
+ }
+
+ template <class Class, typename MethodPtr>
+ static R InvokeMethod(Class* obj_ptr,
+ MethodPtr method_ptr,
+ const ::std::tr1::tuple<A1, A2, A3, A4>& args) {
+ using ::std::tr1::get;
+ return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args), get<2>(args),
+ get<3>(args));
+ }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5>
+class InvokeHelper<R, ::std::tr1::tuple<A1, A2, A3, A4, A5> > {
+ public:
+ template <typename Function>
+ static R Invoke(Function function, const ::std::tr1::tuple<A1, A2, A3, A4,
+ A5>& args) {
+ using ::std::tr1::get;
+ return function(get<0>(args), get<1>(args), get<2>(args), get<3>(args),
+ get<4>(args));
+ }
+
+ template <class Class, typename MethodPtr>
+ static R InvokeMethod(Class* obj_ptr,
+ MethodPtr method_ptr,
+ const ::std::tr1::tuple<A1, A2, A3, A4, A5>& args) {
+ using ::std::tr1::get;
+ return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args), get<2>(args),
+ get<3>(args), get<4>(args));
+ }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6>
+class InvokeHelper<R, ::std::tr1::tuple<A1, A2, A3, A4, A5, A6> > {
+ public:
+ template <typename Function>
+ static R Invoke(Function function, const ::std::tr1::tuple<A1, A2, A3, A4,
+ A5, A6>& args) {
+ using ::std::tr1::get;
+ return function(get<0>(args), get<1>(args), get<2>(args), get<3>(args),
+ get<4>(args), get<5>(args));
+ }
+
+ template <class Class, typename MethodPtr>
+ static R InvokeMethod(Class* obj_ptr,
+ MethodPtr method_ptr,
+ const ::std::tr1::tuple<A1, A2, A3, A4, A5, A6>& args) {
+ using ::std::tr1::get;
+ return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args), get<2>(args),
+ get<3>(args), get<4>(args), get<5>(args));
+ }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6, typename A7>
+class InvokeHelper<R, ::std::tr1::tuple<A1, A2, A3, A4, A5, A6, A7> > {
+ public:
+ template <typename Function>
+ static R Invoke(Function function, const ::std::tr1::tuple<A1, A2, A3, A4,
+ A5, A6, A7>& args) {
+ using ::std::tr1::get;
+ return function(get<0>(args), get<1>(args), get<2>(args), get<3>(args),
+ get<4>(args), get<5>(args), get<6>(args));
+ }
+
+ template <class Class, typename MethodPtr>
+ static R InvokeMethod(Class* obj_ptr,
+ MethodPtr method_ptr,
+ const ::std::tr1::tuple<A1, A2, A3, A4, A5, A6,
+ A7>& args) {
+ using ::std::tr1::get;
+ return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args), get<2>(args),
+ get<3>(args), get<4>(args), get<5>(args), get<6>(args));
+ }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6, typename A7, typename A8>
+class InvokeHelper<R, ::std::tr1::tuple<A1, A2, A3, A4, A5, A6, A7, A8> > {
+ public:
+ template <typename Function>
+ static R Invoke(Function function, const ::std::tr1::tuple<A1, A2, A3, A4,
+ A5, A6, A7, A8>& args) {
+ using ::std::tr1::get;
+ return function(get<0>(args), get<1>(args), get<2>(args), get<3>(args),
+ get<4>(args), get<5>(args), get<6>(args), get<7>(args));
+ }
+
+ template <class Class, typename MethodPtr>
+ static R InvokeMethod(Class* obj_ptr,
+ MethodPtr method_ptr,
+ const ::std::tr1::tuple<A1, A2, A3, A4, A5, A6, A7,
+ A8>& args) {
+ using ::std::tr1::get;
+ return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args), get<2>(args),
+ get<3>(args), get<4>(args), get<5>(args), get<6>(args), get<7>(args));
+ }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6, typename A7, typename A8, typename A9>
+class InvokeHelper<R, ::std::tr1::tuple<A1, A2, A3, A4, A5, A6, A7, A8, A9> > {
+ public:
+ template <typename Function>
+ static R Invoke(Function function, const ::std::tr1::tuple<A1, A2, A3, A4,
+ A5, A6, A7, A8, A9>& args) {
+ using ::std::tr1::get;
+ return function(get<0>(args), get<1>(args), get<2>(args), get<3>(args),
+ get<4>(args), get<5>(args), get<6>(args), get<7>(args), get<8>(args));
+ }
+
+ template <class Class, typename MethodPtr>
+ static R InvokeMethod(Class* obj_ptr,
+ MethodPtr method_ptr,
+ const ::std::tr1::tuple<A1, A2, A3, A4, A5, A6, A7, A8,
+ A9>& args) {
+ using ::std::tr1::get;
+ return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args), get<2>(args),
+ get<3>(args), get<4>(args), get<5>(args), get<6>(args), get<7>(args),
+ get<8>(args));
+ }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6, typename A7, typename A8, typename A9,
+ typename A10>
+class InvokeHelper<R, ::std::tr1::tuple<A1, A2, A3, A4, A5, A6, A7, A8, A9,
+ A10> > {
+ public:
+ template <typename Function>
+ static R Invoke(Function function, const ::std::tr1::tuple<A1, A2, A3, A4,
+ A5, A6, A7, A8, A9, A10>& args) {
+ using ::std::tr1::get;
+ return function(get<0>(args), get<1>(args), get<2>(args), get<3>(args),
+ get<4>(args), get<5>(args), get<6>(args), get<7>(args), get<8>(args),
+ get<9>(args));
+ }
+
+ template <class Class, typename MethodPtr>
+ static R InvokeMethod(Class* obj_ptr,
+ MethodPtr method_ptr,
+ const ::std::tr1::tuple<A1, A2, A3, A4, A5, A6, A7, A8,
+ A9, A10>& args) {
+ using ::std::tr1::get;
+ return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args), get<2>(args),
+ get<3>(args), get<4>(args), get<5>(args), get<6>(args), get<7>(args),
+ get<8>(args), get<9>(args));
+ }
+};
+
+// CallableHelper has static methods for invoking "callables",
+// i.e. function pointers and functors. It uses overloading to
+// provide a uniform interface for invoking different kinds of
+// callables. In particular, you can use:
+//
+// CallableHelper<R>::Call(callable, a1, a2, ..., an)
+//
+// to invoke an n-ary callable, where R is its return type. If an
+// argument, say a2, needs to be passed by reference, you should write
+// ByRef(a2) instead of a2 in the above expression.
+template <typename R>
+class CallableHelper {
+ public:
+ // Calls a nullary callable.
+ template <typename Function>
+ static R Call(Function function) { return function(); }
+
+ // Calls a unary callable.
+
+ // We deliberately pass a1 by value instead of const reference here
+ // in case it is a C-string literal. If we had declared the
+ // parameter as 'const A1& a1' and write Call(function, "Hi"), the
+ // compiler would've thought A1 is 'char[3]', which causes trouble
+ // when you need to copy a value of type A1. By declaring the
+ // parameter as 'A1 a1', the compiler will correctly infer that A1
+ // is 'const char*' when it sees Call(function, "Hi").
+ //
+ // Since this function is defined inline, the compiler can get rid
+ // of the copying of the arguments. Therefore the performance won't
+ // be hurt.
+ template <typename Function, typename A1>
+ static R Call(Function function, A1 a1) { return function(a1); }
+
+ // Calls a binary callable.
+ template <typename Function, typename A1, typename A2>
+ static R Call(Function function, A1 a1, A2 a2) {
+ return function(a1, a2);
+ }
+
+ // Calls a ternary callable.
+ template <typename Function, typename A1, typename A2, typename A3>
+ static R Call(Function function, A1 a1, A2 a2, A3 a3) {
+ return function(a1, a2, a3);
+ }
+
+ // Calls a 4-ary callable.
+ template <typename Function, typename A1, typename A2, typename A3,
+ typename A4>
+ static R Call(Function function, A1 a1, A2 a2, A3 a3, A4 a4) {
+ return function(a1, a2, a3, a4);
+ }
+
+ // Calls a 5-ary callable.
+ template <typename Function, typename A1, typename A2, typename A3,
+ typename A4, typename A5>
+ static R Call(Function function, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5) {
+ return function(a1, a2, a3, a4, a5);
+ }
+
+ // Calls a 6-ary callable.
+ template <typename Function, typename A1, typename A2, typename A3,
+ typename A4, typename A5, typename A6>
+ static R Call(Function function, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6) {
+ return function(a1, a2, a3, a4, a5, a6);
+ }
+
+ // Calls a 7-ary callable.
+ template <typename Function, typename A1, typename A2, typename A3,
+ typename A4, typename A5, typename A6, typename A7>
+ static R Call(Function function, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6,
+ A7 a7) {
+ return function(a1, a2, a3, a4, a5, a6, a7);
+ }
+
+// Calls an 8-ary callable.
+ template <typename Function, typename A1, typename A2, typename A3,
+ typename A4, typename A5, typename A6, typename A7, typename A8>
+ static R Call(Function function, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6,
+ A7 a7, A8 a8) {
+ return function(a1, a2, a3, a4, a5, a6, a7, a8);
+ }
+
+ // Calls a 9-ary callable.
+ template <typename Function, typename A1, typename A2, typename A3,
+ typename A4, typename A5, typename A6, typename A7, typename A8,
+ typename A9>
+ static R Call(Function function, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6,
+ A7 a7, A8 a8, A9 a9) {
+ return function(a1, a2, a3, a4, a5, a6, a7, a8, a9);
+ }
+
+ // Calls a 10-ary callable.
+ template <typename Function, typename A1, typename A2, typename A3,
+ typename A4, typename A5, typename A6, typename A7, typename A8,
+ typename A9, typename A10>
+ static R Call(Function function, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6,
+ A7 a7, A8 a8, A9 a9, A10 a10) {
+ return function(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10);
+ }
+
+}; // class CallableHelper
+
+// An INTERNAL macro for extracting the type of a tuple field. It's
+// subject to change without notice - DO NOT USE IN USER CODE!
+#define GMOCK_FIELD_(Tuple, N) \
+ typename ::std::tr1::tuple_element<N, Tuple>::type
+
+// SelectArgs<Result, ArgumentTuple, k1, k2, ..., k_n>::type is the
+// type of an n-ary function whose i-th (1-based) argument type is the
+// k{i}-th (0-based) field of ArgumentTuple, which must be a tuple
+// type, and whose return type is Result. For example,
+// SelectArgs<int, ::std::tr1::tuple<bool, char, double, long>, 0, 3>::type
+// is int(bool, long).
+//
+// SelectArgs<Result, ArgumentTuple, k1, k2, ..., k_n>::Select(args)
+// returns the selected fields (k1, k2, ..., k_n) of args as a tuple.
+// For example,
+// SelectArgs<int, ::std::tr1::tuple<bool, char, double>, 2, 0>::Select(
+// ::std::tr1::make_tuple(true, 'a', 2.5))
+// returns ::std::tr1::make_tuple(2.5, true).
+//
+// The numbers in list k1, k2, ..., k_n must be >= 0, where n can be
+// in the range [0, 10]. Duplicates are allowed and they don't have
+// to be in an ascending or descending order.
+
+template <typename Result, typename ArgumentTuple, int k1, int k2, int k3,
+ int k4, int k5, int k6, int k7, int k8, int k9, int k10>
+class SelectArgs {
+ public:
+ typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+ GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3),
+ GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5),
+ GMOCK_FIELD_(ArgumentTuple, k6), GMOCK_FIELD_(ArgumentTuple, k7),
+ GMOCK_FIELD_(ArgumentTuple, k8), GMOCK_FIELD_(ArgumentTuple, k9),
+ GMOCK_FIELD_(ArgumentTuple, k10));
+ typedef typename Function<type>::ArgumentTuple SelectedArgs;
+ static SelectedArgs Select(const ArgumentTuple& args) {
+ using ::std::tr1::get;
+ return SelectedArgs(get<k1>(args), get<k2>(args), get<k3>(args),
+ get<k4>(args), get<k5>(args), get<k6>(args), get<k7>(args),
+ get<k8>(args), get<k9>(args), get<k10>(args));
+ }
+};
+
+template <typename Result, typename ArgumentTuple>
+class SelectArgs<Result, ArgumentTuple,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1> {
+ public:
+ typedef Result type();
+ typedef typename Function<type>::ArgumentTuple SelectedArgs;
+ static SelectedArgs Select(const ArgumentTuple& /* args */) {
+ using ::std::tr1::get;
+ return SelectedArgs();
+ }
+};
+
+template <typename Result, typename ArgumentTuple, int k1>
+class SelectArgs<Result, ArgumentTuple,
+ k1, -1, -1, -1, -1, -1, -1, -1, -1, -1> {
+ public:
+ typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1));
+ typedef typename Function<type>::ArgumentTuple SelectedArgs;
+ static SelectedArgs Select(const ArgumentTuple& args) {
+ using ::std::tr1::get;
+ return SelectedArgs(get<k1>(args));
+ }
+};
+
+template <typename Result, typename ArgumentTuple, int k1, int k2>
+class SelectArgs<Result, ArgumentTuple,
+ k1, k2, -1, -1, -1, -1, -1, -1, -1, -1> {
+ public:
+ typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+ GMOCK_FIELD_(ArgumentTuple, k2));
+ typedef typename Function<type>::ArgumentTuple SelectedArgs;
+ static SelectedArgs Select(const ArgumentTuple& args) {
+ using ::std::tr1::get;
+ return SelectedArgs(get<k1>(args), get<k2>(args));
+ }
+};
+
+template <typename Result, typename ArgumentTuple, int k1, int k2, int k3>
+class SelectArgs<Result, ArgumentTuple,
+ k1, k2, k3, -1, -1, -1, -1, -1, -1, -1> {
+ public:
+ typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+ GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3));
+ typedef typename Function<type>::ArgumentTuple SelectedArgs;
+ static SelectedArgs Select(const ArgumentTuple& args) {
+ using ::std::tr1::get;
+ return SelectedArgs(get<k1>(args), get<k2>(args), get<k3>(args));
+ }
+};
+
+template <typename Result, typename ArgumentTuple, int k1, int k2, int k3,
+ int k4>
+class SelectArgs<Result, ArgumentTuple,
+ k1, k2, k3, k4, -1, -1, -1, -1, -1, -1> {
+ public:
+ typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+ GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3),
+ GMOCK_FIELD_(ArgumentTuple, k4));
+ typedef typename Function<type>::ArgumentTuple SelectedArgs;
+ static SelectedArgs Select(const ArgumentTuple& args) {
+ using ::std::tr1::get;
+ return SelectedArgs(get<k1>(args), get<k2>(args), get<k3>(args),
+ get<k4>(args));
+ }
+};
+
+template <typename Result, typename ArgumentTuple, int k1, int k2, int k3,
+ int k4, int k5>
+class SelectArgs<Result, ArgumentTuple,
+ k1, k2, k3, k4, k5, -1, -1, -1, -1, -1> {
+ public:
+ typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+ GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3),
+ GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5));
+ typedef typename Function<type>::ArgumentTuple SelectedArgs;
+ static SelectedArgs Select(const ArgumentTuple& args) {
+ using ::std::tr1::get;
+ return SelectedArgs(get<k1>(args), get<k2>(args), get<k3>(args),
+ get<k4>(args), get<k5>(args));
+ }
+};
+
+template <typename Result, typename ArgumentTuple, int k1, int k2, int k3,
+ int k4, int k5, int k6>
+class SelectArgs<Result, ArgumentTuple,
+ k1, k2, k3, k4, k5, k6, -1, -1, -1, -1> {
+ public:
+ typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+ GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3),
+ GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5),
+ GMOCK_FIELD_(ArgumentTuple, k6));
+ typedef typename Function<type>::ArgumentTuple SelectedArgs;
+ static SelectedArgs Select(const ArgumentTuple& args) {
+ using ::std::tr1::get;
+ return SelectedArgs(get<k1>(args), get<k2>(args), get<k3>(args),
+ get<k4>(args), get<k5>(args), get<k6>(args));
+ }
+};
+
+template <typename Result, typename ArgumentTuple, int k1, int k2, int k3,
+ int k4, int k5, int k6, int k7>
+class SelectArgs<Result, ArgumentTuple,
+ k1, k2, k3, k4, k5, k6, k7, -1, -1, -1> {
+ public:
+ typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+ GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3),
+ GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5),
+ GMOCK_FIELD_(ArgumentTuple, k6), GMOCK_FIELD_(ArgumentTuple, k7));
+ typedef typename Function<type>::ArgumentTuple SelectedArgs;
+ static SelectedArgs Select(const ArgumentTuple& args) {
+ using ::std::tr1::get;
+ return SelectedArgs(get<k1>(args), get<k2>(args), get<k3>(args),
+ get<k4>(args), get<k5>(args), get<k6>(args), get<k7>(args));
+ }
+};
+
+template <typename Result, typename ArgumentTuple, int k1, int k2, int k3,
+ int k4, int k5, int k6, int k7, int k8>
+class SelectArgs<Result, ArgumentTuple,
+ k1, k2, k3, k4, k5, k6, k7, k8, -1, -1> {
+ public:
+ typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+ GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3),
+ GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5),
+ GMOCK_FIELD_(ArgumentTuple, k6), GMOCK_FIELD_(ArgumentTuple, k7),
+ GMOCK_FIELD_(ArgumentTuple, k8));
+ typedef typename Function<type>::ArgumentTuple SelectedArgs;
+ static SelectedArgs Select(const ArgumentTuple& args) {
+ using ::std::tr1::get;
+ return SelectedArgs(get<k1>(args), get<k2>(args), get<k3>(args),
+ get<k4>(args), get<k5>(args), get<k6>(args), get<k7>(args),
+ get<k8>(args));
+ }
+};
+
+template <typename Result, typename ArgumentTuple, int k1, int k2, int k3,
+ int k4, int k5, int k6, int k7, int k8, int k9>
+class SelectArgs<Result, ArgumentTuple,
+ k1, k2, k3, k4, k5, k6, k7, k8, k9, -1> {
+ public:
+ typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1),
+ GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3),
+ GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5),
+ GMOCK_FIELD_(ArgumentTuple, k6), GMOCK_FIELD_(ArgumentTuple, k7),
+ GMOCK_FIELD_(ArgumentTuple, k8), GMOCK_FIELD_(ArgumentTuple, k9));
+ typedef typename Function<type>::ArgumentTuple SelectedArgs;
+ static SelectedArgs Select(const ArgumentTuple& args) {
+ using ::std::tr1::get;
+ return SelectedArgs(get<k1>(args), get<k2>(args), get<k3>(args),
+ get<k4>(args), get<k5>(args), get<k6>(args), get<k7>(args),
+ get<k8>(args), get<k9>(args));
+ }
+};
+
+#undef GMOCK_FIELD_
+
+// Implements the WithArgs action.
+template <typename InnerAction, int k1 = -1, int k2 = -1, int k3 = -1,
+ int k4 = -1, int k5 = -1, int k6 = -1, int k7 = -1, int k8 = -1,
+ int k9 = -1, int k10 = -1>
+class WithArgsAction {
+ public:
+ explicit WithArgsAction(const InnerAction& action) : action_(action) {}
+
+ template <typename F>
+ operator Action<F>() const { return MakeAction(new Impl<F>(action_)); }
+
+ private:
+ template <typename F>
+ class Impl : public ActionInterface<F> {
+ public:
+ typedef typename Function<F>::Result Result;
+ typedef typename Function<F>::ArgumentTuple ArgumentTuple;
+
+ explicit Impl(const InnerAction& action) : action_(action) {}
+
+ virtual Result Perform(const ArgumentTuple& args) {
+ return action_.Perform(SelectArgs<Result, ArgumentTuple, k1, k2, k3, k4,
+ k5, k6, k7, k8, k9, k10>::Select(args));
+ }
+
+ private:
+ typedef typename SelectArgs<Result, ArgumentTuple,
+ k1, k2, k3, k4, k5, k6, k7, k8, k9, k10>::type InnerFunctionType;
+
+ Action<InnerFunctionType> action_;
+ };
+
+ const InnerAction action_;
+
+ GTEST_DISALLOW_ASSIGN_(WithArgsAction);
+};
+
+// A macro from the ACTION* family (defined later in this file)
+// defines an action that can be used in a mock function. Typically,
+// these actions only care about a subset of the arguments of the mock
+// function. For example, if such an action only uses the second
+// argument, it can be used in any mock function that takes >= 2
+// arguments where the type of the second argument is compatible.
+//
+// Therefore, the action implementation must be prepared to take more
+// arguments than it needs. The ExcessiveArg type is used to
+// represent those excessive arguments. In order to keep the compiler
+// error messages tractable, we define it in the testing namespace
+// instead of testing::internal. However, this is an INTERNAL TYPE
+// and subject to change without notice, so a user MUST NOT USE THIS
+// TYPE DIRECTLY.
+struct ExcessiveArg {};
+
+// A helper class needed for implementing the ACTION* macros.
+template <typename Result, class Impl>
+class ActionHelper {
+ public:
+ static Result Perform(Impl* impl, const ::std::tr1::tuple<>& args) {
+ using ::std::tr1::get;
+ return impl->template gmock_PerformImpl<>(args, ExcessiveArg(),
+ ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+ ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+ ExcessiveArg());
+ }
+
+ template <typename A0>
+ static Result Perform(Impl* impl, const ::std::tr1::tuple<A0>& args) {
+ using ::std::tr1::get;
+ return impl->template gmock_PerformImpl<A0>(args, get<0>(args),
+ ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+ ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+ ExcessiveArg());
+ }
+
+ template <typename A0, typename A1>
+ static Result Perform(Impl* impl, const ::std::tr1::tuple<A0, A1>& args) {
+ using ::std::tr1::get;
+ return impl->template gmock_PerformImpl<A0, A1>(args, get<0>(args),
+ get<1>(args), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+ ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+ ExcessiveArg());
+ }
+
+ template <typename A0, typename A1, typename A2>
+ static Result Perform(Impl* impl, const ::std::tr1::tuple<A0, A1, A2>& args) {
+ using ::std::tr1::get;
+ return impl->template gmock_PerformImpl<A0, A1, A2>(args, get<0>(args),
+ get<1>(args), get<2>(args), ExcessiveArg(), ExcessiveArg(),
+ ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+ ExcessiveArg());
+ }
+
+ template <typename A0, typename A1, typename A2, typename A3>
+ static Result Perform(Impl* impl, const ::std::tr1::tuple<A0, A1, A2,
+ A3>& args) {
+ using ::std::tr1::get;
+ return impl->template gmock_PerformImpl<A0, A1, A2, A3>(args, get<0>(args),
+ get<1>(args), get<2>(args), get<3>(args), ExcessiveArg(),
+ ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+ ExcessiveArg());
+ }
+
+ template <typename A0, typename A1, typename A2, typename A3, typename A4>
+ static Result Perform(Impl* impl, const ::std::tr1::tuple<A0, A1, A2, A3,
+ A4>& args) {
+ using ::std::tr1::get;
+ return impl->template gmock_PerformImpl<A0, A1, A2, A3, A4>(args,
+ get<0>(args), get<1>(args), get<2>(args), get<3>(args), get<4>(args),
+ ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+ ExcessiveArg());
+ }
+
+ template <typename A0, typename A1, typename A2, typename A3, typename A4,
+ typename A5>
+ static Result Perform(Impl* impl, const ::std::tr1::tuple<A0, A1, A2, A3, A4,
+ A5>& args) {
+ using ::std::tr1::get;
+ return impl->template gmock_PerformImpl<A0, A1, A2, A3, A4, A5>(args,
+ get<0>(args), get<1>(args), get<2>(args), get<3>(args), get<4>(args),
+ get<5>(args), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(),
+ ExcessiveArg());
+ }
+
+ template <typename A0, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6>
+ static Result Perform(Impl* impl, const ::std::tr1::tuple<A0, A1, A2, A3, A4,
+ A5, A6>& args) {
+ using ::std::tr1::get;
+ return impl->template gmock_PerformImpl<A0, A1, A2, A3, A4, A5, A6>(args,
+ get<0>(args), get<1>(args), get<2>(args), get<3>(args), get<4>(args),
+ get<5>(args), get<6>(args), ExcessiveArg(), ExcessiveArg(),
+ ExcessiveArg());
+ }
+
+ template <typename A0, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6, typename A7>
+ static Result Perform(Impl* impl, const ::std::tr1::tuple<A0, A1, A2, A3, A4,
+ A5, A6, A7>& args) {
+ using ::std::tr1::get;
+ return impl->template gmock_PerformImpl<A0, A1, A2, A3, A4, A5, A6,
+ A7>(args, get<0>(args), get<1>(args), get<2>(args), get<3>(args),
+ get<4>(args), get<5>(args), get<6>(args), get<7>(args), ExcessiveArg(),
+ ExcessiveArg());
+ }
+
+ template <typename A0, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6, typename A7, typename A8>
+ static Result Perform(Impl* impl, const ::std::tr1::tuple<A0, A1, A2, A3, A4,
+ A5, A6, A7, A8>& args) {
+ using ::std::tr1::get;
+ return impl->template gmock_PerformImpl<A0, A1, A2, A3, A4, A5, A6, A7,
+ A8>(args, get<0>(args), get<1>(args), get<2>(args), get<3>(args),
+ get<4>(args), get<5>(args), get<6>(args), get<7>(args), get<8>(args),
+ ExcessiveArg());
+ }
+
+ template <typename A0, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6, typename A7, typename A8, typename A9>
+ static Result Perform(Impl* impl, const ::std::tr1::tuple<A0, A1, A2, A3, A4,
+ A5, A6, A7, A8, A9>& args) {
+ using ::std::tr1::get;
+ return impl->template gmock_PerformImpl<A0, A1, A2, A3, A4, A5, A6, A7, A8,
+ A9>(args, get<0>(args), get<1>(args), get<2>(args), get<3>(args),
+ get<4>(args), get<5>(args), get<6>(args), get<7>(args), get<8>(args),
+ get<9>(args));
+ }
+};
+
+} // namespace internal
+
+// Various overloads for Invoke().
+
+// WithArgs<N1, N2, ..., Nk>(an_action) creates an action that passes
+// the selected arguments of the mock function to an_action and
+// performs it. It serves as an adaptor between actions with
+// different argument lists. C++ doesn't support default arguments for
+// function templates, so we have to overload it.
+template <int k1, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1>
+WithArgs(const InnerAction& action) {
+ return internal::WithArgsAction<InnerAction, k1>(action);
+}
+
+template <int k1, int k2, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2>
+WithArgs(const InnerAction& action) {
+ return internal::WithArgsAction<InnerAction, k1, k2>(action);
+}
+
+template <int k1, int k2, int k3, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2, k3>
+WithArgs(const InnerAction& action) {
+ return internal::WithArgsAction<InnerAction, k1, k2, k3>(action);
+}
+
+template <int k1, int k2, int k3, int k4, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2, k3, k4>
+WithArgs(const InnerAction& action) {
+ return internal::WithArgsAction<InnerAction, k1, k2, k3, k4>(action);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5>
+WithArgs(const InnerAction& action) {
+ return internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5>(action);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6>
+WithArgs(const InnerAction& action) {
+ return internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6>(action);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, int k7,
+ typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6, k7>
+WithArgs(const InnerAction& action) {
+ return internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6,
+ k7>(action);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, int k7, int k8,
+ typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6, k7, k8>
+WithArgs(const InnerAction& action) {
+ return internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6, k7,
+ k8>(action);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, int k7, int k8,
+ int k9, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6, k7, k8, k9>
+WithArgs(const InnerAction& action) {
+ return internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6, k7, k8,
+ k9>(action);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, int k7, int k8,
+ int k9, int k10, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6, k7, k8,
+ k9, k10>
+WithArgs(const InnerAction& action) {
+ return internal::WithArgsAction<InnerAction, k1, k2, k3, k4, k5, k6, k7, k8,
+ k9, k10>(action);
+}
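+
+// A minimal usage sketch (the mock method Ternary and the helper SumOf2 are
+// hypothetical, shown for illustration only): forward arguments 0 and 2 of a
+// three-argument mock call to a binary function, whose result then becomes
+// the mock's return value.
+//
+//   int SumOf2(int x, int y) { return x + y; }
+//   ...
+//   EXPECT_CALL(mock, Ternary(_, _, _))
+//       .WillOnce(WithArgs<0, 2>(Invoke(SumOf2)));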
+
+// Creates an action that does actions a1, a2, ..., sequentially in
+// each invocation.
+template <typename Action1, typename Action2>
+inline internal::DoBothAction<Action1, Action2>
+DoAll(Action1 a1, Action2 a2) {
+ return internal::DoBothAction<Action1, Action2>(a1, a2);
+}
+
+template <typename Action1, typename Action2, typename Action3>
+inline internal::DoBothAction<Action1, internal::DoBothAction<Action2,
+ Action3> >
+DoAll(Action1 a1, Action2 a2, Action3 a3) {
+ return DoAll(a1, DoAll(a2, a3));
+}
+
+template <typename Action1, typename Action2, typename Action3,
+ typename Action4>
+inline internal::DoBothAction<Action1, internal::DoBothAction<Action2,
+ internal::DoBothAction<Action3, Action4> > >
+DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4) {
+ return DoAll(a1, DoAll(a2, a3, a4));
+}
+
+template <typename Action1, typename Action2, typename Action3,
+ typename Action4, typename Action5>
+inline internal::DoBothAction<Action1, internal::DoBothAction<Action2,
+ internal::DoBothAction<Action3, internal::DoBothAction<Action4,
+ Action5> > > >
+DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5) {
+ return DoAll(a1, DoAll(a2, a3, a4, a5));
+}
+
+template <typename Action1, typename Action2, typename Action3,
+ typename Action4, typename Action5, typename Action6>
+inline internal::DoBothAction<Action1, internal::DoBothAction<Action2,
+ internal::DoBothAction<Action3, internal::DoBothAction<Action4,
+ internal::DoBothAction<Action5, Action6> > > > >
+DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5, Action6 a6) {
+ return DoAll(a1, DoAll(a2, a3, a4, a5, a6));
+}
+
+template <typename Action1, typename Action2, typename Action3,
+ typename Action4, typename Action5, typename Action6, typename Action7>
+inline internal::DoBothAction<Action1, internal::DoBothAction<Action2,
+ internal::DoBothAction<Action3, internal::DoBothAction<Action4,
+ internal::DoBothAction<Action5, internal::DoBothAction<Action6,
+ Action7> > > > > >
+DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5, Action6 a6,
+ Action7 a7) {
+ return DoAll(a1, DoAll(a2, a3, a4, a5, a6, a7));
+}
+
+template <typename Action1, typename Action2, typename Action3,
+ typename Action4, typename Action5, typename Action6, typename Action7,
+ typename Action8>
+inline internal::DoBothAction<Action1, internal::DoBothAction<Action2,
+ internal::DoBothAction<Action3, internal::DoBothAction<Action4,
+ internal::DoBothAction<Action5, internal::DoBothAction<Action6,
+ internal::DoBothAction<Action7, Action8> > > > > > >
+DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5, Action6 a6,
+ Action7 a7, Action8 a8) {
+ return DoAll(a1, DoAll(a2, a3, a4, a5, a6, a7, a8));
+}
+
+template <typename Action1, typename Action2, typename Action3,
+ typename Action4, typename Action5, typename Action6, typename Action7,
+ typename Action8, typename Action9>
+inline internal::DoBothAction<Action1, internal::DoBothAction<Action2,
+ internal::DoBothAction<Action3, internal::DoBothAction<Action4,
+ internal::DoBothAction<Action5, internal::DoBothAction<Action6,
+ internal::DoBothAction<Action7, internal::DoBothAction<Action8,
+ Action9> > > > > > > >
+DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5, Action6 a6,
+ Action7 a7, Action8 a8, Action9 a9) {
+ return DoAll(a1, DoAll(a2, a3, a4, a5, a6, a7, a8, a9));
+}
+
+template <typename Action1, typename Action2, typename Action3,
+ typename Action4, typename Action5, typename Action6, typename Action7,
+ typename Action8, typename Action9, typename Action10>
+inline internal::DoBothAction<Action1, internal::DoBothAction<Action2,
+ internal::DoBothAction<Action3, internal::DoBothAction<Action4,
+ internal::DoBothAction<Action5, internal::DoBothAction<Action6,
+ internal::DoBothAction<Action7, internal::DoBothAction<Action8,
+ internal::DoBothAction<Action9, Action10> > > > > > > > >
+DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5, Action6 a6,
+ Action7 a7, Action8 a8, Action9 a9, Action10 a10) {
+ return DoAll(a1, DoAll(a2, a3, a4, a5, a6, a7, a8, a9, a10));
+}
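+
+// An illustrative use of DoAll (assuming SaveArg from gmock-more-actions.h;
+// 'key' and the mock method Lookup are hypothetical): the earlier actions
+// are typically void-returning, and the result of the last action becomes
+// the mock's return value.
+//
+//   int key = 0;
+//   EXPECT_CALL(mock, Lookup(_))
+//       .WillOnce(DoAll(SaveArg<0>(&key), Return(42)));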
+
+} // namespace testing
+
+// The ACTION* family of macros can be used in a namespace scope to
+// define custom actions easily. The syntax:
+//
+// ACTION(name) { statements; }
+//
+// will define an action with the given name that executes the
+// statements. The value returned by the statements will be used as
+// the return value of the action. Inside the statements, you can
+// refer to the K-th (0-based) argument of the mock function by
+// 'argK', and refer to its type by 'argK_type'. For example:
+//
+// ACTION(IncrementArg1) {
+// arg1_type temp = arg1;
+// return ++(*temp);
+// }
+//
+// allows you to write
+//
+// ...WillOnce(IncrementArg1());
+//
+// You can also refer to the entire argument tuple and its type by
+// 'args' and 'args_type', and refer to the mock function type and its
+// return type by 'function_type' and 'return_type'.
+//
+// Note that you don't need to specify the types of the mock function
+// arguments. However, rest assured that your code is still type-safe:
+// you'll get a compiler error if *arg1 doesn't support the ++
+// operator, or if the type of ++(*arg1) isn't compatible with the
+// mock function's return type, for example.
+//
+// Sometimes you'll want to parameterize the action. For that you can use
+// another macro:
+//
+// ACTION_P(name, param_name) { statements; }
+//
+// For example:
+//
+// ACTION_P(Add, n) { return arg0 + n; }
+//
+// will allow you to write:
+//
+// ...WillOnce(Add(5));
+//
+// Note that you don't need to provide the type of the parameter
+// either. If you need to reference the type of a parameter named
+// 'foo', you can write 'foo_type'. For example, in the body of
+// ACTION_P(Add, n) above, you can write 'n_type' to refer to the type
+// of 'n'.
+//
+// We also provide ACTION_P2, ACTION_P3, ..., up to ACTION_P10 to support
+// multi-parameter actions.
+//
+// For the purpose of typing, you can view
+//
+// ACTION_Pk(Foo, p1, ..., pk) { ... }
+//
+// as shorthand for
+//
+// template <typename p1_type, ..., typename pk_type>
+// FooActionPk<p1_type, ..., pk_type> Foo(p1_type p1, ..., pk_type pk) { ... }
+//
+// In particular, you can provide the template type arguments
+// explicitly when invoking Foo(), as in Foo<long, bool>(5, false);
+// although usually you can rely on the compiler to infer the types
+// for you automatically. You can assign the result of expression
+// Foo(p1, ..., pk) to a variable of type FooActionPk<p1_type, ...,
+// pk_type>. This can be useful when composing actions.
+//
+// You can also overload actions with different numbers of parameters:
+//
+// ACTION_P(Plus, a) { ... }
+// ACTION_P2(Plus, a, b) { ... }
+//
+// While it's tempting to always use the ACTION* macros when defining
+// a new action, you should also consider implementing ActionInterface
+// or using MakePolymorphicAction() instead, especially if you need to
+// use the action a lot. While these approaches require more work,
+// they give you more control over the types of the mock function
+// arguments and the action parameters, which in general leads to
+// better compiler error messages that pay off in the long run. They
+// also allow overloading actions based on parameter types (as opposed
+// to just based on the number of parameters).
+//
+// CAVEAT:
+//
+// ACTION*() can only be used in a namespace scope. The reason is
+// that C++ doesn't yet allow function-local types to be used to
+// instantiate templates. The upcoming C++0x standard will fix this.
+// Once that's done, we'll consider supporting the use of ACTION*()
+// inside a function.
+//
+// MORE INFORMATION:
+//
+// To learn more about using these macros, please search for 'ACTION'
+// on http://code.google.com/p/googlemock/wiki/CookBook.
+
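+// As a further illustration (the names ReturnDistanceTo and Distance are
+// hypothetical), a two-parameter action can be defined and used like this:
+//
+//   ACTION_P2(ReturnDistanceTo, x, y) {
+//     double dx = arg0 - x;
+//     double dy = arg1 - y;
+//     return sqrt(dx*dx + dy*dy);
+//   }
+//   ...
+//   EXPECT_CALL(mock, Distance(_, _))
+//       .WillOnce(ReturnDistanceTo(5.0, 26.5));
+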
+// An internal macro needed for implementing ACTION*().
+#define GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_\
+ const args_type& args GTEST_ATTRIBUTE_UNUSED_,\
+ arg0_type arg0 GTEST_ATTRIBUTE_UNUSED_,\
+ arg1_type arg1 GTEST_ATTRIBUTE_UNUSED_,\
+ arg2_type arg2 GTEST_ATTRIBUTE_UNUSED_,\
+ arg3_type arg3 GTEST_ATTRIBUTE_UNUSED_,\
+ arg4_type arg4 GTEST_ATTRIBUTE_UNUSED_,\
+ arg5_type arg5 GTEST_ATTRIBUTE_UNUSED_,\
+ arg6_type arg6 GTEST_ATTRIBUTE_UNUSED_,\
+ arg7_type arg7 GTEST_ATTRIBUTE_UNUSED_,\
+ arg8_type arg8 GTEST_ATTRIBUTE_UNUSED_,\
+ arg9_type arg9 GTEST_ATTRIBUTE_UNUSED_
+
+// Sometimes you want to give an action explicit template parameters
+// that cannot be inferred from its value parameters. ACTION() and
+// ACTION_P*() don't support that. ACTION_TEMPLATE() remedies that
+// and can be viewed as an extension to ACTION() and ACTION_P*().
+//
+// The syntax:
+//
+// ACTION_TEMPLATE(ActionName,
+// HAS_m_TEMPLATE_PARAMS(kind1, name1, ..., kind_m, name_m),
+// AND_n_VALUE_PARAMS(p1, ..., p_n)) { statements; }
+//
+// defines an action template that takes m explicit template
+// parameters and n value parameters. name_i is the name of the i-th
+// template parameter, and kind_i specifies whether it's a typename,
+// an integral constant, or a template. p_i is the name of the i-th
+// value parameter.
+//
+// Example:
+//
+// // DuplicateArg<k, T>(output) converts the k-th argument of the mock
+// // function to type T and copies it to *output.
+// ACTION_TEMPLATE(DuplicateArg,
+// HAS_2_TEMPLATE_PARAMS(int, k, typename, T),
+// AND_1_VALUE_PARAMS(output)) {
+// *output = T(std::tr1::get<k>(args));
+// }
+// ...
+// int n;
+// EXPECT_CALL(mock, Foo(_, _))
+// .WillOnce(DuplicateArg<1, unsigned char>(&n));
+//
+// To create an instance of an action template, write:
+//
+// ActionName<t1, ..., t_m>(v1, ..., v_n)
+//
+// where the ts are the template arguments and the vs are the value
+// arguments. The value argument types are inferred by the compiler.
+// If you want to explicitly specify the value argument types, you can
+// provide additional template arguments:
+//
+// ActionName<t1, ..., t_m, u1, ..., u_k>(v1, ..., v_n)
+//
+// where u_i is the desired type of v_i.
+//
+// ACTION_TEMPLATE and ACTION/ACTION_P* can be overloaded on the
+// number of value parameters, but not on the number of template
+// parameters. Without the restriction, the meaning of the following
+// is unclear:
+//
+// OverloadedAction<int, bool>(x);
+//
+// Are we using a single-template-parameter action where 'bool' refers
+// to the type of x, or are we using a two-template-parameter action
+// where the compiler is asked to infer the type of x?
+//
+// Implementation notes:
+//
+// GMOCK_INTERNAL_*_HAS_m_TEMPLATE_PARAMS and
+// GMOCK_INTERNAL_*_AND_n_VALUE_PARAMS are internal macros for
+// implementing ACTION_TEMPLATE. The main trick we use is to create
+// new macro invocations when expanding a macro. For example, we have
+//
+// #define ACTION_TEMPLATE(name, template_params, value_params)
+// ... GMOCK_INTERNAL_DECL_##template_params ...
+//
+// which causes ACTION_TEMPLATE(..., HAS_1_TEMPLATE_PARAMS(typename, T), ...)
+// to expand to
+//
+// ... GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS(typename, T) ...
+//
+// Since GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS is a macro, the
+// preprocessor will continue to expand it to
+//
+// ... typename T ...
+//
+// This technique conforms to the C++ standard and is portable. It
+// allows us to implement action templates using O(N) code, where N is
+// the maximum number of template/value parameters supported. Without
+// using it, we'd have to devote O(N^2) amount of code to implement all
+// combinations of m and n.
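+//
+// For instance, GMOCK_INTERNAL_DECL_HAS_2_TEMPLATE_PARAMS(int, k, typename, T)
+// expands to "int k, typename T", while the corresponding
+// GMOCK_INTERNAL_LIST_HAS_2_TEMPLATE_PARAMS(int, k, typename, T) expands to
+// just "k, T".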
+
+// Declares the template parameters.
+#define GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS(kind0, name0) kind0 name0
+#define GMOCK_INTERNAL_DECL_HAS_2_TEMPLATE_PARAMS(kind0, name0, kind1, \
+ name1) kind0 name0, kind1 name1
+#define GMOCK_INTERNAL_DECL_HAS_3_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+ kind2, name2) kind0 name0, kind1 name1, kind2 name2
+#define GMOCK_INTERNAL_DECL_HAS_4_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+ kind2, name2, kind3, name3) kind0 name0, kind1 name1, kind2 name2, \
+ kind3 name3
+#define GMOCK_INTERNAL_DECL_HAS_5_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+ kind2, name2, kind3, name3, kind4, name4) kind0 name0, kind1 name1, \
+ kind2 name2, kind3 name3, kind4 name4
+#define GMOCK_INTERNAL_DECL_HAS_6_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+ kind2, name2, kind3, name3, kind4, name4, kind5, name5) kind0 name0, \
+ kind1 name1, kind2 name2, kind3 name3, kind4 name4, kind5 name5
+#define GMOCK_INTERNAL_DECL_HAS_7_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+ kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \
+ name6) kind0 name0, kind1 name1, kind2 name2, kind3 name3, kind4 name4, \
+ kind5 name5, kind6 name6
+#define GMOCK_INTERNAL_DECL_HAS_8_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+ kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \
+ kind7, name7) kind0 name0, kind1 name1, kind2 name2, kind3 name3, \
+ kind4 name4, kind5 name5, kind6 name6, kind7 name7
+#define GMOCK_INTERNAL_DECL_HAS_9_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+ kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \
+ kind7, name7, kind8, name8) kind0 name0, kind1 name1, kind2 name2, \
+ kind3 name3, kind4 name4, kind5 name5, kind6 name6, kind7 name7, \
+ kind8 name8
+#define GMOCK_INTERNAL_DECL_HAS_10_TEMPLATE_PARAMS(kind0, name0, kind1, \
+ name1, kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \
+ name6, kind7, name7, kind8, name8, kind9, name9) kind0 name0, \
+ kind1 name1, kind2 name2, kind3 name3, kind4 name4, kind5 name5, \
+ kind6 name6, kind7 name7, kind8 name8, kind9 name9
+
+// Lists the template parameters.
+#define GMOCK_INTERNAL_LIST_HAS_1_TEMPLATE_PARAMS(kind0, name0) name0
+#define GMOCK_INTERNAL_LIST_HAS_2_TEMPLATE_PARAMS(kind0, name0, kind1, \
+ name1) name0, name1
+#define GMOCK_INTERNAL_LIST_HAS_3_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+ kind2, name2) name0, name1, name2
+#define GMOCK_INTERNAL_LIST_HAS_4_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+ kind2, name2, kind3, name3) name0, name1, name2, name3
+#define GMOCK_INTERNAL_LIST_HAS_5_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+ kind2, name2, kind3, name3, kind4, name4) name0, name1, name2, name3, \
+ name4
+#define GMOCK_INTERNAL_LIST_HAS_6_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+ kind2, name2, kind3, name3, kind4, name4, kind5, name5) name0, name1, \
+ name2, name3, name4, name5
+#define GMOCK_INTERNAL_LIST_HAS_7_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+ kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \
+ name6) name0, name1, name2, name3, name4, name5, name6
+#define GMOCK_INTERNAL_LIST_HAS_8_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+ kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \
+ kind7, name7) name0, name1, name2, name3, name4, name5, name6, name7
+#define GMOCK_INTERNAL_LIST_HAS_9_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \
+ kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \
+ kind7, name7, kind8, name8) name0, name1, name2, name3, name4, name5, \
+ name6, name7, name8
+#define GMOCK_INTERNAL_LIST_HAS_10_TEMPLATE_PARAMS(kind0, name0, kind1, \
+ name1, kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \
+ name6, kind7, name7, kind8, name8, kind9, name9) name0, name1, name2, \
+ name3, name4, name5, name6, name7, name8, name9
+
+// Declares the types of value parameters.
+#define GMOCK_INTERNAL_DECL_TYPE_AND_0_VALUE_PARAMS()
+#define GMOCK_INTERNAL_DECL_TYPE_AND_1_VALUE_PARAMS(p0) , typename p0##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_2_VALUE_PARAMS(p0, p1) , \
+ typename p0##_type, typename p1##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_3_VALUE_PARAMS(p0, p1, p2) , \
+ typename p0##_type, typename p1##_type, typename p2##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_4_VALUE_PARAMS(p0, p1, p2, p3) , \
+ typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) , \
+ typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) , \
+ typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+ p6) , typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+ p6, p7) , typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+ p6, p7, p8) , typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type, typename p8##_type
+#define GMOCK_INTERNAL_DECL_TYPE_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+ p6, p7, p8, p9) , typename p0##_type, typename p1##_type, \
+ typename p2##_type, typename p3##_type, typename p4##_type, \
+ typename p5##_type, typename p6##_type, typename p7##_type, \
+ typename p8##_type, typename p9##_type
+
+// Initializes the value parameters.
+#define GMOCK_INTERNAL_INIT_AND_0_VALUE_PARAMS()\
+ ()
+#define GMOCK_INTERNAL_INIT_AND_1_VALUE_PARAMS(p0)\
+ (p0##_type gmock_p0) : p0(gmock_p0)
+#define GMOCK_INTERNAL_INIT_AND_2_VALUE_PARAMS(p0, p1)\
+ (p0##_type gmock_p0, p1##_type gmock_p1) : p0(gmock_p0), p1(gmock_p1)
+#define GMOCK_INTERNAL_INIT_AND_3_VALUE_PARAMS(p0, p1, p2)\
+ (p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2)
+#define GMOCK_INTERNAL_INIT_AND_4_VALUE_PARAMS(p0, p1, p2, p3)\
+ (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3)
+#define GMOCK_INTERNAL_INIT_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4)\
+ (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4) : p0(gmock_p0), p1(gmock_p1), \
+ p2(gmock_p2), p3(gmock_p3), p4(gmock_p4)
+#define GMOCK_INTERNAL_INIT_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5)\
+ (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4, \
+ p5##_type gmock_p5) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3), p4(gmock_p4), p5(gmock_p5)
+#define GMOCK_INTERNAL_INIT_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6)\
+ (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+ p6##_type gmock_p6) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6)
+#define GMOCK_INTERNAL_INIT_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7)\
+ (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+ p6##_type gmock_p6, p7##_type gmock_p7) : p0(gmock_p0), p1(gmock_p1), \
+ p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \
+ p7(gmock_p7)
+#define GMOCK_INTERNAL_INIT_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8)\
+ (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+ p6##_type gmock_p6, p7##_type gmock_p7, \
+ p8##_type gmock_p8) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \
+ p8(gmock_p8)
+#define GMOCK_INTERNAL_INIT_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8, p9)\
+ (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+ p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8, \
+ p9##_type gmock_p9) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \
+ p8(gmock_p8), p9(gmock_p9)
+
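+// For example, GMOCK_INTERNAL_INIT_AND_2_VALUE_PARAMS(x, y) above expands to
+//
+//   (x_type gmock_x, y_type gmock_y) : x(gmock_x), y(gmock_y)
+//
+// i.e. a constructor parameter list followed by its member initializers.
+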
+// Declares the fields for storing the value parameters.
+#define GMOCK_INTERNAL_DEFN_AND_0_VALUE_PARAMS()
+#define GMOCK_INTERNAL_DEFN_AND_1_VALUE_PARAMS(p0) p0##_type p0;
+#define GMOCK_INTERNAL_DEFN_AND_2_VALUE_PARAMS(p0, p1) p0##_type p0; \
+ p1##_type p1;
+#define GMOCK_INTERNAL_DEFN_AND_3_VALUE_PARAMS(p0, p1, p2) p0##_type p0; \
+ p1##_type p1; p2##_type p2;
+#define GMOCK_INTERNAL_DEFN_AND_4_VALUE_PARAMS(p0, p1, p2, p3) p0##_type p0; \
+ p1##_type p1; p2##_type p2; p3##_type p3;
+#define GMOCK_INTERNAL_DEFN_AND_5_VALUE_PARAMS(p0, p1, p2, p3, \
+ p4) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4;
+#define GMOCK_INTERNAL_DEFN_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, \
+ p5) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; \
+ p5##_type p5;
+#define GMOCK_INTERNAL_DEFN_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+ p6) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; \
+ p5##_type p5; p6##_type p6;
+#define GMOCK_INTERNAL_DEFN_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+ p7) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; \
+ p5##_type p5; p6##_type p6; p7##_type p7;
+#define GMOCK_INTERNAL_DEFN_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; \
+ p4##_type p4; p5##_type p5; p6##_type p6; p7##_type p7; p8##_type p8;
+#define GMOCK_INTERNAL_DEFN_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8, p9) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; \
+ p4##_type p4; p5##_type p5; p6##_type p6; p7##_type p7; p8##_type p8; \
+ p9##_type p9;
+
+// Lists the value parameters.
+#define GMOCK_INTERNAL_LIST_AND_0_VALUE_PARAMS()
+#define GMOCK_INTERNAL_LIST_AND_1_VALUE_PARAMS(p0) p0
+#define GMOCK_INTERNAL_LIST_AND_2_VALUE_PARAMS(p0, p1) p0, p1
+#define GMOCK_INTERNAL_LIST_AND_3_VALUE_PARAMS(p0, p1, p2) p0, p1, p2
+#define GMOCK_INTERNAL_LIST_AND_4_VALUE_PARAMS(p0, p1, p2, p3) p0, p1, p2, p3
+#define GMOCK_INTERNAL_LIST_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) p0, p1, \
+ p2, p3, p4
+#define GMOCK_INTERNAL_LIST_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) p0, \
+ p1, p2, p3, p4, p5
+#define GMOCK_INTERNAL_LIST_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+ p6) p0, p1, p2, p3, p4, p5, p6
+#define GMOCK_INTERNAL_LIST_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+ p7) p0, p1, p2, p3, p4, p5, p6, p7
+#define GMOCK_INTERNAL_LIST_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) p0, p1, p2, p3, p4, p5, p6, p7, p8
+#define GMOCK_INTERNAL_LIST_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8, p9) p0, p1, p2, p3, p4, p5, p6, p7, p8, p9
+
+// Lists the value parameter types.
+#define GMOCK_INTERNAL_LIST_TYPE_AND_0_VALUE_PARAMS()
+#define GMOCK_INTERNAL_LIST_TYPE_AND_1_VALUE_PARAMS(p0) , p0##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_2_VALUE_PARAMS(p0, p1) , p0##_type, \
+ p1##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_3_VALUE_PARAMS(p0, p1, p2) , p0##_type, \
+ p1##_type, p2##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_4_VALUE_PARAMS(p0, p1, p2, p3) , \
+ p0##_type, p1##_type, p2##_type, p3##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) , \
+ p0##_type, p1##_type, p2##_type, p3##_type, p4##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) , \
+ p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+ p6) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type, \
+ p6##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+ p6, p7) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+ p5##_type, p6##_type, p7##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+ p6, p7, p8) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+ p5##_type, p6##_type, p7##_type, p8##_type
+#define GMOCK_INTERNAL_LIST_TYPE_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+ p6, p7, p8, p9) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+ p5##_type, p6##_type, p7##_type, p8##_type, p9##_type
+
+// Declares the value parameters.
+#define GMOCK_INTERNAL_DECL_AND_0_VALUE_PARAMS()
+#define GMOCK_INTERNAL_DECL_AND_1_VALUE_PARAMS(p0) p0##_type p0
+#define GMOCK_INTERNAL_DECL_AND_2_VALUE_PARAMS(p0, p1) p0##_type p0, \
+ p1##_type p1
+#define GMOCK_INTERNAL_DECL_AND_3_VALUE_PARAMS(p0, p1, p2) p0##_type p0, \
+ p1##_type p1, p2##_type p2
+#define GMOCK_INTERNAL_DECL_AND_4_VALUE_PARAMS(p0, p1, p2, p3) p0##_type p0, \
+ p1##_type p1, p2##_type p2, p3##_type p3
+#define GMOCK_INTERNAL_DECL_AND_5_VALUE_PARAMS(p0, p1, p2, p3, \
+ p4) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4
+#define GMOCK_INTERNAL_DECL_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, \
+ p5) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \
+ p5##_type p5
+#define GMOCK_INTERNAL_DECL_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \
+ p6) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \
+ p5##_type p5, p6##_type p6
+#define GMOCK_INTERNAL_DECL_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+ p7) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \
+ p5##_type p5, p6##_type p6, p7##_type p7
+#define GMOCK_INTERNAL_DECL_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \
+ p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8
+#define GMOCK_INTERNAL_DECL_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8, p9) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \
+ p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8, \
+ p9##_type p9
+
+// The suffix of the class template implementing the action template.
+#define GMOCK_INTERNAL_COUNT_AND_0_VALUE_PARAMS()
+#define GMOCK_INTERNAL_COUNT_AND_1_VALUE_PARAMS(p0) P
+#define GMOCK_INTERNAL_COUNT_AND_2_VALUE_PARAMS(p0, p1) P2
+#define GMOCK_INTERNAL_COUNT_AND_3_VALUE_PARAMS(p0, p1, p2) P3
+#define GMOCK_INTERNAL_COUNT_AND_4_VALUE_PARAMS(p0, p1, p2, p3) P4
+#define GMOCK_INTERNAL_COUNT_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) P5
+#define GMOCK_INTERNAL_COUNT_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) P6
+#define GMOCK_INTERNAL_COUNT_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6) P7
+#define GMOCK_INTERNAL_COUNT_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+ p7) P8
+#define GMOCK_INTERNAL_COUNT_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) P9
+#define GMOCK_INTERNAL_COUNT_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8, p9) P10
+
+// The name of the class template implementing the action template.
+#define GMOCK_ACTION_CLASS_(name, value_params)\
+ GTEST_CONCAT_TOKEN_(name##Action, GMOCK_INTERNAL_COUNT_##value_params)
+
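+// For example, GMOCK_ACTION_CLASS_(Foo, AND_2_VALUE_PARAMS(x, y)) names the
+// class template FooActionP2 (illustrative expansion only).
+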
+#define ACTION_TEMPLATE(name, template_params, value_params)\
+ template <GMOCK_INTERNAL_DECL_##template_params\
+ GMOCK_INTERNAL_DECL_TYPE_##value_params>\
+ class GMOCK_ACTION_CLASS_(name, value_params) {\
+ public:\
+ GMOCK_ACTION_CLASS_(name, value_params)\
+ GMOCK_INTERNAL_INIT_##value_params {}\
+ template <typename F>\
+ class gmock_Impl : public ::testing::ActionInterface<F> {\
+ public:\
+ typedef F function_type;\
+ typedef typename ::testing::internal::Function<F>::Result return_type;\
+ typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+ args_type;\
+ explicit gmock_Impl GMOCK_INTERNAL_INIT_##value_params {}\
+ virtual return_type Perform(const args_type& args) {\
+ return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+ Perform(this, args);\
+ }\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+ arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+ arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+ arg9_type arg9) const;\
+ GMOCK_INTERNAL_DEFN_##value_params\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename F> operator ::testing::Action<F>() const {\
+ return ::testing::Action<F>(\
+ new gmock_Impl<F>(GMOCK_INTERNAL_LIST_##value_params));\
+ }\
+ GMOCK_INTERNAL_DEFN_##value_params\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(GMOCK_ACTION_CLASS_(name, value_params));\
+ };\
+ template <GMOCK_INTERNAL_DECL_##template_params\
+ GMOCK_INTERNAL_DECL_TYPE_##value_params>\
+ inline GMOCK_ACTION_CLASS_(name, value_params)<\
+ GMOCK_INTERNAL_LIST_##template_params\
+ GMOCK_INTERNAL_LIST_TYPE_##value_params> name(\
+ GMOCK_INTERNAL_DECL_##value_params) {\
+ return GMOCK_ACTION_CLASS_(name, value_params)<\
+ GMOCK_INTERNAL_LIST_##template_params\
+ GMOCK_INTERNAL_LIST_TYPE_##value_params>(\
+ GMOCK_INTERNAL_LIST_##value_params);\
+ }\
+ template <GMOCK_INTERNAL_DECL_##template_params\
+ GMOCK_INTERNAL_DECL_TYPE_##value_params>\
+ template <typename F>\
+ template <typename arg0_type, typename arg1_type, typename arg2_type,\
+ typename arg3_type, typename arg4_type, typename arg5_type,\
+ typename arg6_type, typename arg7_type, typename arg8_type,\
+ typename arg9_type>\
+ typename ::testing::internal::Function<F>::Result\
+ GMOCK_ACTION_CLASS_(name, value_params)<\
+ GMOCK_INTERNAL_LIST_##template_params\
+ GMOCK_INTERNAL_LIST_TYPE_##value_params>::gmock_Impl<F>::\
+ gmock_PerformImpl(\
+ GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION(name)\
+ class name##Action {\
+ public:\
+ name##Action() {}\
+ template <typename F>\
+ class gmock_Impl : public ::testing::ActionInterface<F> {\
+ public:\
+ typedef F function_type;\
+ typedef typename ::testing::internal::Function<F>::Result return_type;\
+ typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+ args_type;\
+ gmock_Impl() {}\
+ virtual return_type Perform(const args_type& args) {\
+ return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+ Perform(this, args);\
+ }\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+ arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+ arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+ arg9_type arg9) const;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename F> operator ::testing::Action<F>() const {\
+ return ::testing::Action<F>(new gmock_Impl<F>());\
+ }\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##Action);\
+ };\
+ inline name##Action name() {\
+ return name##Action();\
+ }\
+ template <typename F>\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ typename ::testing::internal::Function<F>::Result\
+ name##Action::gmock_Impl<F>::gmock_PerformImpl(\
+ GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P(name, p0)\
+ template <typename p0##_type>\
+ class name##ActionP {\
+ public:\
+ name##ActionP(p0##_type gmock_p0) : p0(gmock_p0) {}\
+ template <typename F>\
+ class gmock_Impl : public ::testing::ActionInterface<F> {\
+ public:\
+ typedef F function_type;\
+ typedef typename ::testing::internal::Function<F>::Result return_type;\
+ typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+ args_type;\
+ explicit gmock_Impl(p0##_type gmock_p0) : p0(gmock_p0) {}\
+ virtual return_type Perform(const args_type& args) {\
+ return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+ Perform(this, args);\
+ }\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+ arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+ arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+ arg9_type arg9) const;\
+ p0##_type p0;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename F> operator ::testing::Action<F>() const {\
+ return ::testing::Action<F>(new gmock_Impl<F>(p0));\
+ }\
+ p0##_type p0;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##ActionP);\
+ };\
+ template <typename p0##_type>\
+ inline name##ActionP<p0##_type> name(p0##_type p0) {\
+ return name##ActionP<p0##_type>(p0);\
+ }\
+ template <typename p0##_type>\
+ template <typename F>\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ typename ::testing::internal::Function<F>::Result\
+ name##ActionP<p0##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+ GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P2(name, p0, p1)\
+ template <typename p0##_type, typename p1##_type>\
+ class name##ActionP2 {\
+ public:\
+ name##ActionP2(p0##_type gmock_p0, p1##_type gmock_p1) : p0(gmock_p0), \
+ p1(gmock_p1) {}\
+ template <typename F>\
+ class gmock_Impl : public ::testing::ActionInterface<F> {\
+ public:\
+ typedef F function_type;\
+ typedef typename ::testing::internal::Function<F>::Result return_type;\
+ typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+ args_type;\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1) : p0(gmock_p0), \
+ p1(gmock_p1) {}\
+ virtual return_type Perform(const args_type& args) {\
+ return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+ Perform(this, args);\
+ }\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+ arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+ arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+ arg9_type arg9) const;\
+ p0##_type p0;\
+ p1##_type p1;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename F> operator ::testing::Action<F>() const {\
+ return ::testing::Action<F>(new gmock_Impl<F>(p0, p1));\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##ActionP2);\
+ };\
+ template <typename p0##_type, typename p1##_type>\
+ inline name##ActionP2<p0##_type, p1##_type> name(p0##_type p0, \
+ p1##_type p1) {\
+ return name##ActionP2<p0##_type, p1##_type>(p0, p1);\
+ }\
+ template <typename p0##_type, typename p1##_type>\
+ template <typename F>\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ typename ::testing::internal::Function<F>::Result\
+ name##ActionP2<p0##_type, p1##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+ GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P3(name, p0, p1, p2)\
+ template <typename p0##_type, typename p1##_type, typename p2##_type>\
+ class name##ActionP3 {\
+ public:\
+ name##ActionP3(p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2) {}\
+ template <typename F>\
+ class gmock_Impl : public ::testing::ActionInterface<F> {\
+ public:\
+ typedef F function_type;\
+ typedef typename ::testing::internal::Function<F>::Result return_type;\
+ typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+ args_type;\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2) {}\
+ virtual return_type Perform(const args_type& args) {\
+ return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+ Perform(this, args);\
+ }\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+ arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+ arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+ arg9_type arg9) const;\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename F> operator ::testing::Action<F>() const {\
+ return ::testing::Action<F>(new gmock_Impl<F>(p0, p1, p2));\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##ActionP3);\
+ };\
+ template <typename p0##_type, typename p1##_type, typename p2##_type>\
+ inline name##ActionP3<p0##_type, p1##_type, p2##_type> name(p0##_type p0, \
+ p1##_type p1, p2##_type p2) {\
+ return name##ActionP3<p0##_type, p1##_type, p2##_type>(p0, p1, p2);\
+ }\
+ template <typename p0##_type, typename p1##_type, typename p2##_type>\
+ template <typename F>\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ typename ::testing::internal::Function<F>::Result\
+ name##ActionP3<p0##_type, p1##_type, \
+ p2##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+ GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P4(name, p0, p1, p2, p3)\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type>\
+ class name##ActionP4 {\
+ public:\
+ name##ActionP4(p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2, p3##_type gmock_p3) : p0(gmock_p0), p1(gmock_p1), \
+ p2(gmock_p2), p3(gmock_p3) {}\
+ template <typename F>\
+ class gmock_Impl : public ::testing::ActionInterface<F> {\
+ public:\
+ typedef F function_type;\
+ typedef typename ::testing::internal::Function<F>::Result return_type;\
+ typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+ args_type;\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3) {}\
+ virtual return_type Perform(const args_type& args) {\
+ return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+ Perform(this, args);\
+ }\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+ arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+ arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+ arg9_type arg9) const;\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename F> operator ::testing::Action<F>() const {\
+ return ::testing::Action<F>(new gmock_Impl<F>(p0, p1, p2, p3));\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##ActionP4);\
+ };\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type>\
+ inline name##ActionP4<p0##_type, p1##_type, p2##_type, \
+ p3##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, \
+ p3##_type p3) {\
+ return name##ActionP4<p0##_type, p1##_type, p2##_type, p3##_type>(p0, p1, \
+ p2, p3);\
+ }\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type>\
+ template <typename F>\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ typename ::testing::internal::Function<F>::Result\
+ name##ActionP4<p0##_type, p1##_type, p2##_type, \
+ p3##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+ GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P5(name, p0, p1, p2, p3, p4)\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type>\
+ class name##ActionP5 {\
+ public:\
+ name##ActionP5(p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2, p3##_type gmock_p3, \
+ p4##_type gmock_p4) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3), p4(gmock_p4) {}\
+ template <typename F>\
+ class gmock_Impl : public ::testing::ActionInterface<F> {\
+ public:\
+ typedef F function_type;\
+ typedef typename ::testing::internal::Function<F>::Result return_type;\
+ typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+ args_type;\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4) : p0(gmock_p0), \
+ p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), p4(gmock_p4) {}\
+ virtual return_type Perform(const args_type& args) {\
+ return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+ Perform(this, args);\
+ }\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+ arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+ arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+ arg9_type arg9) const;\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename F> operator ::testing::Action<F>() const {\
+ return ::testing::Action<F>(new gmock_Impl<F>(p0, p1, p2, p3, p4));\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##ActionP5);\
+ };\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type>\
+ inline name##ActionP5<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \
+ p4##_type p4) {\
+ return name##ActionP5<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type>(p0, p1, p2, p3, p4);\
+ }\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type>\
+ template <typename F>\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ typename ::testing::internal::Function<F>::Result\
+ name##ActionP5<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+ GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P6(name, p0, p1, p2, p3, p4, p5)\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type>\
+ class name##ActionP6 {\
+ public:\
+ name##ActionP6(p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+ p5##_type gmock_p5) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3), p4(gmock_p4), p5(gmock_p5) {}\
+ template <typename F>\
+ class gmock_Impl : public ::testing::ActionInterface<F> {\
+ public:\
+ typedef F function_type;\
+ typedef typename ::testing::internal::Function<F>::Result return_type;\
+ typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+ args_type;\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4, \
+ p5##_type gmock_p5) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3), p4(gmock_p4), p5(gmock_p5) {}\
+ virtual return_type Perform(const args_type& args) {\
+ return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+ Perform(this, args);\
+ }\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+ arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+ arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+ arg9_type arg9) const;\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename F> operator ::testing::Action<F>() const {\
+ return ::testing::Action<F>(new gmock_Impl<F>(p0, p1, p2, p3, p4, p5));\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##ActionP6);\
+ };\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type>\
+ inline name##ActionP6<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, \
+ p3##_type p3, p4##_type p4, p5##_type p5) {\
+ return name##ActionP6<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type>(p0, p1, p2, p3, p4, p5);\
+ }\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type>\
+ template <typename F>\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ typename ::testing::internal::Function<F>::Result\
+ name##ActionP6<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+ p5##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+ GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P7(name, p0, p1, p2, p3, p4, p5, p6)\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type>\
+ class name##ActionP7 {\
+ public:\
+ name##ActionP7(p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+ p5##_type gmock_p5, p6##_type gmock_p6) : p0(gmock_p0), p1(gmock_p1), \
+ p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), \
+ p6(gmock_p6) {}\
+ template <typename F>\
+ class gmock_Impl : public ::testing::ActionInterface<F> {\
+ public:\
+ typedef F function_type;\
+ typedef typename ::testing::internal::Function<F>::Result return_type;\
+ typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+ args_type;\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+ p6##_type gmock_p6) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6) {}\
+ virtual return_type Perform(const args_type& args) {\
+ return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+ Perform(this, args);\
+ }\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+ arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+ arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+ arg9_type arg9) const;\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ p6##_type p6;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename F> operator ::testing::Action<F>() const {\
+ return ::testing::Action<F>(new gmock_Impl<F>(p0, p1, p2, p3, p4, p5, \
+ p6));\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ p6##_type p6;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##ActionP7);\
+ };\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type>\
+ inline name##ActionP7<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type> name(p0##_type p0, p1##_type p1, \
+ p2##_type p2, p3##_type p3, p4##_type p4, p5##_type p5, \
+ p6##_type p6) {\
+ return name##ActionP7<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type>(p0, p1, p2, p3, p4, p5, p6);\
+ }\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type>\
+ template <typename F>\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ typename ::testing::internal::Function<F>::Result\
+ name##ActionP7<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+ p5##_type, p6##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+ GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P8(name, p0, p1, p2, p3, p4, p5, p6, p7)\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type>\
+ class name##ActionP8 {\
+ public:\
+ name##ActionP8(p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+ p5##_type gmock_p5, p6##_type gmock_p6, \
+ p7##_type gmock_p7) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \
+ p7(gmock_p7) {}\
+ template <typename F>\
+ class gmock_Impl : public ::testing::ActionInterface<F> {\
+ public:\
+ typedef F function_type;\
+ typedef typename ::testing::internal::Function<F>::Result return_type;\
+ typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+ args_type;\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+ p6##_type gmock_p6, p7##_type gmock_p7) : p0(gmock_p0), \
+ p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), \
+ p5(gmock_p5), p6(gmock_p6), p7(gmock_p7) {}\
+ virtual return_type Perform(const args_type& args) {\
+ return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+ Perform(this, args);\
+ }\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+ arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+ arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+ arg9_type arg9) const;\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ p6##_type p6;\
+ p7##_type p7;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename F> operator ::testing::Action<F>() const {\
+ return ::testing::Action<F>(new gmock_Impl<F>(p0, p1, p2, p3, p4, p5, \
+ p6, p7));\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ p6##_type p6;\
+ p7##_type p7;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##ActionP8);\
+ };\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type>\
+ inline name##ActionP8<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type, p7##_type> name(p0##_type p0, \
+ p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, p5##_type p5, \
+ p6##_type p6, p7##_type p7) {\
+ return name##ActionP8<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type, p7##_type>(p0, p1, p2, p3, p4, p5, \
+ p6, p7);\
+ }\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type>\
+ template <typename F>\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ typename ::testing::internal::Function<F>::Result\
+ name##ActionP8<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+ p5##_type, p6##_type, \
+ p7##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+ GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P9(name, p0, p1, p2, p3, p4, p5, p6, p7, p8)\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type, typename p8##_type>\
+ class name##ActionP9 {\
+ public:\
+ name##ActionP9(p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+ p5##_type gmock_p5, p6##_type gmock_p6, p7##_type gmock_p7, \
+ p8##_type gmock_p8) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \
+ p8(gmock_p8) {}\
+ template <typename F>\
+ class gmock_Impl : public ::testing::ActionInterface<F> {\
+ public:\
+ typedef F function_type;\
+ typedef typename ::testing::internal::Function<F>::Result return_type;\
+ typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+ args_type;\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+ p6##_type gmock_p6, p7##_type gmock_p7, \
+ p8##_type gmock_p8) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \
+ p7(gmock_p7), p8(gmock_p8) {}\
+ virtual return_type Perform(const args_type& args) {\
+ return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+ Perform(this, args);\
+ }\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+ arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+ arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+ arg9_type arg9) const;\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ p6##_type p6;\
+ p7##_type p7;\
+ p8##_type p8;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename F> operator ::testing::Action<F>() const {\
+ return ::testing::Action<F>(new gmock_Impl<F>(p0, p1, p2, p3, p4, p5, \
+ p6, p7, p8));\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ p6##_type p6;\
+ p7##_type p7;\
+ p8##_type p8;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##ActionP9);\
+ };\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type, typename p8##_type>\
+ inline name##ActionP9<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type, p7##_type, \
+ p8##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \
+ p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, \
+ p8##_type p8) {\
+ return name##ActionP9<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type, p7##_type, p8##_type>(p0, p1, p2, \
+ p3, p4, p5, p6, p7, p8);\
+ }\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type, typename p8##_type>\
+ template <typename F>\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ typename ::testing::internal::Function<F>::Result\
+ name##ActionP9<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+ p5##_type, p6##_type, p7##_type, \
+ p8##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+ GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+#define ACTION_P10(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type, typename p8##_type, \
+ typename p9##_type>\
+ class name##ActionP10 {\
+ public:\
+ name##ActionP10(p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+ p5##_type gmock_p5, p6##_type gmock_p6, p7##_type gmock_p7, \
+ p8##_type gmock_p8, p9##_type gmock_p9) : p0(gmock_p0), p1(gmock_p1), \
+ p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \
+ p7(gmock_p7), p8(gmock_p8), p9(gmock_p9) {}\
+ template <typename F>\
+ class gmock_Impl : public ::testing::ActionInterface<F> {\
+ public:\
+ typedef F function_type;\
+ typedef typename ::testing::internal::Function<F>::Result return_type;\
+ typedef typename ::testing::internal::Function<F>::ArgumentTuple\
+ args_type;\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+ p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8, \
+ p9##_type gmock_p9) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \
+ p7(gmock_p7), p8(gmock_p8), p9(gmock_p9) {}\
+ virtual return_type Perform(const args_type& args) {\
+ return ::testing::internal::ActionHelper<return_type, gmock_Impl>::\
+ Perform(this, args);\
+ }\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \
+ arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \
+ arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \
+ arg9_type arg9) const;\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ p6##_type p6;\
+ p7##_type p7;\
+ p8##_type p8;\
+ p9##_type p9;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename F> operator ::testing::Action<F>() const {\
+ return ::testing::Action<F>(new gmock_Impl<F>(p0, p1, p2, p3, p4, p5, \
+ p6, p7, p8, p9));\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ p6##_type p6;\
+ p7##_type p7;\
+ p8##_type p8;\
+ p9##_type p9;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##ActionP10);\
+ };\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type, typename p8##_type, \
+ typename p9##_type>\
+ inline name##ActionP10<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type, p7##_type, p8##_type, \
+ p9##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \
+ p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8, \
+ p9##_type p9) {\
+ return name##ActionP10<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type, p7##_type, p8##_type, p9##_type>(p0, \
+ p1, p2, p3, p4, p5, p6, p7, p8, p9);\
+ }\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type, typename p8##_type, \
+ typename p9##_type>\
+ template <typename F>\
+ template <typename arg0_type, typename arg1_type, typename arg2_type, \
+ typename arg3_type, typename arg4_type, typename arg5_type, \
+ typename arg6_type, typename arg7_type, typename arg8_type, \
+ typename arg9_type>\
+ typename ::testing::internal::Function<F>::Result\
+ name##ActionP10<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+ p5##_type, p6##_type, p7##_type, p8##_type, \
+ p9##_type>::gmock_Impl<F>::gmock_PerformImpl(\
+ GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const
+
+// TODO(wan@google.com): move the following to a different .h file
+// such that we don't have to run 'pump' every time the code is
+// updated.
+namespace testing {
+
+// The ACTION*() macros trigger warning C4100 (unreferenced formal
+// parameter) in MSVC with -W4. Unfortunately they cannot be fixed in
+// the macro definition, as the warnings are generated when the macro
+// is expanded and macro expansion cannot contain #pragma. Therefore
+// we suppress them here.
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable:4100)
+#endif
+
+// Various overloads for InvokeArgument<N>().
+//
+// The InvokeArgument<N>(a1, a2, ..., a_k) action invokes the N-th
+// (0-based) argument, which must be a k-ary callable, of the mock
+// function, with arguments a1, a2, ..., a_k.
+//
+// Notes:
+//
+// 1. The arguments are passed by value by default. If you need to
+// pass an argument by reference, wrap it inside ByRef(). For
+// example,
+//
+// InvokeArgument<1>(5, string("Hello"), ByRef(foo))
+//
+// passes 5 and string("Hello") by value, and passes foo by
+// reference.
+//
+// 2. If the callable takes an argument by reference but ByRef() is
+// not used, it will receive a reference to a copy of the value,
+// instead of the original value. For example, when the 0-th
+// argument of the mock function takes a const string&, the action
+//
+// InvokeArgument<0>(string("Hello"))
+//
+// makes a copy of the temporary string("Hello") object and passes a
+// reference of the copy, instead of the original temporary object,
+// to the callable. This makes it easy for a user to define an
+// InvokeArgument action from temporary values and have it performed
+// later.
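+//
+// As an illustrative usage sketch (mock_foo and its DoThis() method, which
+// takes a callable as its first argument, are hypothetical and not defined
+// in this header), the action below invokes that callable with the value 5:
+//
+//   EXPECT_CALL(mock_foo, DoThis(_))
+//       .WillOnce(InvokeArgument<0>(5));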
+
+ACTION_TEMPLATE(InvokeArgument,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_0_VALUE_PARAMS()) {
+ return internal::CallableHelper<return_type>::Call(
+ ::std::tr1::get<k>(args));
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_1_VALUE_PARAMS(p0)) {
+ return internal::CallableHelper<return_type>::Call(
+ ::std::tr1::get<k>(args), p0);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_2_VALUE_PARAMS(p0, p1)) {
+ return internal::CallableHelper<return_type>::Call(
+ ::std::tr1::get<k>(args), p0, p1);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_3_VALUE_PARAMS(p0, p1, p2)) {
+ return internal::CallableHelper<return_type>::Call(
+ ::std::tr1::get<k>(args), p0, p1, p2);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_4_VALUE_PARAMS(p0, p1, p2, p3)) {
+ return internal::CallableHelper<return_type>::Call(
+ ::std::tr1::get<k>(args), p0, p1, p2, p3);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4)) {
+ return internal::CallableHelper<return_type>::Call(
+ ::std::tr1::get<k>(args), p0, p1, p2, p3, p4);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5)) {
+ return internal::CallableHelper<return_type>::Call(
+ ::std::tr1::get<k>(args), p0, p1, p2, p3, p4, p5);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6)) {
+ return internal::CallableHelper<return_type>::Call(
+ ::std::tr1::get<k>(args), p0, p1, p2, p3, p4, p5, p6);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7)) {
+ return internal::CallableHelper<return_type>::Call(
+ ::std::tr1::get<k>(args), p0, p1, p2, p3, p4, p5, p6, p7);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, p8)) {
+ return internal::CallableHelper<return_type>::Call(
+ ::std::tr1::get<k>(args), p0, p1, p2, p3, p4, p5, p6, p7, p8);
+}
+
+ACTION_TEMPLATE(InvokeArgument,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)) {
+ return internal::CallableHelper<return_type>::Call(
+ ::std::tr1::get<k>(args), p0, p1, p2, p3, p4, p5, p6, p7, p8, p9);
+}
+
+// Various overloads for ReturnNew<T>().
+//
+// The ReturnNew<T>(a1, a2, ..., a_k) action returns a pointer to a new
+// instance of type T, constructed on the heap with constructor arguments
+// a1, a2, ..., and a_k. The caller assumes ownership of the returned value.
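+//
+// For example (an illustrative sketch; the factory mock object, its
+// CreateBuffer() method, and the Buffer type are hypothetical):
+//
+//   EXPECT_CALL(factory, CreateBuffer(_))
+//       .WillOnce(ReturnNew<Buffer>(1024));  // returns new Buffer(1024).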
+ACTION_TEMPLATE(ReturnNew,
+ HAS_1_TEMPLATE_PARAMS(typename, T),
+ AND_0_VALUE_PARAMS()) {
+ return new T();
+}
+
+ACTION_TEMPLATE(ReturnNew,
+ HAS_1_TEMPLATE_PARAMS(typename, T),
+ AND_1_VALUE_PARAMS(p0)) {
+ return new T(p0);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+ HAS_1_TEMPLATE_PARAMS(typename, T),
+ AND_2_VALUE_PARAMS(p0, p1)) {
+ return new T(p0, p1);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+ HAS_1_TEMPLATE_PARAMS(typename, T),
+ AND_3_VALUE_PARAMS(p0, p1, p2)) {
+ return new T(p0, p1, p2);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+ HAS_1_TEMPLATE_PARAMS(typename, T),
+ AND_4_VALUE_PARAMS(p0, p1, p2, p3)) {
+ return new T(p0, p1, p2, p3);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+ HAS_1_TEMPLATE_PARAMS(typename, T),
+ AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4)) {
+ return new T(p0, p1, p2, p3, p4);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+ HAS_1_TEMPLATE_PARAMS(typename, T),
+ AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5)) {
+ return new T(p0, p1, p2, p3, p4, p5);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+ HAS_1_TEMPLATE_PARAMS(typename, T),
+ AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6)) {
+ return new T(p0, p1, p2, p3, p4, p5, p6);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+ HAS_1_TEMPLATE_PARAMS(typename, T),
+ AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7)) {
+ return new T(p0, p1, p2, p3, p4, p5, p6, p7);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+ HAS_1_TEMPLATE_PARAMS(typename, T),
+ AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, p8)) {
+ return new T(p0, p1, p2, p3, p4, p5, p6, p7, p8);
+}
+
+ACTION_TEMPLATE(ReturnNew,
+ HAS_1_TEMPLATE_PARAMS(typename, T),
+ AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)) {
+ return new T(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9);
+}
+
+#ifdef _MSC_VER
+# pragma warning(pop)
+#endif
+
+} // namespace testing
+
+#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_
+// This file was GENERATED by command:
+// pump.py gmock-generated-function-mockers.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements function mockers of various arities.
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements the ON_CALL() and EXPECT_CALL() macros.
+//
+// A user can use the ON_CALL() macro to specify the default action of
+// a mock method. The syntax is:
+//
+// ON_CALL(mock_object, Method(argument-matchers))
+// .With(multi-argument-matcher)
+// .WillByDefault(action);
+//
+// where the .With() clause is optional.
+//
+// A user can use the EXPECT_CALL() macro to specify an expectation on
+// a mock method. The syntax is:
+//
+// EXPECT_CALL(mock_object, Method(argument-matchers))
+// .With(multi-argument-matchers)
+// .Times(cardinality)
+// .InSequence(sequences)
+// .After(expectations)
+// .WillOnce(action)
+// .WillRepeatedly(action)
+// .RetiresOnSaturation();
+//
+// where all clauses are optional, and .InSequence()/.After()/
+// .WillOnce() can appear any number of times.
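+//
+// For example (an illustrative sketch only; MockTurtle and its methods are
+// assumed to be defined elsewhere by the user):
+//
+//   MockTurtle turtle;
+//   ON_CALL(turtle, GetX())
+//       .WillByDefault(Return(0));
+//   EXPECT_CALL(turtle, PenDown())
+//       .Times(AtLeast(1));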
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_SPEC_BUILDERS_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_SPEC_BUILDERS_H_
+
+#include <map>
+#include <set>
+#include <sstream>
+#include <string>
+#include <vector>
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements some commonly used argument matchers. More
+// matchers can be defined by the user implementing the
+// MatcherInterface<T> interface if necessary.
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_
+
+#include <algorithm>
+#include <limits>
+#include <ostream> // NOLINT
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+
+namespace testing {
+
+// To implement a matcher Foo for type T, define:
+// 1. a class FooMatcherImpl that implements the
+// MatcherInterface<T> interface, and
+// 2. a factory function that creates a Matcher<T> object from a
+// FooMatcherImpl*.
+//
+// The two-level delegation design makes it possible to allow a user
+// to write "v" instead of "Eq(v)" where a Matcher is expected, which
+// is impossible if we pass matchers by pointers. It also eases
+// ownership management as Matcher objects can now be copied like
+// plain values.
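+//
+// As a minimal illustrative sketch (IsEvenMatcherImpl and IsEven() are
+// hypothetical, not part of Google Mock), the two steps above could look
+// like this for an "is even" matcher on int:
+//
+//   class IsEvenMatcherImpl : public MatcherInterface<int> {
+//    public:
+//     virtual bool MatchAndExplain(int n,
+//                                  MatchResultListener* listener) const {
+//       *listener << "which is " << (n % 2) << " modulo 2";
+//       return (n % 2) == 0;
+//     }
+//     virtual void DescribeTo(::std::ostream* os) const { *os << "is even"; }
+//     virtual void DescribeNegationTo(::std::ostream* os) const {
+//       *os << "is odd";
+//     }
+//   };
+//
+//   inline Matcher<int> IsEven() {
+//     return MakeMatcher(new IsEvenMatcherImpl);
+//   }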
+
+// MatchResultListener is an abstract class. Its << operator can be
+// used by a matcher to explain why a value matches or doesn't match.
+//
+// TODO(wan@google.com): add method
+// bool InterestedInWhy(bool result) const;
+// to indicate whether the listener is interested in why the match
+// result is 'result'.
+class MatchResultListener {
+ public:
+ // Creates a listener object with the given underlying ostream. The
+ // listener does not own the ostream.
+ explicit MatchResultListener(::std::ostream* os) : stream_(os) {}
+ virtual ~MatchResultListener() = 0; // Makes this class abstract.
+
+ // Streams x to the underlying ostream; does nothing if the ostream
+ // is NULL.
+ template <typename T>
+ MatchResultListener& operator<<(const T& x) {
+ if (stream_ != NULL)
+ *stream_ << x;
+ return *this;
+ }
+
+ // Returns the underlying ostream.
+ ::std::ostream* stream() { return stream_; }
+
+ // Returns true iff the listener is interested in an explanation of
+ // the match result. A matcher's MatchAndExplain() method can use
+ // this information to avoid generating the explanation when no one
+ // intends to hear it.
+ bool IsInterested() const { return stream_ != NULL; }
+
+ private:
+ ::std::ostream* const stream_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(MatchResultListener);
+};
+
+inline MatchResultListener::~MatchResultListener() {
+}
+
+// The implementation of a matcher.
+template <typename T>
+class MatcherInterface {
+ public:
+ virtual ~MatcherInterface() {}
+
+ // Returns true iff the matcher matches x; also explains the match
+ // result to 'listener', in the form of a non-restrictive relative
+ // clause ("which ...", "whose ...", etc) that describes x. For
+ // example, the MatchAndExplain() method of the Pointee(...) matcher
+ // should generate an explanation like "which points to ...".
+ //
+ // You should override this method when defining a new matcher.
+ //
+ // It's the responsibility of the caller (Google Mock) to guarantee
+ // that 'listener' is not NULL. This helps to simplify a matcher's
+ // implementation when it doesn't care about the performance, as it
+ // can talk to 'listener' without checking its validity first.
+ // However, in order to implement dummy listeners efficiently,
+ // listener->stream() may be NULL.
+ virtual bool MatchAndExplain(T x, MatchResultListener* listener) const = 0;
+
+ // Describes this matcher to an ostream. The function should print
+ // a verb phrase that describes the property a value matching this
+ // matcher should have. The subject of the verb phrase is the value
+ // being matched. For example, the DescribeTo() method of the Gt(7)
+ // matcher prints "is greater than 7".
+ virtual void DescribeTo(::std::ostream* os) const = 0;
+
+ // Describes the negation of this matcher to an ostream. For
+ // example, if the description of this matcher is "is greater than
+ // 7", the negated description could be "is not greater than 7".
+ // You are not required to override this when implementing
+ // MatcherInterface, but it is highly advised so that your matcher
+ // can produce good error messages.
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ *os << "not (";
+ DescribeTo(os);
+ *os << ")";
+ }
+};
+
+namespace internal {
+
+// A match result listener that ignores the explanation.
+class DummyMatchResultListener : public MatchResultListener {
+ public:
+ DummyMatchResultListener() : MatchResultListener(NULL) {}
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DummyMatchResultListener);
+};
+
+// A match result listener that forwards the explanation to a given
+// ostream. The difference between this and MatchResultListener is
+// that the former is concrete.
+class StreamMatchResultListener : public MatchResultListener {
+ public:
+ explicit StreamMatchResultListener(::std::ostream* os)
+ : MatchResultListener(os) {}
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamMatchResultListener);
+};
+
+// A match result listener that stores the explanation in a string.
+class StringMatchResultListener : public MatchResultListener {
+ public:
+ StringMatchResultListener() : MatchResultListener(&ss_) {}
+
+ // Returns the explanation heard so far.
+ internal::string str() const { return ss_.str(); }
+
+ private:
+ ::std::stringstream ss_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(StringMatchResultListener);
+};
+
+// An internal class for implementing Matcher<T>, which will derive
+// from it. We put functionalities common to all Matcher<T>
+// specializations here to avoid code duplication.
+template <typename T>
+class MatcherBase {
+ public:
+ // Returns true iff the matcher matches x; also explains the match
+ // result to 'listener'.
+ bool MatchAndExplain(T x, MatchResultListener* listener) const {
+ return impl_->MatchAndExplain(x, listener);
+ }
+
+ // Returns true iff this matcher matches x.
+ bool Matches(T x) const {
+ DummyMatchResultListener dummy;
+ return MatchAndExplain(x, &dummy);
+ }
+
+ // Describes this matcher to an ostream.
+ void DescribeTo(::std::ostream* os) const { impl_->DescribeTo(os); }
+
+ // Describes the negation of this matcher to an ostream.
+ void DescribeNegationTo(::std::ostream* os) const {
+ impl_->DescribeNegationTo(os);
+ }
+
+ // Explains why x matches, or doesn't match, the matcher.
+ void ExplainMatchResultTo(T x, ::std::ostream* os) const {
+ StreamMatchResultListener listener(os);
+ MatchAndExplain(x, &listener);
+ }
+
+ protected:
+ MatcherBase() {}
+
+ // Constructs a matcher from its implementation.
+ explicit MatcherBase(const MatcherInterface<T>* impl)
+ : impl_(impl) {}
+
+ virtual ~MatcherBase() {}
+
+ private:
+ // shared_ptr (util/gtl/shared_ptr.h) and linked_ptr have similar
+ // interfaces. The former dynamically allocates a chunk of memory
+ // to hold the reference count, while the latter tracks all
+ // references using a circular linked list without allocating
+ // memory. It has been observed that linked_ptr performs better in
+ // typical scenarios. However, shared_ptr can out-perform
+ // linked_ptr when there are many more uses of the copy constructor
+ // than the default constructor.
+ //
+ // If performance becomes a problem, we should see if using
+ // shared_ptr helps.
+ ::testing::internal::linked_ptr<const MatcherInterface<T> > impl_;
+};
+
+} // namespace internal
+
+// A Matcher<T> is a copyable and IMMUTABLE (except by assignment)
+// object that can check whether a value of type T matches. The
+// implementation of Matcher<T> is just a linked_ptr to const
+// MatcherInterface<T>, so copying is fairly cheap. Don't inherit
+// from Matcher!
+template <typename T>
+class Matcher : public internal::MatcherBase<T> {
+ public:
+ // Constructs a null matcher. Needed for storing Matcher objects in STL
+ // containers. A default-constructed matcher is not yet initialized. You
+ // cannot use it until a valid value has been assigned to it.
+ Matcher() {}
+
+ // Constructs a matcher from its implementation.
+ explicit Matcher(const MatcherInterface<T>* impl)
+ : internal::MatcherBase<T>(impl) {}
+
+ // Implicit constructor here allows people to write
+ // EXPECT_CALL(foo, Bar(5)) instead of EXPECT_CALL(foo, Bar(Eq(5))) sometimes
+ Matcher(T value); // NOLINT
+};
+
+// The following two specializations allow the user to write str
+// instead of Eq(str) and "foo" instead of Eq("foo") when a string
+// matcher is expected.
+template <>
+class Matcher<const internal::string&>
+ : public internal::MatcherBase<const internal::string&> {
+ public:
+ Matcher() {}
+
+ explicit Matcher(const MatcherInterface<const internal::string&>* impl)
+ : internal::MatcherBase<const internal::string&>(impl) {}
+
+ // Allows the user to write str instead of Eq(str) sometimes, where
+ // str is a string object.
+ Matcher(const internal::string& s); // NOLINT
+
+ // Allows the user to write "foo" instead of Eq("foo") sometimes.
+ Matcher(const char* s); // NOLINT
+};
+
+template <>
+class Matcher<internal::string>
+ : public internal::MatcherBase<internal::string> {
+ public:
+ Matcher() {}
+
+ explicit Matcher(const MatcherInterface<internal::string>* impl)
+ : internal::MatcherBase<internal::string>(impl) {}
+
+ // Allows the user to write str instead of Eq(str) sometimes, where
+ // str is a string object.
+ Matcher(const internal::string& s); // NOLINT
+
+ // Allows the user to write "foo" instead of Eq("foo") sometimes.
+ Matcher(const char* s); // NOLINT
+};
+
+// The PolymorphicMatcher class template makes it easy to implement a
+// polymorphic matcher (i.e. a matcher that can match values of more
+// than one type, e.g. Eq(n) and NotNull()).
+//
+// To define a polymorphic matcher, a user should provide an Impl
+// class that has a DescribeTo() method and a DescribeNegationTo()
+// method, and define a member function (or member function template)
+//
+// bool MatchAndExplain(const Value& value,
+// MatchResultListener* listener) const;
+//
+// See the definition of NotNull() for a complete example.
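+//
+// As a shorter illustrative sketch (IsPositiveMatcher and IsPositive() are
+// hypothetical, not part of Google Mock):
+//
+//   class IsPositiveMatcher {
+//    public:
+//     template <typename Value>
+//     bool MatchAndExplain(const Value& value,
+//                          MatchResultListener* /* listener */) const {
+//       return value > 0;
+//     }
+//     void DescribeTo(::std::ostream* os) const { *os << "is positive"; }
+//     void DescribeNegationTo(::std::ostream* os) const {
+//       *os << "isn't positive";
+//     }
+//   };
+//
+//   inline PolymorphicMatcher<IsPositiveMatcher> IsPositive() {
+//     return MakePolymorphicMatcher(IsPositiveMatcher());
+//   }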
+template <class Impl>
+class PolymorphicMatcher {
+ public:
+ explicit PolymorphicMatcher(const Impl& an_impl) : impl_(an_impl) {}
+
+ // Returns a mutable reference to the underlying matcher
+ // implementation object.
+ Impl& mutable_impl() { return impl_; }
+
+ // Returns an immutable reference to the underlying matcher
+ // implementation object.
+ const Impl& impl() const { return impl_; }
+
+ template <typename T>
+ operator Matcher<T>() const {
+ return Matcher<T>(new MonomorphicImpl<T>(impl_));
+ }
+
+ private:
+ template <typename T>
+ class MonomorphicImpl : public MatcherInterface<T> {
+ public:
+ explicit MonomorphicImpl(const Impl& impl) : impl_(impl) {}
+
+ virtual void DescribeTo(::std::ostream* os) const {
+ impl_.DescribeTo(os);
+ }
+
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ impl_.DescribeNegationTo(os);
+ }
+
+ virtual bool MatchAndExplain(T x, MatchResultListener* listener) const {
+ return impl_.MatchAndExplain(x, listener);
+ }
+
+ private:
+ const Impl impl_;
+
+ GTEST_DISALLOW_ASSIGN_(MonomorphicImpl);
+ };
+
+ Impl impl_;
+
+ GTEST_DISALLOW_ASSIGN_(PolymorphicMatcher);
+};
+
+// Creates a matcher from its implementation. This is easier to use
+// than the Matcher<T> constructor as it doesn't require you to
+// explicitly write the template argument, e.g.
+//
+// MakeMatcher(foo);
+// vs
+// Matcher<const string&>(foo);
+template <typename T>
+inline Matcher<T> MakeMatcher(const MatcherInterface<T>* impl) {
+ return Matcher<T>(impl);
+}
+
+// Creates a polymorphic matcher from its implementation. This is
+// easier to use than the PolymorphicMatcher<Impl> constructor as it
+// doesn't require you to explicitly write the template argument, e.g.
+//
+// MakePolymorphicMatcher(foo);
+// vs
+// PolymorphicMatcher<TypeOfFoo>(foo);
+template <class Impl>
+inline PolymorphicMatcher<Impl> MakePolymorphicMatcher(const Impl& impl) {
+ return PolymorphicMatcher<Impl>(impl);
+}
+
+// In order to be safe and clear, casting between different matcher
+// types is done explicitly via MatcherCast<T>(m), which takes a
+// matcher m and returns a Matcher<T>. It compiles only when T can be
+// statically converted to the argument type of m.
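+//
+// For example (an illustrative sketch): since char can be statically
+// converted to int, a Matcher<int> can be cast for use where a
+// Matcher<char> is expected:
+//
+//   Matcher<int> is_five = Eq(5);
+//   Matcher<char> m = MatcherCast<char>(is_five);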
+template <typename T, typename M>
+Matcher<T> MatcherCast(M m);
+
+// Implements SafeMatcherCast().
+//
+// We use an intermediate class to do the actual safe casting as Nokia's
+// Symbian compiler cannot decide between
+// template <T, M> ... (M) and
+// template <T, U> ... (const Matcher<U>&)
+// for function templates but can for member function templates.
+template <typename T>
+class SafeMatcherCastImpl {
+ public:
+ // This overload handles polymorphic matchers only since monomorphic
+ // matchers are handled by the next one.
+ template <typename M>
+ static inline Matcher<T> Cast(M polymorphic_matcher) {
+ return Matcher<T>(polymorphic_matcher);
+ }
+
+ // This overload handles monomorphic matchers.
+ //
+ // In general, if type T can be implicitly converted to type U, we can
+ // safely convert a Matcher<U> to a Matcher<T> (i.e. Matcher is
+ // contravariant): just keep a copy of the original Matcher<U>, convert the
+ // argument from type T to U, and then pass it to the underlying Matcher<U>.
+ // The only exception is when U is a reference and T is not, as the
+ // underlying Matcher<U> may be interested in the argument's address, which
+ // is not preserved in the conversion from T to U.
+ template <typename U>
+ static inline Matcher<T> Cast(const Matcher<U>& matcher) {
+ // Enforce that T can be implicitly converted to U.
+ GTEST_COMPILE_ASSERT_((internal::ImplicitlyConvertible<T, U>::value),
+ T_must_be_implicitly_convertible_to_U);
+ // Enforce that we are not converting a non-reference type T to a reference
+ // type U.
+ GTEST_COMPILE_ASSERT_(
+ internal::is_reference<T>::value || !internal::is_reference<U>::value,
+        cannot_convert_non_reference_arg_to_reference);
+ // In case both T and U are arithmetic types, enforce that the
+ // conversion is not lossy.
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(T) RawT;
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(U) RawU;
+ const bool kTIsOther = GMOCK_KIND_OF_(RawT) == internal::kOther;
+ const bool kUIsOther = GMOCK_KIND_OF_(RawU) == internal::kOther;
+ GTEST_COMPILE_ASSERT_(
+ kTIsOther || kUIsOther ||
+ (internal::LosslessArithmeticConvertible<RawT, RawU>::value),
+ conversion_of_arithmetic_types_must_be_lossless);
+ return MatcherCast<T>(matcher);
+ }
+};
+
+template <typename T, typename M>
+inline Matcher<T> SafeMatcherCast(const M& polymorphic_matcher) {
+ return SafeMatcherCastImpl<T>::Cast(polymorphic_matcher);
+}
+
+// A<T>() returns a matcher that matches any value of type T.
+template <typename T>
+Matcher<T> A();
+
+// Anything inside the 'internal' namespace IS INTERNAL IMPLEMENTATION
+// and MUST NOT BE USED IN USER CODE!!!
+namespace internal {
+
+// If the explanation is not empty, prints it to the ostream.
+inline void PrintIfNotEmpty(const internal::string& explanation,
+ std::ostream* os) {
+ if (explanation != "" && os != NULL) {
+ *os << ", " << explanation;
+ }
+}
+
+// Returns true if the given type name is easy to read by a human.
+// This is used to decide whether printing the type of a value might
+// be helpful.
+inline bool IsReadableTypeName(const string& type_name) {
+ // We consider a type name readable if it's short or doesn't contain
+ // a template or function type.
+ return (type_name.length() <= 20 ||
+ type_name.find_first_of("<(") == string::npos);
+}
+
+// Matches the value against the given matcher, prints the value and explains
+// the match result to the listener. Returns the match result.
+// 'listener' must not be NULL.
+// Value cannot be passed by const reference, because some matchers take a
+// non-const argument.
+template <typename Value, typename T>
+bool MatchPrintAndExplain(Value& value, const Matcher<T>& matcher,
+ MatchResultListener* listener) {
+ if (!listener->IsInterested()) {
+ // If the listener is not interested, we do not need to construct the
+ // inner explanation.
+ return matcher.Matches(value);
+ }
+
+ StringMatchResultListener inner_listener;
+ const bool match = matcher.MatchAndExplain(value, &inner_listener);
+
+ UniversalPrint(value, listener->stream());
+#if GTEST_HAS_RTTI
+ const string& type_name = GetTypeName<Value>();
+ if (IsReadableTypeName(type_name))
+ *listener->stream() << " (of type " << type_name << ")";
+#endif
+ PrintIfNotEmpty(inner_listener.str(), listener->stream());
+
+ return match;
+}
+
+// An internal helper class for doing compile-time loop on a tuple's
+// fields.
+template <size_t N>
+class TuplePrefix {
+ public:
+ // TuplePrefix<N>::Matches(matcher_tuple, value_tuple) returns true
+ // iff the first N fields of matcher_tuple matches the first N
+ // fields of value_tuple, respectively.
+ template <typename MatcherTuple, typename ValueTuple>
+ static bool Matches(const MatcherTuple& matcher_tuple,
+ const ValueTuple& value_tuple) {
+ using ::std::tr1::get;
+ return TuplePrefix<N - 1>::Matches(matcher_tuple, value_tuple)
+ && get<N - 1>(matcher_tuple).Matches(get<N - 1>(value_tuple));
+ }
+
+ // TuplePrefix<N>::ExplainMatchFailuresTo(matchers, values, os)
+ // describes failures in matching the first N fields of matchers
+ // against the first N fields of values. If there is no failure,
+ // nothing will be streamed to os.
+ template <typename MatcherTuple, typename ValueTuple>
+ static void ExplainMatchFailuresTo(const MatcherTuple& matchers,
+ const ValueTuple& values,
+ ::std::ostream* os) {
+ using ::std::tr1::tuple_element;
+ using ::std::tr1::get;
+
+ // First, describes failures in the first N - 1 fields.
+ TuplePrefix<N - 1>::ExplainMatchFailuresTo(matchers, values, os);
+
+ // Then describes the failure (if any) in the (N - 1)-th (0-based)
+ // field.
+ typename tuple_element<N - 1, MatcherTuple>::type matcher =
+ get<N - 1>(matchers);
+ typedef typename tuple_element<N - 1, ValueTuple>::type Value;
+ Value value = get<N - 1>(values);
+ StringMatchResultListener listener;
+ if (!matcher.MatchAndExplain(value, &listener)) {
+ // TODO(wan): include in the message the name of the parameter
+ // as used in MOCK_METHOD*() when possible.
+ *os << " Expected arg #" << N - 1 << ": ";
+ get<N - 1>(matchers).DescribeTo(os);
+ *os << "\n Actual: ";
+ // We remove the reference in type Value to prevent the
+ // universal printer from printing the address of value, which
+ // isn't interesting to the user most of the time. The
+ // matcher's MatchAndExplain() method handles the case when
+ // the address is interesting.
+ internal::UniversalPrint(value, os);
+ PrintIfNotEmpty(listener.str(), os);
+ *os << "\n";
+ }
+ }
+};
+
+// The base case.
+template <>
+class TuplePrefix<0> {
+ public:
+ template <typename MatcherTuple, typename ValueTuple>
+ static bool Matches(const MatcherTuple& /* matcher_tuple */,
+ const ValueTuple& /* value_tuple */) {
+ return true;
+ }
+
+ template <typename MatcherTuple, typename ValueTuple>
+ static void ExplainMatchFailuresTo(const MatcherTuple& /* matchers */,
+ const ValueTuple& /* values */,
+ ::std::ostream* /* os */) {}
+};
+
+// TupleMatches(matcher_tuple, value_tuple) returns true iff all
+// matchers in matcher_tuple match the corresponding fields in
+// value_tuple. It is a compiler error if matcher_tuple and
+// value_tuple have different number of fields or incompatible field
+// types.
+template <typename MatcherTuple, typename ValueTuple>
+bool TupleMatches(const MatcherTuple& matcher_tuple,
+ const ValueTuple& value_tuple) {
+ using ::std::tr1::tuple_size;
+ // Makes sure that matcher_tuple and value_tuple have the same
+ // number of fields.
+ GTEST_COMPILE_ASSERT_(tuple_size<MatcherTuple>::value ==
+ tuple_size<ValueTuple>::value,
+ matcher_and_value_have_different_numbers_of_fields);
+ return TuplePrefix<tuple_size<ValueTuple>::value>::
+ Matches(matcher_tuple, value_tuple);
+}
+
+// Describes failures in matching matchers against values. If there
+// is no failure, nothing will be streamed to os.
+template <typename MatcherTuple, typename ValueTuple>
+void ExplainMatchFailureTupleTo(const MatcherTuple& matchers,
+ const ValueTuple& values,
+ ::std::ostream* os) {
+ using ::std::tr1::tuple_size;
+ TuplePrefix<tuple_size<MatcherTuple>::value>::ExplainMatchFailuresTo(
+ matchers, values, os);
+}
+
+// The MatcherCastImpl class template is a helper for implementing
+// MatcherCast(). We need this helper in order to partially
+// specialize the implementation of MatcherCast() (C++ allows
+// class/struct templates to be partially specialized, but not
+// function templates).
+
+// This general version is used when MatcherCast()'s argument is a
+// polymorphic matcher (i.e. something that can be converted to a
+// Matcher but is not one yet; for example, Eq(value)).
+template <typename T, typename M>
+class MatcherCastImpl {
+ public:
+ static Matcher<T> Cast(M polymorphic_matcher) {
+ return Matcher<T>(polymorphic_matcher);
+ }
+};
+
+// This more specialized version is used when MatcherCast()'s argument
+// is already a Matcher. This only compiles when type T can be
+// statically converted to type U.
+template <typename T, typename U>
+class MatcherCastImpl<T, Matcher<U> > {
+ public:
+ static Matcher<T> Cast(const Matcher<U>& source_matcher) {
+ return Matcher<T>(new Impl(source_matcher));
+ }
+
+ private:
+ class Impl : public MatcherInterface<T> {
+ public:
+ explicit Impl(const Matcher<U>& source_matcher)
+ : source_matcher_(source_matcher) {}
+
+ // We delegate the matching logic to the source matcher.
+ virtual bool MatchAndExplain(T x, MatchResultListener* listener) const {
+ return source_matcher_.MatchAndExplain(static_cast<U>(x), listener);
+ }
+
+ virtual void DescribeTo(::std::ostream* os) const {
+ source_matcher_.DescribeTo(os);
+ }
+
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ source_matcher_.DescribeNegationTo(os);
+ }
+
+ private:
+ const Matcher<U> source_matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(Impl);
+ };
+};
+
+// This even more specialized version is used for efficiently casting
+// a matcher to its own type.
+template <typename T>
+class MatcherCastImpl<T, Matcher<T> > {
+ public:
+ static Matcher<T> Cast(const Matcher<T>& matcher) { return matcher; }
+};
+
+// Implements A<T>().
+template <typename T>
+class AnyMatcherImpl : public MatcherInterface<T> {
+ public:
+ virtual bool MatchAndExplain(
+ T /* x */, MatchResultListener* /* listener */) const { return true; }
+ virtual void DescribeTo(::std::ostream* os) const { *os << "is anything"; }
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+    // This is mostly for completeness' sake, as it's not very useful
+    // to write Not(A<bool>()). However, we cannot completely rule out
+ // such a possibility, and it doesn't hurt to be prepared.
+ *os << "never matches";
+ }
+};
+
+// Implements _, a matcher that matches any value of any
+// type. This is a polymorphic matcher, so we need a template type
+// conversion operator to make it appear as a Matcher<T> for any
+// type T.
+class AnythingMatcher {
+ public:
+ template <typename T>
+ operator Matcher<T>() const { return A<T>(); }
+};
+
+// Implements a matcher that compares a given value with a
+// pre-supplied value using one of the ==, <=, <, etc. operators. The
+// two values being compared don't have to have the same type.
+//
+// The matcher defined here is polymorphic (for example, Eq(5) can be
+// used to match an int, a short, a double, etc). Therefore we use
+// a template type conversion operator in the implementation.
+//
+// We define this as a macro in order to eliminate duplicated source
+// code.
+//
+// The following template definition assumes that the Rhs parameter is
+// a "bare" type (i.e. neither 'const T' nor 'T&').
+#define GMOCK_IMPLEMENT_COMPARISON_MATCHER_( \
+ name, op, relation, negated_relation) \
+ template <typename Rhs> class name##Matcher { \
+ public: \
+ explicit name##Matcher(const Rhs& rhs) : rhs_(rhs) {} \
+ template <typename Lhs> \
+ operator Matcher<Lhs>() const { \
+ return MakeMatcher(new Impl<Lhs>(rhs_)); \
+ } \
+ private: \
+ template <typename Lhs> \
+ class Impl : public MatcherInterface<Lhs> { \
+ public: \
+ explicit Impl(const Rhs& rhs) : rhs_(rhs) {} \
+ virtual bool MatchAndExplain(\
+ Lhs lhs, MatchResultListener* /* listener */) const { \
+ return lhs op rhs_; \
+ } \
+ virtual void DescribeTo(::std::ostream* os) const { \
+ *os << relation " "; \
+ UniversalPrint(rhs_, os); \
+ } \
+ virtual void DescribeNegationTo(::std::ostream* os) const { \
+ *os << negated_relation " "; \
+ UniversalPrint(rhs_, os); \
+ } \
+ private: \
+ Rhs rhs_; \
+ GTEST_DISALLOW_ASSIGN_(Impl); \
+ }; \
+ Rhs rhs_; \
+ GTEST_DISALLOW_ASSIGN_(name##Matcher); \
+ }
+
+// Implements Eq(v), Ge(v), Gt(v), Le(v), Lt(v), and Ne(v)
+// respectively.
+GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Eq, ==, "is equal to", "isn't equal to");
+GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Ge, >=, "is >=", "isn't >=");
+GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Gt, >, "is >", "isn't >");
+GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Le, <=, "is <=", "isn't <=");
+GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Lt, <, "is <", "isn't <");
+GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Ne, !=, "isn't equal to", "is equal to");
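+
+// The corresponding Eq(), Ge(), Gt(), Le(), Lt(), and Ne() factory
+// functions (defined elsewhere in Google Mock) are polymorphic. For
+// example (an illustrative sketch):
+//
+//   Matcher<int> m1 = Eq(5);       // matches exactly 5.
+//   Matcher<double> m2 = Gt(1.0);  // matches any double greater than 1.0.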
+
+#undef GMOCK_IMPLEMENT_COMPARISON_MATCHER_
+
+// Implements the polymorphic IsNull() matcher, which matches any raw or smart
+// pointer that is NULL.
+class IsNullMatcher {
+ public:
+ template <typename Pointer>
+ bool MatchAndExplain(const Pointer& p,
+ MatchResultListener* /* listener */) const {
+ return GetRawPointer(p) == NULL;
+ }
+
+ void DescribeTo(::std::ostream* os) const { *os << "is NULL"; }
+ void DescribeNegationTo(::std::ostream* os) const {
+ *os << "isn't NULL";
+ }
+};
+
+// Implements the polymorphic NotNull() matcher, which matches any raw or smart
+// pointer that is not NULL.
+class NotNullMatcher {
+ public:
+ template <typename Pointer>
+ bool MatchAndExplain(const Pointer& p,
+ MatchResultListener* /* listener */) const {
+ return GetRawPointer(p) != NULL;
+ }
+
+ void DescribeTo(::std::ostream* os) const { *os << "isn't NULL"; }
+ void DescribeNegationTo(::std::ostream* os) const {
+ *os << "is NULL";
+ }
+};
+
+// Ref(variable) matches any argument that is a reference to
+// 'variable'. This matcher is polymorphic as it can match any
+// super type of the type of 'variable'.
+//
+// The RefMatcher template class implements Ref(variable). It can
+// only be instantiated with a reference type. This prevents a user
+// from mistakenly using Ref(x) to match a non-reference function
+// argument. For example, the following will righteously cause a
+// compiler error:
+//
+// int n;
+// Matcher<int> m1 = Ref(n); // This won't compile.
+// Matcher<int&> m2 = Ref(n); // This will compile.
+template <typename T>
+class RefMatcher;
+
+template <typename T>
+class RefMatcher<T&> {
+ // Google Mock is a generic framework and thus needs to support
+ // mocking any function types, including those that take non-const
+ // reference arguments. Therefore the template parameter T (and
+ // Super below) can be instantiated to either a const type or a
+ // non-const type.
+ public:
+ // RefMatcher() takes a T& instead of const T&, as we want the
+ // compiler to catch using Ref(const_value) as a matcher for a
+ // non-const reference.
+ explicit RefMatcher(T& x) : object_(x) {} // NOLINT
+
+ template <typename Super>
+ operator Matcher<Super&>() const {
+ // By passing object_ (type T&) to Impl(), which expects a Super&,
+ // we make sure that Super is a super type of T. In particular,
+ // this catches using Ref(const_value) as a matcher for a
+ // non-const reference, as you cannot implicitly convert a const
+ // reference to a non-const reference.
+ return MakeMatcher(new Impl<Super>(object_));
+ }
+
+ private:
+ template <typename Super>
+ class Impl : public MatcherInterface<Super&> {
+ public:
+ explicit Impl(Super& x) : object_(x) {} // NOLINT
+
+ // MatchAndExplain() takes a Super& (as opposed to const Super&)
+ // in order to match the interface MatcherInterface<Super&>.
+ virtual bool MatchAndExplain(
+ Super& x, MatchResultListener* listener) const {
+ *listener << "which is located @" << static_cast<const void*>(&x);
+ return &x == &object_;
+ }
+
+ virtual void DescribeTo(::std::ostream* os) const {
+ *os << "references the variable ";
+ UniversalPrinter<Super&>::Print(object_, os);
+ }
+
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ *os << "does not reference the variable ";
+ UniversalPrinter<Super&>::Print(object_, os);
+ }
+
+ private:
+ const Super& object_;
+
+ GTEST_DISALLOW_ASSIGN_(Impl);
+ };
+
+ T& object_;
+
+ GTEST_DISALLOW_ASSIGN_(RefMatcher);
+};
+
+// Polymorphic helper functions for narrow and wide string matchers.
+inline bool CaseInsensitiveCStringEquals(const char* lhs, const char* rhs) {
+ return String::CaseInsensitiveCStringEquals(lhs, rhs);
+}
+
+inline bool CaseInsensitiveCStringEquals(const wchar_t* lhs,
+ const wchar_t* rhs) {
+ return String::CaseInsensitiveWideCStringEquals(lhs, rhs);
+}
+
+// String comparison for narrow or wide strings that can have embedded NUL
+// characters.
+template <typename StringType>
+bool CaseInsensitiveStringEquals(const StringType& s1,
+ const StringType& s2) {
+ // Are the heads equal?
+ if (!CaseInsensitiveCStringEquals(s1.c_str(), s2.c_str())) {
+ return false;
+ }
+
+ // Skip the equal heads.
+ const typename StringType::value_type nul = 0;
+ const size_t i1 = s1.find(nul), i2 = s2.find(nul);
+
+ // Are we at the end of either s1 or s2?
+ if (i1 == StringType::npos || i2 == StringType::npos) {
+ return i1 == i2;
+ }
+
+ // Are the tails equal?
+ return CaseInsensitiveStringEquals(s1.substr(i1 + 1), s2.substr(i2 + 1));
+}
+
+// String matchers.
+
+// Implements equality-based string matchers like StrEq, StrCaseNe, etc.
+template <typename StringType>
+class StrEqualityMatcher {
+ public:
+ typedef typename StringType::const_pointer ConstCharPointer;
+
+ StrEqualityMatcher(const StringType& str, bool expect_eq,
+ bool case_sensitive)
+ : string_(str), expect_eq_(expect_eq), case_sensitive_(case_sensitive) {}
+
+ // When expect_eq_ is true, returns true iff s is equal to string_;
+ // otherwise returns true iff s is not equal to string_.
+ bool MatchAndExplain(ConstCharPointer s,
+ MatchResultListener* listener) const {
+ if (s == NULL) {
+ return !expect_eq_;
+ }
+ return MatchAndExplain(StringType(s), listener);
+ }
+
+ bool MatchAndExplain(const StringType& s,
+ MatchResultListener* /* listener */) const {
+ const bool eq = case_sensitive_ ? s == string_ :
+ CaseInsensitiveStringEquals(s, string_);
+ return expect_eq_ == eq;
+ }
+
+ void DescribeTo(::std::ostream* os) const {
+ DescribeToHelper(expect_eq_, os);
+ }
+
+ void DescribeNegationTo(::std::ostream* os) const {
+ DescribeToHelper(!expect_eq_, os);
+ }
+
+ private:
+ void DescribeToHelper(bool expect_eq, ::std::ostream* os) const {
+ *os << (expect_eq ? "is " : "isn't ");
+ *os << "equal to ";
+ if (!case_sensitive_) {
+ *os << "(ignoring case) ";
+ }
+ UniversalPrint(string_, os);
+ }
+
+ const StringType string_;
+ const bool expect_eq_;
+ const bool case_sensitive_;
+
+ GTEST_DISALLOW_ASSIGN_(StrEqualityMatcher);
+};
+
+// Implements the polymorphic HasSubstr(substring) matcher, which
+// can be used as a Matcher<T> as long as T can be converted to a
+// string.
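+//
+// For example (an illustrative sketch; the HasSubstr() factory itself is
+// defined elsewhere in Google Mock):
+//
+//   std::string haystack = "finding a needle in a haystack";
+//   EXPECT_THAT(haystack, HasSubstr("needle"));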
+template <typename StringType>
+class HasSubstrMatcher {
+ public:
+ typedef typename StringType::const_pointer ConstCharPointer;
+
+ explicit HasSubstrMatcher(const StringType& substring)
+ : substring_(substring) {}
+
+ // These overloaded methods allow HasSubstr(substring) to be used as a
+ // Matcher<T> as long as T can be converted to string. Returns true
+ // iff s contains substring_ as a substring.
+ bool MatchAndExplain(ConstCharPointer s,
+ MatchResultListener* listener) const {
+ return s != NULL && MatchAndExplain(StringType(s), listener);
+ }
+
+ bool MatchAndExplain(const StringType& s,
+ MatchResultListener* /* listener */) const {
+ return s.find(substring_) != StringType::npos;
+ }
+
+ // Describes what this matcher matches.
+ void DescribeTo(::std::ostream* os) const {
+ *os << "has substring ";
+ UniversalPrint(substring_, os);
+ }
+
+ void DescribeNegationTo(::std::ostream* os) const {
+ *os << "has no substring ";
+ UniversalPrint(substring_, os);
+ }
+
+ private:
+ const StringType substring_;
+
+ GTEST_DISALLOW_ASSIGN_(HasSubstrMatcher);
+};
+
+// Implements the polymorphic StartsWith(substring) matcher, which
+// can be used as a Matcher<T> as long as T can be converted to a
+// string.
+template <typename StringType>
+class StartsWithMatcher {
+ public:
+ typedef typename StringType::const_pointer ConstCharPointer;
+
+ explicit StartsWithMatcher(const StringType& prefix) : prefix_(prefix) {
+ }
+
+ // These overloaded methods allow StartsWith(prefix) to be used as a
+ // Matcher<T> as long as T can be converted to string. Returns true
+ // iff s starts with prefix_.
+ bool MatchAndExplain(ConstCharPointer s,
+ MatchResultListener* listener) const {
+ return s != NULL && MatchAndExplain(StringType(s), listener);
+ }
+
+ bool MatchAndExplain(const StringType& s,
+ MatchResultListener* /* listener */) const {
+ return s.length() >= prefix_.length() &&
+ s.substr(0, prefix_.length()) == prefix_;
+ }
+
+ void DescribeTo(::std::ostream* os) const {
+ *os << "starts with ";
+ UniversalPrint(prefix_, os);
+ }
+
+ void DescribeNegationTo(::std::ostream* os) const {
+ *os << "doesn't start with ";
+ UniversalPrint(prefix_, os);
+ }
+
+ private:
+ const StringType prefix_;
+
+ GTEST_DISALLOW_ASSIGN_(StartsWithMatcher);
+};
+
+// Implements the polymorphic EndsWith(substring) matcher, which
+// can be used as a Matcher<T> as long as T can be converted to a
+// string.
+template <typename StringType>
+class EndsWithMatcher {
+ public:
+ typedef typename StringType::const_pointer ConstCharPointer;
+
+ explicit EndsWithMatcher(const StringType& suffix) : suffix_(suffix) {}
+
+ // These overloaded methods allow EndsWith(suffix) to be used as a
+ // Matcher<T> as long as T can be converted to string. Returns true
+ // iff s ends with suffix_.
+ bool MatchAndExplain(ConstCharPointer s,
+ MatchResultListener* listener) const {
+ return s != NULL && MatchAndExplain(StringType(s), listener);
+ }
+
+ bool MatchAndExplain(const StringType& s,
+ MatchResultListener* /* listener */) const {
+ return s.length() >= suffix_.length() &&
+ s.substr(s.length() - suffix_.length()) == suffix_;
+ }
+
+ void DescribeTo(::std::ostream* os) const {
+ *os << "ends with ";
+ UniversalPrint(suffix_, os);
+ }
+
+ void DescribeNegationTo(::std::ostream* os) const {
+ *os << "doesn't end with ";
+ UniversalPrint(suffix_, os);
+ }
+
+ private:
+ const StringType suffix_;
+
+ GTEST_DISALLOW_ASSIGN_(EndsWithMatcher);
+};
+
+// Implements polymorphic matchers MatchesRegex(regex) and
+// ContainsRegex(regex), which can be used as a Matcher<T> as long as
+// T can be converted to a string.
+class MatchesRegexMatcher {
+ public:
+ MatchesRegexMatcher(const RE* regex, bool full_match)
+ : regex_(regex), full_match_(full_match) {}
+
+ // These overloaded methods allow MatchesRegex(regex) to be used as
+ // a Matcher<T> as long as T can be converted to string. Returns
+ // true iff s matches regular expression regex. When full_match_ is
+ // true, a full match is done; otherwise a partial match is done.
+ bool MatchAndExplain(const char* s,
+ MatchResultListener* listener) const {
+ return s != NULL && MatchAndExplain(internal::string(s), listener);
+ }
+
+ bool MatchAndExplain(const internal::string& s,
+ MatchResultListener* /* listener */) const {
+ return full_match_ ? RE::FullMatch(s, *regex_) :
+ RE::PartialMatch(s, *regex_);
+ }
+
+ void DescribeTo(::std::ostream* os) const {
+ *os << (full_match_ ? "matches" : "contains")
+ << " regular expression ";
+ UniversalPrinter<internal::string>::Print(regex_->pattern(), os);
+ }
+
+ void DescribeNegationTo(::std::ostream* os) const {
+ *os << "doesn't " << (full_match_ ? "match" : "contain")
+ << " regular expression ";
+ UniversalPrinter<internal::string>::Print(regex_->pattern(), os);
+ }
+
+ private:
+ const internal::linked_ptr<const RE> regex_;
+ const bool full_match_;
+
+ GTEST_DISALLOW_ASSIGN_(MatchesRegexMatcher);
+};
+
+// Implements a matcher that compares the two fields of a 2-tuple
+// using one of the ==, <=, <, etc, operators. The two fields being
+// compared don't have to have the same type.
+//
+// The matcher defined here is polymorphic (for example, Eq() can be
+// used to match a tuple<int, short>, a tuple<const long&, double>,
+// etc). Therefore we use a template type conversion operator in the
+// implementation.
+//
+// We define this as a macro in order to eliminate duplicated source
+// code.
+#define GMOCK_IMPLEMENT_COMPARISON2_MATCHER_(name, op, relation) \
+ class name##2Matcher { \
+ public: \
+ template <typename T1, typename T2> \
+ operator Matcher< ::std::tr1::tuple<T1, T2> >() const { \
+ return MakeMatcher(new Impl< ::std::tr1::tuple<T1, T2> >); \
+ } \
+ template <typename T1, typename T2> \
+ operator Matcher<const ::std::tr1::tuple<T1, T2>&>() const { \
+ return MakeMatcher(new Impl<const ::std::tr1::tuple<T1, T2>&>); \
+ } \
+ private: \
+ template <typename Tuple> \
+ class Impl : public MatcherInterface<Tuple> { \
+ public: \
+ virtual bool MatchAndExplain( \
+ Tuple args, \
+ MatchResultListener* /* listener */) const { \
+ return ::std::tr1::get<0>(args) op ::std::tr1::get<1>(args); \
+ } \
+ virtual void DescribeTo(::std::ostream* os) const { \
+ *os << "are " relation; \
+ } \
+ virtual void DescribeNegationTo(::std::ostream* os) const { \
+ *os << "aren't " relation; \
+ } \
+ }; \
+ }
+
+// Implements Eq(), Ge(), Gt(), Le(), Lt(), and Ne() respectively.
+GMOCK_IMPLEMENT_COMPARISON2_MATCHER_(Eq, ==, "an equal pair");
+GMOCK_IMPLEMENT_COMPARISON2_MATCHER_(
+ Ge, >=, "a pair where the first >= the second");
+GMOCK_IMPLEMENT_COMPARISON2_MATCHER_(
+ Gt, >, "a pair where the first > the second");
+GMOCK_IMPLEMENT_COMPARISON2_MATCHER_(
+ Le, <=, "a pair where the first <= the second");
+GMOCK_IMPLEMENT_COMPARISON2_MATCHER_(
+ Lt, <, "a pair where the first < the second");
+GMOCK_IMPLEMENT_COMPARISON2_MATCHER_(Ne, !=, "an unequal pair");
+
+#undef GMOCK_IMPLEMENT_COMPARISON2_MATCHER_
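+
+// A minimal usage sketch (hypothetical test code, not part of the generated
+// matchers above): the argument-less comparison matchers such as Eq() or
+// Lt() match a 2-tuple, checking get<0>(t) op get<1>(t), e.g.
+//   Matcher<std::tr1::tuple<int, int> > m = Eq();  // get<0> == get<1>
+// Such tuple matchers are typically consumed by tuple-based container
+// matchers like Pointwise(), shown further below.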
+
+// Implements the Not(...) matcher for a particular argument type T.
+// We do not nest it inside the NotMatcher class template, as that
+// will prevent different instantiations of NotMatcher from sharing
+// the same NotMatcherImpl<T> class.
+template <typename T>
+class NotMatcherImpl : public MatcherInterface<T> {
+ public:
+ explicit NotMatcherImpl(const Matcher<T>& matcher)
+ : matcher_(matcher) {}
+
+ virtual bool MatchAndExplain(T x, MatchResultListener* listener) const {
+ return !matcher_.MatchAndExplain(x, listener);
+ }
+
+ virtual void DescribeTo(::std::ostream* os) const {
+ matcher_.DescribeNegationTo(os);
+ }
+
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ matcher_.DescribeTo(os);
+ }
+
+ private:
+ const Matcher<T> matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(NotMatcherImpl);
+};
+
+// Implements the Not(m) matcher, which matches a value that doesn't
+// match matcher m.
+template <typename InnerMatcher>
+class NotMatcher {
+ public:
+ explicit NotMatcher(InnerMatcher matcher) : matcher_(matcher) {}
+
+ // This template type conversion operator allows Not(m) to be used
+ // to match any type m can match.
+ template <typename T>
+ operator Matcher<T>() const {
+ return Matcher<T>(new NotMatcherImpl<T>(SafeMatcherCast<T>(matcher_)));
+ }
+
+ private:
+ InnerMatcher matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(NotMatcher);
+};
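+
+// A minimal usage sketch (hypothetical test code): with the Not() factory
+// that Google Mock builds on top of NotMatcher, a test could write
+//   EXPECT_THAT(error_code, Not(Eq(0)));
+// which matches any value that Eq(0) does not match.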
+
+// Implements the AllOf(m1, m2) matcher for a particular argument type
+// T. We do not nest it inside the BothOfMatcher class template, as
+// that will prevent different instantiations of BothOfMatcher from
+// sharing the same BothOfMatcherImpl<T> class.
+template <typename T>
+class BothOfMatcherImpl : public MatcherInterface<T> {
+ public:
+ BothOfMatcherImpl(const Matcher<T>& matcher1, const Matcher<T>& matcher2)
+ : matcher1_(matcher1), matcher2_(matcher2) {}
+
+ virtual void DescribeTo(::std::ostream* os) const {
+ *os << "(";
+ matcher1_.DescribeTo(os);
+ *os << ") and (";
+ matcher2_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ *os << "(";
+ matcher1_.DescribeNegationTo(os);
+ *os << ") or (";
+ matcher2_.DescribeNegationTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(T x, MatchResultListener* listener) const {
+ // If either matcher1_ or matcher2_ doesn't match x, we only need
+ // to explain why one of them fails.
+ StringMatchResultListener listener1;
+ if (!matcher1_.MatchAndExplain(x, &listener1)) {
+ *listener << listener1.str();
+ return false;
+ }
+
+ StringMatchResultListener listener2;
+ if (!matcher2_.MatchAndExplain(x, &listener2)) {
+ *listener << listener2.str();
+ return false;
+ }
+
+ // Otherwise we need to explain why *both* of them match.
+ const internal::string s1 = listener1.str();
+ const internal::string s2 = listener2.str();
+
+ if (s1 == "") {
+ *listener << s2;
+ } else {
+ *listener << s1;
+ if (s2 != "") {
+ *listener << ", and " << s2;
+ }
+ }
+ return true;
+ }
+
+ private:
+ const Matcher<T> matcher1_;
+ const Matcher<T> matcher2_;
+
+ GTEST_DISALLOW_ASSIGN_(BothOfMatcherImpl);
+};
+
+// Used for implementing the AllOf(m_1, ..., m_n) matcher, which
+// matches a value that matches all of the matchers m_1, ..., and m_n.
+template <typename Matcher1, typename Matcher2>
+class BothOfMatcher {
+ public:
+ BothOfMatcher(Matcher1 matcher1, Matcher2 matcher2)
+ : matcher1_(matcher1), matcher2_(matcher2) {}
+
+ // This template type conversion operator allows a
+ // BothOfMatcher<Matcher1, Matcher2> object to match any type that
+ // both Matcher1 and Matcher2 can match.
+ template <typename T>
+ operator Matcher<T>() const {
+ return Matcher<T>(new BothOfMatcherImpl<T>(SafeMatcherCast<T>(matcher1_),
+ SafeMatcherCast<T>(matcher2_)));
+ }
+
+ private:
+ Matcher1 matcher1_;
+ Matcher2 matcher2_;
+
+ GTEST_DISALLOW_ASSIGN_(BothOfMatcher);
+};
+
+// Implements the AnyOf(m1, m2) matcher for a particular argument type
+// T. We do not nest it inside the AnyOfMatcher class template, as
+// that will prevent different instantiations of AnyOfMatcher from
+// sharing the same EitherOfMatcherImpl<T> class.
+template <typename T>
+class EitherOfMatcherImpl : public MatcherInterface<T> {
+ public:
+ EitherOfMatcherImpl(const Matcher<T>& matcher1, const Matcher<T>& matcher2)
+ : matcher1_(matcher1), matcher2_(matcher2) {}
+
+ virtual void DescribeTo(::std::ostream* os) const {
+ *os << "(";
+ matcher1_.DescribeTo(os);
+ *os << ") or (";
+ matcher2_.DescribeTo(os);
+ *os << ")";
+ }
+
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ *os << "(";
+ matcher1_.DescribeNegationTo(os);
+ *os << ") and (";
+ matcher2_.DescribeNegationTo(os);
+ *os << ")";
+ }
+
+ virtual bool MatchAndExplain(T x, MatchResultListener* listener) const {
+ // If either matcher1_ or matcher2_ matches x, we just need to
+ // explain why *one* of them matches.
+ StringMatchResultListener listener1;
+ if (matcher1_.MatchAndExplain(x, &listener1)) {
+ *listener << listener1.str();
+ return true;
+ }
+
+ StringMatchResultListener listener2;
+ if (matcher2_.MatchAndExplain(x, &listener2)) {
+ *listener << listener2.str();
+ return true;
+ }
+
+ // Otherwise we need to explain why *both* of them fail.
+ const internal::string s1 = listener1.str();
+ const internal::string s2 = listener2.str();
+
+ if (s1 == "") {
+ *listener << s2;
+ } else {
+ *listener << s1;
+ if (s2 != "") {
+ *listener << ", and " << s2;
+ }
+ }
+ return false;
+ }
+
+ private:
+ const Matcher<T> matcher1_;
+ const Matcher<T> matcher2_;
+
+ GTEST_DISALLOW_ASSIGN_(EitherOfMatcherImpl);
+};
+
+// Used for implementing the AnyOf(m_1, ..., m_n) matcher, which
+// matches a value that matches at least one of the matchers m_1, ...,
+// and m_n.
+template <typename Matcher1, typename Matcher2>
+class EitherOfMatcher {
+ public:
+ EitherOfMatcher(Matcher1 matcher1, Matcher2 matcher2)
+ : matcher1_(matcher1), matcher2_(matcher2) {}
+
+ // This template type conversion operator allows a
+ // EitherOfMatcher<Matcher1, Matcher2> object to match any type that
+ // both Matcher1 and Matcher2 can match.
+ template <typename T>
+ operator Matcher<T>() const {
+ return Matcher<T>(new EitherOfMatcherImpl<T>(
+ SafeMatcherCast<T>(matcher1_), SafeMatcherCast<T>(matcher2_)));
+ }
+
+ private:
+ Matcher1 matcher1_;
+ Matcher2 matcher2_;
+
+ GTEST_DISALLOW_ASSIGN_(EitherOfMatcher);
+};
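+
+// A minimal usage sketch (hypothetical test code): the AllOf() and AnyOf()
+// factories that Google Mock layers on BothOfMatcher/EitherOfMatcher compose
+// matchers, e.g.
+//   EXPECT_THAT(n, AllOf(Ge(0), Le(100)));             // 0 <= n <= 100
+//   EXPECT_THAT(s, AnyOf(StrEq("yes"), StrEq("no")));  // s is "yes" or "no"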
+
+// Used for implementing Truly(pred), which turns a predicate into a
+// matcher.
+template <typename Predicate>
+class TrulyMatcher {
+ public:
+ explicit TrulyMatcher(Predicate pred) : predicate_(pred) {}
+
+ // This method template allows Truly(pred) to be used as a matcher
+ // for type T where T is the argument type of predicate 'pred'. The
+ // argument is passed by reference as the predicate may be
+ // interested in the address of the argument.
+ template <typename T>
+ bool MatchAndExplain(T& x, // NOLINT
+ MatchResultListener* /* listener */) const {
+ // Without the if-statement, MSVC sometimes warns about converting
+ // a value to bool (warning 4800).
+ //
+ // We cannot write 'return !!predicate_(x);' as that doesn't work
+ // when predicate_(x) returns a class convertible to bool but
+ // having no operator!().
+ if (predicate_(x))
+ return true;
+ return false;
+ }
+
+ void DescribeTo(::std::ostream* os) const {
+ *os << "satisfies the given predicate";
+ }
+
+ void DescribeNegationTo(::std::ostream* os) const {
+ *os << "doesn't satisfy the given predicate";
+ }
+
+ private:
+ Predicate predicate_;
+
+ GTEST_DISALLOW_ASSIGN_(TrulyMatcher);
+};
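+
+// A minimal usage sketch (hypothetical test code): Truly() wraps an ordinary
+// predicate so it can be used wherever a matcher is expected, e.g.
+//   bool IsPositive(int n) { return n > 0; }
+//   ...
+//   EXPECT_THAT(5, Truly(IsPositive));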
+
+// Used for implementing Matches(matcher), which turns a matcher into
+// a predicate.
+template <typename M>
+class MatcherAsPredicate {
+ public:
+ explicit MatcherAsPredicate(M matcher) : matcher_(matcher) {}
+
+ // This template operator() allows Matches(m) to be used as a
+ // predicate on type T where m is a matcher on type T.
+ //
+ // The argument x is passed by reference instead of by value, as
+ // some matcher may be interested in its address (e.g. as in
+ // Matches(Ref(n))(x)).
+ template <typename T>
+ bool operator()(const T& x) const {
+ // We let matcher_ commit to a particular type here instead of
+ // when the MatcherAsPredicate object was constructed. This
+ // allows us to write Matches(m) where m is a polymorphic matcher
+ // (e.g. Eq(5)).
+ //
+ // If we write Matcher<T>(matcher_).Matches(x) here, it won't
+ // compile when matcher_ has type Matcher<const T&>; if we write
+ // Matcher<const T&>(matcher_).Matches(x) here, it won't compile
+ // when matcher_ has type Matcher<T>; if we just write
+ // matcher_.Matches(x), it won't compile when matcher_ is
+ // polymorphic, e.g. Eq(5).
+ //
+ // MatcherCast<const T&>() is necessary for making the code work
+ // in all of the above situations.
+ return MatcherCast<const T&>(matcher_).Matches(x);
+ }
+
+ private:
+ M matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(MatcherAsPredicate);
+};
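+
+// A minimal usage sketch (hypothetical test code): Matches() goes the other
+// way and turns a matcher into a predicate usable with STL algorithms, e.g.
+//   std::vector<int> v;
+//   ...
+//   const int non_negative = std::count_if(v.begin(), v.end(), Matches(Ge(0)));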
+
+// For implementing ASSERT_THAT() and EXPECT_THAT(). The template
+// argument M must be a type that can be converted to a matcher.
+template <typename M>
+class PredicateFormatterFromMatcher {
+ public:
+ explicit PredicateFormatterFromMatcher(const M& m) : matcher_(m) {}
+
+ // This template () operator allows a PredicateFormatterFromMatcher
+ // object to act as a predicate-formatter suitable for using with
+ // Google Test's EXPECT_PRED_FORMAT1() macro.
+ template <typename T>
+ AssertionResult operator()(const char* value_text, const T& x) const {
+ // We convert matcher_ to a Matcher<const T&> *now* instead of
+ // when the PredicateFormatterFromMatcher object was constructed,
+ // as matcher_ may be polymorphic (e.g. NotNull()) and we won't
+ // know which type to instantiate it to until we actually see the
+ // type of x here.
+ //
+ // We write MatcherCast<const T&>(matcher_) instead of
+ // Matcher<const T&>(matcher_), as the latter won't compile when
+ // matcher_ has type Matcher<T> (e.g. An<int>()).
+ const Matcher<const T&> matcher = MatcherCast<const T&>(matcher_);
+ StringMatchResultListener listener;
+ if (MatchPrintAndExplain(x, matcher, &listener))
+ return AssertionSuccess();
+
+ ::std::stringstream ss;
+ ss << "Value of: " << value_text << "\n"
+ << "Expected: ";
+ matcher.DescribeTo(&ss);
+ ss << "\n Actual: " << listener.str();
+ return AssertionFailure() << ss.str();
+ }
+
+ private:
+ const M matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(PredicateFormatterFromMatcher);
+};
+
+// A helper function for converting a matcher to a predicate-formatter
+// without the user needing to explicitly write the type. This is
+// used for implementing ASSERT_THAT() and EXPECT_THAT().
+template <typename M>
+inline PredicateFormatterFromMatcher<M>
+MakePredicateFormatterFromMatcher(const M& matcher) {
+ return PredicateFormatterFromMatcher<M>(matcher);
+}
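+
+// A rough sketch of how this is used (the exact macro expansion lives with
+// the EXPECT_THAT()/ASSERT_THAT() definitions): EXPECT_THAT(value, matcher)
+// forwards roughly to
+//   EXPECT_PRED_FORMAT1(MakePredicateFormatterFromMatcher(matcher), value);
+// so a failure prints the value, the matcher description, and the listener
+// explanation assembled above.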
+
+// Implements the polymorphic floating point equality matcher, which
+// matches two float values using ULP-based approximation. The
+// template is meant to be instantiated with FloatType being either
+// float or double.
+template <typename FloatType>
+class FloatingEqMatcher {
+ public:
+ // Constructor for FloatingEqMatcher.
+ // The matcher's input will be compared with rhs. The matcher treats two
+ // NANs as equal if nan_eq_nan is true. Otherwise, under IEEE standards,
+ // equality comparisons between NANs will always return false.
+ FloatingEqMatcher(FloatType rhs, bool nan_eq_nan) :
+ rhs_(rhs), nan_eq_nan_(nan_eq_nan) {}
+
+ // Implements floating point equality matcher as a Matcher<T>.
+ template <typename T>
+ class Impl : public MatcherInterface<T> {
+ public:
+ Impl(FloatType rhs, bool nan_eq_nan) :
+ rhs_(rhs), nan_eq_nan_(nan_eq_nan) {}
+
+ virtual bool MatchAndExplain(T value,
+ MatchResultListener* /* listener */) const {
+ const FloatingPoint<FloatType> lhs(value), rhs(rhs_);
+
+ // Compares NaNs first, if nan_eq_nan_ is true.
+ if (nan_eq_nan_ && lhs.is_nan()) {
+ return rhs.is_nan();
+ }
+
+ return lhs.AlmostEquals(rhs);
+ }
+
+ virtual void DescribeTo(::std::ostream* os) const {
+ // os->precision() returns the previously set precision, which we
+ // store to restore the ostream to its original configuration
+ // after outputting.
+ const ::std::streamsize old_precision = os->precision(
+ ::std::numeric_limits<FloatType>::digits10 + 2);
+ if (FloatingPoint<FloatType>(rhs_).is_nan()) {
+ if (nan_eq_nan_) {
+ *os << "is NaN";
+ } else {
+ *os << "never matches";
+ }
+ } else {
+ *os << "is approximately " << rhs_;
+ }
+ os->precision(old_precision);
+ }
+
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ // As before, get original precision.
+ const ::std::streamsize old_precision = os->precision(
+ ::std::numeric_limits<FloatType>::digits10 + 2);
+ if (FloatingPoint<FloatType>(rhs_).is_nan()) {
+ if (nan_eq_nan_) {
+ *os << "isn't NaN";
+ } else {
+ *os << "is anything";
+ }
+ } else {
+ *os << "isn't approximately " << rhs_;
+ }
+ // Restore original precision.
+ os->precision(old_precision);
+ }
+
+ private:
+ const FloatType rhs_;
+ const bool nan_eq_nan_;
+
+ GTEST_DISALLOW_ASSIGN_(Impl);
+ };
+
+ // The following 3 type conversion operators allow FloatEq(rhs) and
+ // NanSensitiveFloatEq(rhs) to be used as a Matcher<float>, a
+ // Matcher<const float&>, or a Matcher<float&>, but nothing else.
+ // (While Google's C++ coding style doesn't allow arguments passed
+ // by non-const reference, we may see them in code not conforming to
+ // the style. Therefore Google Mock needs to support them.)
+ operator Matcher<FloatType>() const {
+ return MakeMatcher(new Impl<FloatType>(rhs_, nan_eq_nan_));
+ }
+
+ operator Matcher<const FloatType&>() const {
+ return MakeMatcher(new Impl<const FloatType&>(rhs_, nan_eq_nan_));
+ }
+
+ operator Matcher<FloatType&>() const {
+ return MakeMatcher(new Impl<FloatType&>(rhs_, nan_eq_nan_));
+ }
+ private:
+ const FloatType rhs_;
+ const bool nan_eq_nan_;
+
+ GTEST_DISALLOW_ASSIGN_(FloatingEqMatcher);
+};
+
+// Implements the Pointee(m) matcher for matching a pointer whose
+// pointee matches matcher m. The pointer can be either raw or smart.
+template <typename InnerMatcher>
+class PointeeMatcher {
+ public:
+ explicit PointeeMatcher(const InnerMatcher& matcher) : matcher_(matcher) {}
+
+ // This type conversion operator template allows Pointee(m) to be
+ // used as a matcher for any pointer type whose pointee type is
+ // compatible with the inner matcher, where type Pointer can be
+ // either a raw pointer or a smart pointer.
+ //
+ // The reason we do this instead of relying on
+ // MakePolymorphicMatcher() is that the latter is not flexible
+ // enough for implementing the DescribeTo() method of Pointee().
+ template <typename Pointer>
+ operator Matcher<Pointer>() const {
+ return MakeMatcher(new Impl<Pointer>(matcher_));
+ }
+
+ private:
+ // The monomorphic implementation that works for a particular pointer type.
+ template <typename Pointer>
+ class Impl : public MatcherInterface<Pointer> {
+ public:
+ typedef typename PointeeOf<GTEST_REMOVE_CONST_( // NOLINT
+ GTEST_REMOVE_REFERENCE_(Pointer))>::type Pointee;
+
+ explicit Impl(const InnerMatcher& matcher)
+ : matcher_(MatcherCast<const Pointee&>(matcher)) {}
+
+ virtual void DescribeTo(::std::ostream* os) const {
+ *os << "points to a value that ";
+ matcher_.DescribeTo(os);
+ }
+
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ *os << "does not point to a value that ";
+ matcher_.DescribeTo(os);
+ }
+
+ virtual bool MatchAndExplain(Pointer pointer,
+ MatchResultListener* listener) const {
+ if (GetRawPointer(pointer) == NULL)
+ return false;
+
+ *listener << "which points to ";
+ return MatchPrintAndExplain(*pointer, matcher_, listener);
+ }
+
+ private:
+ const Matcher<const Pointee&> matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(Impl);
+ };
+
+ const InnerMatcher matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(PointeeMatcher);
+};
+
+// Implements the Field() matcher for matching a field (i.e. member
+// variable) of an object.
+template <typename Class, typename FieldType>
+class FieldMatcher {
+ public:
+ FieldMatcher(FieldType Class::*field,
+ const Matcher<const FieldType&>& matcher)
+ : field_(field), matcher_(matcher) {}
+
+ void DescribeTo(::std::ostream* os) const {
+ *os << "is an object whose given field ";
+ matcher_.DescribeTo(os);
+ }
+
+ void DescribeNegationTo(::std::ostream* os) const {
+ *os << "is an object whose given field ";
+ matcher_.DescribeNegationTo(os);
+ }
+
+ template <typename T>
+ bool MatchAndExplain(const T& value, MatchResultListener* listener) const {
+ return MatchAndExplainImpl(
+ typename ::testing::internal::
+ is_pointer<GTEST_REMOVE_CONST_(T)>::type(),
+ value, listener);
+ }
+
+ private:
+ // The first argument of MatchAndExplainImpl() is needed to help
+ // Symbian's C++ compiler choose which overload to use. Its type is
+ // true_type iff the Field() matcher is used to match a pointer.
+ bool MatchAndExplainImpl(false_type /* is_not_pointer */, const Class& obj,
+ MatchResultListener* listener) const {
+ *listener << "whose given field is ";
+ return MatchPrintAndExplain(obj.*field_, matcher_, listener);
+ }
+
+ bool MatchAndExplainImpl(true_type /* is_pointer */, const Class* p,
+ MatchResultListener* listener) const {
+ if (p == NULL)
+ return false;
+
+ *listener << "which points to an object ";
+ // Since *p has a field, it must be a class/struct/union type and
+ // thus cannot be a pointer. Therefore we pass false_type() as
+ // the first argument.
+ return MatchAndExplainImpl(false_type(), *p, listener);
+ }
+
+ const FieldType Class::*field_;
+ const Matcher<const FieldType&> matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(FieldMatcher);
+};
+
+// Implements the Property() matcher for matching a property
+// (i.e. return value of a getter method) of an object.
+template <typename Class, typename PropertyType>
+class PropertyMatcher {
+ public:
+ // The property may have a reference type, so 'const PropertyType&'
+ // may cause double references and fail to compile. That's why we
+ // need GTEST_REFERENCE_TO_CONST, which works regardless of
+ // PropertyType being a reference or not.
+ typedef GTEST_REFERENCE_TO_CONST_(PropertyType) RefToConstProperty;
+
+ PropertyMatcher(PropertyType (Class::*property)() const,
+ const Matcher<RefToConstProperty>& matcher)
+ : property_(property), matcher_(matcher) {}
+
+ void DescribeTo(::std::ostream* os) const {
+ *os << "is an object whose given property ";
+ matcher_.DescribeTo(os);
+ }
+
+ void DescribeNegationTo(::std::ostream* os) const {
+ *os << "is an object whose given property ";
+ matcher_.DescribeNegationTo(os);
+ }
+
+ template <typename T>
+  bool MatchAndExplain(const T& value, MatchResultListener* listener) const {
+ return MatchAndExplainImpl(
+ typename ::testing::internal::
+ is_pointer<GTEST_REMOVE_CONST_(T)>::type(),
+ value, listener);
+ }
+
+ private:
+ // The first argument of MatchAndExplainImpl() is needed to help
+ // Symbian's C++ compiler choose which overload to use. Its type is
+ // true_type iff the Property() matcher is used to match a pointer.
+ bool MatchAndExplainImpl(false_type /* is_not_pointer */, const Class& obj,
+ MatchResultListener* listener) const {
+ *listener << "whose given property is ";
+ // Cannot pass the return value (for example, int) to MatchPrintAndExplain,
+ // which takes a non-const reference as argument.
+ RefToConstProperty result = (obj.*property_)();
+ return MatchPrintAndExplain(result, matcher_, listener);
+ }
+
+ bool MatchAndExplainImpl(true_type /* is_pointer */, const Class* p,
+ MatchResultListener* listener) const {
+ if (p == NULL)
+ return false;
+
+ *listener << "which points to an object ";
+ // Since *p has a property method, it must be a class/struct/union
+ // type and thus cannot be a pointer. Therefore we pass
+ // false_type() as the first argument.
+ return MatchAndExplainImpl(false_type(), *p, listener);
+ }
+
+ PropertyType (Class::*property_)() const;
+ const Matcher<RefToConstProperty> matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(PropertyMatcher);
+};
+
+// Type traits specifying various features of different functors for ResultOf.
+// The default template specifies features for functor objects.
+// Functor classes have to typedef argument_type and result_type
+// to be compatible with ResultOf.
+template <typename Functor>
+struct CallableTraits {
+ typedef typename Functor::result_type ResultType;
+ typedef Functor StorageType;
+
+ static void CheckIsValid(Functor /* functor */) {}
+ template <typename T>
+ static ResultType Invoke(Functor f, T arg) { return f(arg); }
+};
+
+// Specialization for function pointers.
+template <typename ArgType, typename ResType>
+struct CallableTraits<ResType(*)(ArgType)> {
+ typedef ResType ResultType;
+ typedef ResType(*StorageType)(ArgType);
+
+ static void CheckIsValid(ResType(*f)(ArgType)) {
+ GTEST_CHECK_(f != NULL)
+ << "NULL function pointer is passed into ResultOf().";
+ }
+ template <typename T>
+ static ResType Invoke(ResType(*f)(ArgType), T arg) {
+ return (*f)(arg);
+ }
+};
+
+// Implements the ResultOf() matcher for matching a return value of a
+// unary function of an object.
+template <typename Callable>
+class ResultOfMatcher {
+ public:
+ typedef typename CallableTraits<Callable>::ResultType ResultType;
+
+ ResultOfMatcher(Callable callable, const Matcher<ResultType>& matcher)
+ : callable_(callable), matcher_(matcher) {
+ CallableTraits<Callable>::CheckIsValid(callable_);
+ }
+
+ template <typename T>
+ operator Matcher<T>() const {
+ return Matcher<T>(new Impl<T>(callable_, matcher_));
+ }
+
+ private:
+ typedef typename CallableTraits<Callable>::StorageType CallableStorageType;
+
+ template <typename T>
+ class Impl : public MatcherInterface<T> {
+ public:
+ Impl(CallableStorageType callable, const Matcher<ResultType>& matcher)
+ : callable_(callable), matcher_(matcher) {}
+
+ virtual void DescribeTo(::std::ostream* os) const {
+ *os << "is mapped by the given callable to a value that ";
+ matcher_.DescribeTo(os);
+ }
+
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ *os << "is mapped by the given callable to a value that ";
+ matcher_.DescribeNegationTo(os);
+ }
+
+ virtual bool MatchAndExplain(T obj, MatchResultListener* listener) const {
+ *listener << "which is mapped by the given callable to ";
+ // Cannot pass the return value (for example, int) to
+ // MatchPrintAndExplain, which takes a non-const reference as argument.
+ ResultType result =
+ CallableTraits<Callable>::template Invoke<T>(callable_, obj);
+ return MatchPrintAndExplain(result, matcher_, listener);
+ }
+
+ private:
+    // Functors often define operator() as a non-const method even though
+    // they are actually stateless. But we need to use them even when
+    // 'this' is a const pointer. It's the user's responsibility not to
+    // use stateful callables with ResultOf(), which doesn't guarantee
+ // how many times the callable will be invoked.
+ mutable CallableStorageType callable_;
+ const Matcher<ResultType> matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(Impl);
+ }; // class Impl
+
+ const CallableStorageType callable_;
+ const Matcher<ResultType> matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(ResultOfMatcher);
+};
+
+// Implements an equality matcher for any STL-style container whose elements
+// support ==. This matcher is like Eq(), but its failure explanations provide
+// more detailed information that is useful when the container is used as a set.
+// The failure message reports elements that are in one of the operands but not
+// the other. The failure messages do not report duplicate or out-of-order
+// elements in the containers (which don't matter to sets, but can
+// occur if the containers are vectors or lists, for example).
+//
+// Uses the container's const_iterator, value_type, operator ==,
+// begin(), and end().
+template <typename Container>
+class ContainerEqMatcher {
+ public:
+ typedef internal::StlContainerView<Container> View;
+ typedef typename View::type StlContainer;
+ typedef typename View::const_reference StlContainerReference;
+
+ // We make a copy of rhs in case the elements in it are modified
+ // after this matcher is created.
+ explicit ContainerEqMatcher(const Container& rhs) : rhs_(View::Copy(rhs)) {
+ // Makes sure the user doesn't instantiate this class template
+ // with a const or reference type.
+ (void)testing::StaticAssertTypeEq<Container,
+ GTEST_REMOVE_REFERENCE_AND_CONST_(Container)>();
+ }
+
+ void DescribeTo(::std::ostream* os) const {
+ *os << "equals ";
+ UniversalPrint(rhs_, os);
+ }
+ void DescribeNegationTo(::std::ostream* os) const {
+ *os << "does not equal ";
+ UniversalPrint(rhs_, os);
+ }
+
+ template <typename LhsContainer>
+ bool MatchAndExplain(const LhsContainer& lhs,
+ MatchResultListener* listener) const {
+ // GTEST_REMOVE_CONST_() is needed to work around an MSVC 8.0 bug
+ // that causes LhsContainer to be a const type sometimes.
+ typedef internal::StlContainerView<GTEST_REMOVE_CONST_(LhsContainer)>
+ LhsView;
+ typedef typename LhsView::type LhsStlContainer;
+ StlContainerReference lhs_stl_container = LhsView::ConstReference(lhs);
+ if (lhs_stl_container == rhs_)
+ return true;
+
+ ::std::ostream* const os = listener->stream();
+ if (os != NULL) {
+ // Something is different. Check for extra values first.
+ bool printed_header = false;
+ for (typename LhsStlContainer::const_iterator it =
+ lhs_stl_container.begin();
+ it != lhs_stl_container.end(); ++it) {
+ if (internal::ArrayAwareFind(rhs_.begin(), rhs_.end(), *it) ==
+ rhs_.end()) {
+ if (printed_header) {
+ *os << ", ";
+ } else {
+ *os << "which has these unexpected elements: ";
+ printed_header = true;
+ }
+ UniversalPrint(*it, os);
+ }
+ }
+
+ // Now check for missing values.
+ bool printed_header2 = false;
+ for (typename StlContainer::const_iterator it = rhs_.begin();
+ it != rhs_.end(); ++it) {
+ if (internal::ArrayAwareFind(
+ lhs_stl_container.begin(), lhs_stl_container.end(), *it) ==
+ lhs_stl_container.end()) {
+ if (printed_header2) {
+ *os << ", ";
+ } else {
+ *os << (printed_header ? ",\nand" : "which")
+ << " doesn't have these expected elements: ";
+ printed_header2 = true;
+ }
+ UniversalPrint(*it, os);
+ }
+ }
+ }
+
+ return false;
+ }
+
+ private:
+ const StlContainer rhs_;
+
+ GTEST_DISALLOW_ASSIGN_(ContainerEqMatcher);
+};
+
+// Implements Pointwise(tuple_matcher, rhs_container). tuple_matcher
+// must be able to be safely cast to Matcher<tuple<const T1&, const
+// T2&> >, where T1 and T2 are the types of elements in the LHS
+// container and the RHS container respectively.
+template <typename TupleMatcher, typename RhsContainer>
+class PointwiseMatcher {
+ public:
+ typedef internal::StlContainerView<RhsContainer> RhsView;
+ typedef typename RhsView::type RhsStlContainer;
+ typedef typename RhsStlContainer::value_type RhsValue;
+
+ // Like ContainerEq, we make a copy of rhs in case the elements in
+ // it are modified after this matcher is created.
+ PointwiseMatcher(const TupleMatcher& tuple_matcher, const RhsContainer& rhs)
+ : tuple_matcher_(tuple_matcher), rhs_(RhsView::Copy(rhs)) {
+ // Makes sure the user doesn't instantiate this class template
+ // with a const or reference type.
+ (void)testing::StaticAssertTypeEq<RhsContainer,
+ GTEST_REMOVE_REFERENCE_AND_CONST_(RhsContainer)>();
+ }
+
+ template <typename LhsContainer>
+ operator Matcher<LhsContainer>() const {
+ return MakeMatcher(new Impl<LhsContainer>(tuple_matcher_, rhs_));
+ }
+
+ template <typename LhsContainer>
+ class Impl : public MatcherInterface<LhsContainer> {
+ public:
+ typedef internal::StlContainerView<
+ GTEST_REMOVE_REFERENCE_AND_CONST_(LhsContainer)> LhsView;
+ typedef typename LhsView::type LhsStlContainer;
+ typedef typename LhsView::const_reference LhsStlContainerReference;
+ typedef typename LhsStlContainer::value_type LhsValue;
+ // We pass the LHS value and the RHS value to the inner matcher by
+ // reference, as they may be expensive to copy. We must use tuple
+ // instead of pair here, as a pair cannot hold references (C++ 98,
+ // 20.2.2 [lib.pairs]).
+ typedef std::tr1::tuple<const LhsValue&, const RhsValue&> InnerMatcherArg;
+
+ Impl(const TupleMatcher& tuple_matcher, const RhsStlContainer& rhs)
+ // mono_tuple_matcher_ holds a monomorphic version of the tuple matcher.
+ : mono_tuple_matcher_(SafeMatcherCast<InnerMatcherArg>(tuple_matcher)),
+ rhs_(rhs) {}
+
+ virtual void DescribeTo(::std::ostream* os) const {
+ *os << "contains " << rhs_.size()
+ << " values, where each value and its corresponding value in ";
+ UniversalPrinter<RhsStlContainer>::Print(rhs_, os);
+ *os << " ";
+ mono_tuple_matcher_.DescribeTo(os);
+ }
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ *os << "doesn't contain exactly " << rhs_.size()
+ << " values, or contains a value x at some index i"
+ << " where x and the i-th value of ";
+ UniversalPrint(rhs_, os);
+ *os << " ";
+ mono_tuple_matcher_.DescribeNegationTo(os);
+ }
+
+ virtual bool MatchAndExplain(LhsContainer lhs,
+ MatchResultListener* listener) const {
+ LhsStlContainerReference lhs_stl_container = LhsView::ConstReference(lhs);
+ const size_t actual_size = lhs_stl_container.size();
+ if (actual_size != rhs_.size()) {
+ *listener << "which contains " << actual_size << " values";
+ return false;
+ }
+
+ typename LhsStlContainer::const_iterator left = lhs_stl_container.begin();
+ typename RhsStlContainer::const_iterator right = rhs_.begin();
+ for (size_t i = 0; i != actual_size; ++i, ++left, ++right) {
+ const InnerMatcherArg value_pair(*left, *right);
+
+ if (listener->IsInterested()) {
+ StringMatchResultListener inner_listener;
+ if (!mono_tuple_matcher_.MatchAndExplain(
+ value_pair, &inner_listener)) {
+ *listener << "where the value pair (";
+ UniversalPrint(*left, listener->stream());
+ *listener << ", ";
+ UniversalPrint(*right, listener->stream());
+ *listener << ") at index #" << i << " don't match";
+ PrintIfNotEmpty(inner_listener.str(), listener->stream());
+ return false;
+ }
+ } else {
+ if (!mono_tuple_matcher_.Matches(value_pair))
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ private:
+ const Matcher<InnerMatcherArg> mono_tuple_matcher_;
+ const RhsStlContainer rhs_;
+
+ GTEST_DISALLOW_ASSIGN_(Impl);
+ };
+
+ private:
+ const TupleMatcher tuple_matcher_;
+ const RhsStlContainer rhs_;
+
+ GTEST_DISALLOW_ASSIGN_(PointwiseMatcher);
+};
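+
+// A minimal usage sketch (hypothetical test code) with the Pointwise()
+// factory that Google Mock provides on top of this class:
+//   std::vector<double> actual, expected;
+//   ...
+//   EXPECT_THAT(actual, Pointwise(Eq(), expected));  // actual[i] == expected[i]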
+
+// Holds the logic common to ContainsMatcherImpl and EachMatcherImpl.
+template <typename Container>
+class QuantifierMatcherImpl : public MatcherInterface<Container> {
+ public:
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+ typedef StlContainerView<RawContainer> View;
+ typedef typename View::type StlContainer;
+ typedef typename View::const_reference StlContainerReference;
+ typedef typename StlContainer::value_type Element;
+
+ template <typename InnerMatcher>
+ explicit QuantifierMatcherImpl(InnerMatcher inner_matcher)
+ : inner_matcher_(
+ testing::SafeMatcherCast<const Element&>(inner_matcher)) {}
+
+ // Checks whether:
+ // * All elements in the container match, if all_elements_should_match.
+ // * Any element in the container matches, if !all_elements_should_match.
+ bool MatchAndExplainImpl(bool all_elements_should_match,
+ Container container,
+ MatchResultListener* listener) const {
+ StlContainerReference stl_container = View::ConstReference(container);
+ size_t i = 0;
+ for (typename StlContainer::const_iterator it = stl_container.begin();
+ it != stl_container.end(); ++it, ++i) {
+ StringMatchResultListener inner_listener;
+ const bool matches = inner_matcher_.MatchAndExplain(*it, &inner_listener);
+
+ if (matches != all_elements_should_match) {
+ *listener << "whose element #" << i
+ << (matches ? " matches" : " doesn't match");
+ PrintIfNotEmpty(inner_listener.str(), listener->stream());
+ return !all_elements_should_match;
+ }
+ }
+ return all_elements_should_match;
+ }
+
+ protected:
+ const Matcher<const Element&> inner_matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(QuantifierMatcherImpl);
+};
+
+// Implements Contains(element_matcher) for the given argument type Container.
+// Symmetric to EachMatcherImpl.
+template <typename Container>
+class ContainsMatcherImpl : public QuantifierMatcherImpl<Container> {
+ public:
+ template <typename InnerMatcher>
+ explicit ContainsMatcherImpl(InnerMatcher inner_matcher)
+ : QuantifierMatcherImpl<Container>(inner_matcher) {}
+
+ // Describes what this matcher does.
+ virtual void DescribeTo(::std::ostream* os) const {
+ *os << "contains at least one element that ";
+ this->inner_matcher_.DescribeTo(os);
+ }
+
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ *os << "doesn't contain any element that ";
+ this->inner_matcher_.DescribeTo(os);
+ }
+
+ virtual bool MatchAndExplain(Container container,
+ MatchResultListener* listener) const {
+ return this->MatchAndExplainImpl(false, container, listener);
+ }
+
+ private:
+ GTEST_DISALLOW_ASSIGN_(ContainsMatcherImpl);
+};
+
+// Implements Each(element_matcher) for the given argument type Container.
+// Symmetric to ContainsMatcherImpl.
+template <typename Container>
+class EachMatcherImpl : public QuantifierMatcherImpl<Container> {
+ public:
+ template <typename InnerMatcher>
+ explicit EachMatcherImpl(InnerMatcher inner_matcher)
+ : QuantifierMatcherImpl<Container>(inner_matcher) {}
+
+ // Describes what this matcher does.
+ virtual void DescribeTo(::std::ostream* os) const {
+ *os << "only contains elements that ";
+ this->inner_matcher_.DescribeTo(os);
+ }
+
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ *os << "contains some element that ";
+ this->inner_matcher_.DescribeNegationTo(os);
+ }
+
+ virtual bool MatchAndExplain(Container container,
+ MatchResultListener* listener) const {
+ return this->MatchAndExplainImpl(true, container, listener);
+ }
+
+ private:
+ GTEST_DISALLOW_ASSIGN_(EachMatcherImpl);
+};
+
+// Implements polymorphic Contains(element_matcher).
+template <typename M>
+class ContainsMatcher {
+ public:
+ explicit ContainsMatcher(M m) : inner_matcher_(m) {}
+
+ template <typename Container>
+ operator Matcher<Container>() const {
+ return MakeMatcher(new ContainsMatcherImpl<Container>(inner_matcher_));
+ }
+
+ private:
+ const M inner_matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(ContainsMatcher);
+};
+
+// Implements polymorphic Each(element_matcher).
+template <typename M>
+class EachMatcher {
+ public:
+ explicit EachMatcher(M m) : inner_matcher_(m) {}
+
+ template <typename Container>
+ operator Matcher<Container>() const {
+ return MakeMatcher(new EachMatcherImpl<Container>(inner_matcher_));
+ }
+
+ private:
+ const M inner_matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(EachMatcher);
+};
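+
+// A minimal usage sketch (hypothetical test code) using the Contains() and
+// Each() factories Google Mock builds on these classes:
+//   std::vector<int> v;  // say it holds 1, 2, 3
+//   EXPECT_THAT(v, Contains(Ge(3)));   // at least one element >= 3
+//   EXPECT_THAT(v, Each(Gt(0)));       // every element > 0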
+
+// Implements Key(inner_matcher) for the given argument pair type.
+// Key(inner_matcher) matches an std::pair whose 'first' field matches
+// inner_matcher. For example, Contains(Key(Ge(5))) can be used to match an
+// std::map that contains at least one element whose key is >= 5.
+template <typename PairType>
+class KeyMatcherImpl : public MatcherInterface<PairType> {
+ public:
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(PairType) RawPairType;
+ typedef typename RawPairType::first_type KeyType;
+
+ template <typename InnerMatcher>
+ explicit KeyMatcherImpl(InnerMatcher inner_matcher)
+ : inner_matcher_(
+ testing::SafeMatcherCast<const KeyType&>(inner_matcher)) {
+ }
+
+ // Returns true iff 'key_value.first' (the key) matches the inner matcher.
+ virtual bool MatchAndExplain(PairType key_value,
+ MatchResultListener* listener) const {
+ StringMatchResultListener inner_listener;
+ const bool match = inner_matcher_.MatchAndExplain(key_value.first,
+ &inner_listener);
+ const internal::string explanation = inner_listener.str();
+ if (explanation != "") {
+ *listener << "whose first field is a value " << explanation;
+ }
+ return match;
+ }
+
+ // Describes what this matcher does.
+ virtual void DescribeTo(::std::ostream* os) const {
+ *os << "has a key that ";
+ inner_matcher_.DescribeTo(os);
+ }
+
+ // Describes what the negation of this matcher does.
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ *os << "doesn't have a key that ";
+ inner_matcher_.DescribeTo(os);
+ }
+
+ private:
+ const Matcher<const KeyType&> inner_matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(KeyMatcherImpl);
+};
+
+// Implements polymorphic Key(matcher_for_key).
+template <typename M>
+class KeyMatcher {
+ public:
+ explicit KeyMatcher(M m) : matcher_for_key_(m) {}
+
+ template <typename PairType>
+ operator Matcher<PairType>() const {
+ return MakeMatcher(new KeyMatcherImpl<PairType>(matcher_for_key_));
+ }
+
+ private:
+ const M matcher_for_key_;
+
+ GTEST_DISALLOW_ASSIGN_(KeyMatcher);
+};
+
+// Implements Pair(first_matcher, second_matcher) for the given argument pair
+// type with its two matchers. See Pair() function below.
+template <typename PairType>
+class PairMatcherImpl : public MatcherInterface<PairType> {
+ public:
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(PairType) RawPairType;
+ typedef typename RawPairType::first_type FirstType;
+ typedef typename RawPairType::second_type SecondType;
+
+ template <typename FirstMatcher, typename SecondMatcher>
+ PairMatcherImpl(FirstMatcher first_matcher, SecondMatcher second_matcher)
+ : first_matcher_(
+ testing::SafeMatcherCast<const FirstType&>(first_matcher)),
+ second_matcher_(
+ testing::SafeMatcherCast<const SecondType&>(second_matcher)) {
+ }
+
+ // Describes what this matcher does.
+ virtual void DescribeTo(::std::ostream* os) const {
+ *os << "has a first field that ";
+ first_matcher_.DescribeTo(os);
+ *os << ", and has a second field that ";
+ second_matcher_.DescribeTo(os);
+ }
+
+ // Describes what the negation of this matcher does.
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ *os << "has a first field that ";
+ first_matcher_.DescribeNegationTo(os);
+ *os << ", or has a second field that ";
+ second_matcher_.DescribeNegationTo(os);
+ }
+
+ // Returns true iff 'a_pair.first' matches first_matcher and 'a_pair.second'
+ // matches second_matcher.
+ virtual bool MatchAndExplain(PairType a_pair,
+ MatchResultListener* listener) const {
+ if (!listener->IsInterested()) {
+ // If the listener is not interested, we don't need to construct the
+ // explanation.
+ return first_matcher_.Matches(a_pair.first) &&
+ second_matcher_.Matches(a_pair.second);
+ }
+ StringMatchResultListener first_inner_listener;
+ if (!first_matcher_.MatchAndExplain(a_pair.first,
+ &first_inner_listener)) {
+ *listener << "whose first field does not match";
+ PrintIfNotEmpty(first_inner_listener.str(), listener->stream());
+ return false;
+ }
+ StringMatchResultListener second_inner_listener;
+ if (!second_matcher_.MatchAndExplain(a_pair.second,
+ &second_inner_listener)) {
+ *listener << "whose second field does not match";
+ PrintIfNotEmpty(second_inner_listener.str(), listener->stream());
+ return false;
+ }
+ ExplainSuccess(first_inner_listener.str(), second_inner_listener.str(),
+ listener);
+ return true;
+ }
+
+ private:
+ void ExplainSuccess(const internal::string& first_explanation,
+ const internal::string& second_explanation,
+ MatchResultListener* listener) const {
+ *listener << "whose both fields match";
+ if (first_explanation != "") {
+ *listener << ", where the first field is a value " << first_explanation;
+ }
+ if (second_explanation != "") {
+ *listener << ", ";
+ if (first_explanation != "") {
+ *listener << "and ";
+ } else {
+ *listener << "where ";
+ }
+ *listener << "the second field is a value " << second_explanation;
+ }
+ }
+
+ const Matcher<const FirstType&> first_matcher_;
+ const Matcher<const SecondType&> second_matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(PairMatcherImpl);
+};
+
+// Implements polymorphic Pair(first_matcher, second_matcher).
+template <typename FirstMatcher, typename SecondMatcher>
+class PairMatcher {
+ public:
+ PairMatcher(FirstMatcher first_matcher, SecondMatcher second_matcher)
+ : first_matcher_(first_matcher), second_matcher_(second_matcher) {}
+
+ template <typename PairType>
+ operator Matcher<PairType> () const {
+ return MakeMatcher(
+ new PairMatcherImpl<PairType>(
+ first_matcher_, second_matcher_));
+ }
+
+ private:
+ const FirstMatcher first_matcher_;
+ const SecondMatcher second_matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(PairMatcher);
+};
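+
+// A minimal usage sketch (hypothetical test code) combining the Key()/Pair()
+// factories with the container matchers above:
+//   std::map<int, std::string> m;
+//   ...
+//   EXPECT_THAT(m, Contains(Key(5)));
+//   EXPECT_THAT(m, Contains(Pair(5, "five")));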
+
+// Implements ElementsAre() and ElementsAreArray().
+template <typename Container>
+class ElementsAreMatcherImpl : public MatcherInterface<Container> {
+ public:
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+ typedef internal::StlContainerView<RawContainer> View;
+ typedef typename View::type StlContainer;
+ typedef typename View::const_reference StlContainerReference;
+ typedef typename StlContainer::value_type Element;
+
+ // Constructs the matcher from a sequence of element values or
+ // element matchers.
+ template <typename InputIter>
+ ElementsAreMatcherImpl(InputIter first, size_t a_count) {
+ matchers_.reserve(a_count);
+ InputIter it = first;
+ for (size_t i = 0; i != a_count; ++i, ++it) {
+ matchers_.push_back(MatcherCast<const Element&>(*it));
+ }
+ }
+
+ // Describes what this matcher does.
+ virtual void DescribeTo(::std::ostream* os) const {
+ if (count() == 0) {
+ *os << "is empty";
+ } else if (count() == 1) {
+ *os << "has 1 element that ";
+ matchers_[0].DescribeTo(os);
+ } else {
+ *os << "has " << Elements(count()) << " where\n";
+ for (size_t i = 0; i != count(); ++i) {
+ *os << "element #" << i << " ";
+ matchers_[i].DescribeTo(os);
+ if (i + 1 < count()) {
+ *os << ",\n";
+ }
+ }
+ }
+ }
+
+ // Describes what the negation of this matcher does.
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ if (count() == 0) {
+ *os << "isn't empty";
+ return;
+ }
+
+ *os << "doesn't have " << Elements(count()) << ", or\n";
+ for (size_t i = 0; i != count(); ++i) {
+ *os << "element #" << i << " ";
+ matchers_[i].DescribeNegationTo(os);
+ if (i + 1 < count()) {
+ *os << ", or\n";
+ }
+ }
+ }
+
+ virtual bool MatchAndExplain(Container container,
+ MatchResultListener* listener) const {
+ StlContainerReference stl_container = View::ConstReference(container);
+ const size_t actual_count = stl_container.size();
+ if (actual_count != count()) {
+ // The element count doesn't match. If the container is empty,
+ // there's no need to explain anything as Google Mock already
+ // prints the empty container. Otherwise we just need to show
+ // how many elements there actually are.
+ if (actual_count != 0) {
+ *listener << "which has " << Elements(actual_count);
+ }
+ return false;
+ }
+
+ typename StlContainer::const_iterator it = stl_container.begin();
+ // explanations[i] is the explanation of the element at index i.
+ std::vector<internal::string> explanations(count());
+ for (size_t i = 0; i != count(); ++it, ++i) {
+ StringMatchResultListener s;
+ if (matchers_[i].MatchAndExplain(*it, &s)) {
+ explanations[i] = s.str();
+ } else {
+ // The container has the right size but the i-th element
+ // doesn't match its expectation.
+ *listener << "whose element #" << i << " doesn't match";
+ PrintIfNotEmpty(s.str(), listener->stream());
+ return false;
+ }
+ }
+
+ // Every element matches its expectation. We need to explain why
+ // (the obvious ones can be skipped).
+ bool reason_printed = false;
+ for (size_t i = 0; i != count(); ++i) {
+ const internal::string& s = explanations[i];
+ if (!s.empty()) {
+ if (reason_printed) {
+ *listener << ",\nand ";
+ }
+ *listener << "whose element #" << i << " matches, " << s;
+ reason_printed = true;
+ }
+ }
+
+ return true;
+ }
+
+ private:
+ static Message Elements(size_t count) {
+ return Message() << count << (count == 1 ? " element" : " elements");
+ }
+
+ size_t count() const { return matchers_.size(); }
+ std::vector<Matcher<const Element&> > matchers_;
+
+ GTEST_DISALLOW_ASSIGN_(ElementsAreMatcherImpl);
+};
+
+// Implements ElementsAre() of 0 arguments.
+class ElementsAreMatcher0 {
+ public:
+ ElementsAreMatcher0() {}
+
+ template <typename Container>
+ operator Matcher<Container>() const {
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+ typedef typename internal::StlContainerView<RawContainer>::type::value_type
+ Element;
+
+ const Matcher<const Element&>* const matchers = NULL;
+ return MakeMatcher(new ElementsAreMatcherImpl<Container>(matchers, 0));
+ }
+};
+
+// Implements ElementsAreArray().
+template <typename T>
+class ElementsAreArrayMatcher {
+ public:
+ ElementsAreArrayMatcher(const T* first, size_t count) :
+ first_(first), count_(count) {}
+
+ template <typename Container>
+ operator Matcher<Container>() const {
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+ typedef typename internal::StlContainerView<RawContainer>::type::value_type
+ Element;
+
+ return MakeMatcher(new ElementsAreMatcherImpl<Container>(first_, count_));
+ }
+
+ private:
+ const T* const first_;
+ const size_t count_;
+
+ GTEST_DISALLOW_ASSIGN_(ElementsAreArrayMatcher);
+};
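+
+// A minimal usage sketch (hypothetical test code; the variadic ElementsAre()
+// and ElementsAreArray() factories that build on these classes live in a
+// companion generated header):
+//   std::vector<int> v;  // say it holds 1, 2, 3
+//   EXPECT_THAT(v, ElementsAre(1, Gt(1), 3));
+//   const int expected[] = {1, 2, 3};
+//   EXPECT_THAT(v, ElementsAreArray(expected, 3));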
+
+// Returns the description for a matcher defined using the MATCHER*()
+// macro where the user-supplied description string is "", if
+// 'negation' is false; otherwise returns the description of the
+// negation of the matcher. 'param_values' contains a list of strings
+// that are the print-out of the matcher's parameters.
+string FormatMatcherDescription(bool negation, const char* matcher_name,
+ const Strings& param_values);
+
+} // namespace internal
+
+// Implements MatcherCast().
+template <typename T, typename M>
+inline Matcher<T> MatcherCast(M matcher) {
+ return internal::MatcherCastImpl<T, M>::Cast(matcher);
+}
+
+// _ is a matcher that matches anything of any type.
+//
+// This definition is fine as:
+//
+// 1. The C++ standard permits using the name _ in a namespace that
+// is not the global namespace or ::std.
+// 2. The AnythingMatcher class has no data member or constructor,
+// so it's OK to create global variables of this type.
+// 3. c-style has approved of using _ in this case.
+const internal::AnythingMatcher _ = {};
+// Creates a matcher that matches any value of the given type T.
+template <typename T>
+inline Matcher<T> A() { return MakeMatcher(new internal::AnyMatcherImpl<T>()); }
+
+// Creates a matcher that matches any value of the given type T.
+template <typename T>
+inline Matcher<T> An() { return A<T>(); }
+
+// Creates a polymorphic matcher that matches anything equal to x.
+// Note: if the parameter of Eq() were declared as const T&, Eq("foo")
+// wouldn't compile.
+template <typename T>
+inline internal::EqMatcher<T> Eq(T x) { return internal::EqMatcher<T>(x); }
+
+// Constructs a Matcher<T> from a 'value' of type T. The constructed
+// matcher matches any value that's equal to 'value'.
+template <typename T>
+Matcher<T>::Matcher(T value) { *this = Eq(value); }
+
+// Creates a monomorphic matcher that matches anything with type Lhs
+// and equal to rhs. A user may need to use this instead of Eq(...)
+// in order to resolve an overloading ambiguity.
+//
+// TypedEq<T>(x) is just a convenient short-hand for Matcher<T>(Eq(x))
+// or Matcher<T>(x), but more readable than the latter.
+//
+// We could define similar monomorphic matchers for other comparison
+// operations (e.g. TypedLt, TypedGe, etc.), but decided not to do
+// it yet as those are used much less than Eq() in practice. A user
+// can always write Matcher<T>(Lt(5)) to be explicit about the type,
+// for example.
+template <typename Lhs, typename Rhs>
+inline Matcher<Lhs> TypedEq(const Rhs& rhs) { return Eq(rhs); }
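+
+// A minimal usage sketch (hypothetical mock code): given a mock whose method
+// SetValue() is overloaded on int and double, TypedEq<T>() selects the
+// overload unambiguously:
+//   EXPECT_CALL(mock, SetValue(TypedEq<int>(5)));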
+
+// Creates a polymorphic matcher that matches anything >= x.
+template <typename Rhs>
+inline internal::GeMatcher<Rhs> Ge(Rhs x) {
+ return internal::GeMatcher<Rhs>(x);
+}
+
+// Creates a polymorphic matcher that matches anything > x.
+template <typename Rhs>
+inline internal::GtMatcher<Rhs> Gt(Rhs x) {
+ return internal::GtMatcher<Rhs>(x);
+}
+
+// Creates a polymorphic matcher that matches anything <= x.
+template <typename Rhs>
+inline internal::LeMatcher<Rhs> Le(Rhs x) {
+ return internal::LeMatcher<Rhs>(x);
+}
+
+// Creates a polymorphic matcher that matches anything < x.
+template <typename Rhs>
+inline internal::LtMatcher<Rhs> Lt(Rhs x) {
+ return internal::LtMatcher<Rhs>(x);
+}
+
+// Creates a polymorphic matcher that matches anything != x.
+template <typename Rhs>
+inline internal::NeMatcher<Rhs> Ne(Rhs x) {
+ return internal::NeMatcher<Rhs>(x);
+}
+
+// Creates a polymorphic matcher that matches any NULL pointer.
+inline PolymorphicMatcher<internal::IsNullMatcher > IsNull() {
+ return MakePolymorphicMatcher(internal::IsNullMatcher());
+}
+
+// Creates a polymorphic matcher that matches any non-NULL pointer.
+// This is convenient as Not(NULL) doesn't compile (the compiler
+// thinks that that expression is comparing a pointer with an integer).
+inline PolymorphicMatcher<internal::NotNullMatcher > NotNull() {
+ return MakePolymorphicMatcher(internal::NotNullMatcher());
+}
+
+// Creates a polymorphic matcher that matches any argument that
+// references variable x.
+template <typename T>
+inline internal::RefMatcher<T&> Ref(T& x) { // NOLINT
+ return internal::RefMatcher<T&>(x);
+}
+
+// Creates a matcher that matches any double argument approximately
+// equal to rhs, where two NANs are considered unequal.
+inline internal::FloatingEqMatcher<double> DoubleEq(double rhs) {
+ return internal::FloatingEqMatcher<double>(rhs, false);
+}
+
+// Creates a matcher that matches any double argument approximately
+// equal to rhs, including NaN values when rhs is NaN.
+inline internal::FloatingEqMatcher<double> NanSensitiveDoubleEq(double rhs) {
+ return internal::FloatingEqMatcher<double>(rhs, true);
+}
+
+// Creates a matcher that matches any float argument approximately
+// equal to rhs, where two NANs are considered unequal.
+inline internal::FloatingEqMatcher<float> FloatEq(float rhs) {
+ return internal::FloatingEqMatcher<float>(rhs, false);
+}
+
+// Creates a matcher that matches any float argument approximately
+// equal to rhs, including NaN values when rhs is NaN.
+inline internal::FloatingEqMatcher<float> NanSensitiveFloatEq(float rhs) {
+ return internal::FloatingEqMatcher<float>(rhs, true);
+}
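+
+// A minimal usage sketch (hypothetical test code):
+//   double x = 0.1 + 0.2;
+//   EXPECT_THAT(x, DoubleEq(0.3));              // ULP-based comparison
+//   EXPECT_THAT(x, NanSensitiveDoubleEq(0.3));  // additionally treats NaN == NaN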
+
+// Creates a matcher that matches a pointer (raw or smart) that points
+// to a value that matches inner_matcher.
+template <typename InnerMatcher>
+inline internal::PointeeMatcher<InnerMatcher> Pointee(
+ const InnerMatcher& inner_matcher) {
+ return internal::PointeeMatcher<InnerMatcher>(inner_matcher);
+}
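+
+// A minimal usage sketch (hypothetical test code):
+//   int n = 7;
+//   int* p = &n;
+//   EXPECT_THAT(p, NotNull());
+//   EXPECT_THAT(p, Pointee(Ge(5)));   // *p >= 5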
+
+// Creates a matcher that matches an object whose given field matches
+// 'matcher'. For example,
+// Field(&Foo::number, Ge(5))
+// matches a Foo object x iff x.number >= 5.
+template <typename Class, typename FieldType, typename FieldMatcher>
+inline PolymorphicMatcher<
+ internal::FieldMatcher<Class, FieldType> > Field(
+ FieldType Class::*field, const FieldMatcher& matcher) {
+ return MakePolymorphicMatcher(
+ internal::FieldMatcher<Class, FieldType>(
+ field, MatcherCast<const FieldType&>(matcher)));
+ // The call to MatcherCast() is required for supporting inner
+ // matchers of compatible types. For example, it allows
+ // Field(&Foo::bar, m)
+ // to compile where bar is an int32 and m is a matcher for int64.
+}
+
+// Creates a matcher that matches an object whose given property
+// matches 'matcher'. For example,
+// Property(&Foo::str, StartsWith("hi"))
+// matches a Foo object x iff x.str() starts with "hi".
+template <typename Class, typename PropertyType, typename PropertyMatcher>
+inline PolymorphicMatcher<
+ internal::PropertyMatcher<Class, PropertyType> > Property(
+ PropertyType (Class::*property)() const, const PropertyMatcher& matcher) {
+ return MakePolymorphicMatcher(
+ internal::PropertyMatcher<Class, PropertyType>(
+ property,
+ MatcherCast<GTEST_REFERENCE_TO_CONST_(PropertyType)>(matcher)));
+ // The call to MatcherCast() is required for supporting inner
+ // matchers of compatible types. For example, it allows
+ // Property(&Foo::bar, m)
+ // to compile where bar() returns an int32 and m is a matcher for int64.
+}
+
+// Creates a matcher that matches an object iff the result of applying
+// a callable to x matches 'matcher'.
+// For example,
+// ResultOf(f, StartsWith("hi"))
+// matches a Foo object x iff f(x) starts with "hi".
+// The callable parameter can be a function, a function pointer, or a functor.
+// The callable has to satisfy the following conditions:
+//   * It must keep no state that affects the results of calls on it, and it
+//     must make no assumptions about how many times it will be called. Any
+//     state it does keep must be protected from concurrent access.
+// * If it is a function object, it has to define type result_type.
+// We recommend deriving your functor classes from std::unary_function.
+template <typename Callable, typename ResultOfMatcher>
+internal::ResultOfMatcher<Callable> ResultOf(
+ Callable callable, const ResultOfMatcher& matcher) {
+ return internal::ResultOfMatcher<Callable>(
+ callable,
+ MatcherCast<typename internal::CallableTraits<Callable>::ResultType>(
+ matcher));
+ // The call to MatcherCast() is required for supporting inner
+ // matchers of compatible types. For example, it allows
+ // ResultOf(Function, m)
+ // to compile where Function() returns an int32 and m is a matcher for int64.
+}
+
+// String matchers.
+
+// Matches a string equal to str.
+inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::string> >
+ StrEq(const internal::string& str) {
+ return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::string>(
+ str, true, true));
+}
+
+// Matches a string not equal to str.
+inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::string> >
+ StrNe(const internal::string& str) {
+ return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::string>(
+ str, false, true));
+}
+
+// Matches a string equal to str, ignoring case.
+inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::string> >
+ StrCaseEq(const internal::string& str) {
+ return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::string>(
+ str, true, false));
+}
+
+// Matches a string not equal to str, ignoring case.
+inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::string> >
+ StrCaseNe(const internal::string& str) {
+ return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::string>(
+ str, false, false));
+}
+
+// Creates a matcher that matches any string, std::string, or C string
+// that contains the given substring.
+inline PolymorphicMatcher<internal::HasSubstrMatcher<internal::string> >
+ HasSubstr(const internal::string& substring) {
+ return MakePolymorphicMatcher(internal::HasSubstrMatcher<internal::string>(
+ substring));
+}
+
+// Matches a string that starts with 'prefix' (case-sensitive).
+inline PolymorphicMatcher<internal::StartsWithMatcher<internal::string> >
+ StartsWith(const internal::string& prefix) {
+ return MakePolymorphicMatcher(internal::StartsWithMatcher<internal::string>(
+ prefix));
+}
+
+// Matches a string that ends with 'suffix' (case-sensitive).
+inline PolymorphicMatcher<internal::EndsWithMatcher<internal::string> >
+ EndsWith(const internal::string& suffix) {
+ return MakePolymorphicMatcher(internal::EndsWithMatcher<internal::string>(
+ suffix));
+}
+
+// Matches a string that fully matches regular expression 'regex'.
+// The matcher takes ownership of 'regex'.
+inline PolymorphicMatcher<internal::MatchesRegexMatcher> MatchesRegex(
+ const internal::RE* regex) {
+ return MakePolymorphicMatcher(internal::MatchesRegexMatcher(regex, true));
+}
+inline PolymorphicMatcher<internal::MatchesRegexMatcher> MatchesRegex(
+ const internal::string& regex) {
+ return MatchesRegex(new internal::RE(regex));
+}
+
+// Matches a string that contains regular expression 'regex'.
+// The matcher takes ownership of 'regex'.
+inline PolymorphicMatcher<internal::MatchesRegexMatcher> ContainsRegex(
+ const internal::RE* regex) {
+ return MakePolymorphicMatcher(internal::MatchesRegexMatcher(regex, false));
+}
+inline PolymorphicMatcher<internal::MatchesRegexMatcher> ContainsRegex(
+ const internal::string& regex) {
+ return ContainsRegex(new internal::RE(regex));
+}
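+// A brief usage sketch (the strings are hypothetical test data):
+//   EXPECT_THAT("hello world", MatchesRegex("hello.*"));  // whole-string match
+//   EXPECT_THAT("hello world", ContainsRegex("wor"));     // substring match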
+
+#if GTEST_HAS_GLOBAL_WSTRING || GTEST_HAS_STD_WSTRING
+// Wide string matchers.
+
+// Matches a string equal to str.
+inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::wstring> >
+ StrEq(const internal::wstring& str) {
+ return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::wstring>(
+ str, true, true));
+}
+
+// Matches a string not equal to str.
+inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::wstring> >
+ StrNe(const internal::wstring& str) {
+ return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::wstring>(
+ str, false, true));
+}
+
+// Matches a string equal to str, ignoring case.
+inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::wstring> >
+ StrCaseEq(const internal::wstring& str) {
+ return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::wstring>(
+ str, true, false));
+}
+
+// Matches a string not equal to str, ignoring case.
+inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::wstring> >
+ StrCaseNe(const internal::wstring& str) {
+ return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::wstring>(
+ str, false, false));
+}
+
+// Creates a matcher that matches any wstring, std::wstring, or C wide string
+// that contains the given substring.
+inline PolymorphicMatcher<internal::HasSubstrMatcher<internal::wstring> >
+ HasSubstr(const internal::wstring& substring) {
+ return MakePolymorphicMatcher(internal::HasSubstrMatcher<internal::wstring>(
+ substring));
+}
+
+// Matches a string that starts with 'prefix' (case-sensitive).
+inline PolymorphicMatcher<internal::StartsWithMatcher<internal::wstring> >
+ StartsWith(const internal::wstring& prefix) {
+ return MakePolymorphicMatcher(internal::StartsWithMatcher<internal::wstring>(
+ prefix));
+}
+
+// Matches a string that ends with 'suffix' (case-sensitive).
+inline PolymorphicMatcher<internal::EndsWithMatcher<internal::wstring> >
+ EndsWith(const internal::wstring& suffix) {
+ return MakePolymorphicMatcher(internal::EndsWithMatcher<internal::wstring>(
+ suffix));
+}
+
+#endif // GTEST_HAS_GLOBAL_WSTRING || GTEST_HAS_STD_WSTRING
+
+// Creates a polymorphic matcher that matches a 2-tuple where the
+// first field == the second field.
+inline internal::Eq2Matcher Eq() { return internal::Eq2Matcher(); }
+
+// Creates a polymorphic matcher that matches a 2-tuple where the
+// first field >= the second field.
+inline internal::Ge2Matcher Ge() { return internal::Ge2Matcher(); }
+
+// Creates a polymorphic matcher that matches a 2-tuple where the
+// first field > the second field.
+inline internal::Gt2Matcher Gt() { return internal::Gt2Matcher(); }
+
+// Creates a polymorphic matcher that matches a 2-tuple where the
+// first field <= the second field.
+inline internal::Le2Matcher Le() { return internal::Le2Matcher(); }
+
+// Creates a polymorphic matcher that matches a 2-tuple where the
+// first field < the second field.
+inline internal::Lt2Matcher Lt() { return internal::Lt2Matcher(); }
+
+// Creates a polymorphic matcher that matches a 2-tuple where the
+// first field != the second field.
+inline internal::Ne2Matcher Ne() { return internal::Ne2Matcher(); }
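+
+// A brief usage sketch: these 2-tuple matchers are typically used with
+// .With() on a two-argument mock method, or with Pointwise() below
+// (MockCalc and Add are hypothetical):
+//   MockCalc calc;
+//   EXPECT_CALL(calc, Add(_, _)).With(Lt());  // first argument < second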
+
+// Creates a matcher that matches any value of type T that m doesn't
+// match.
+template <typename InnerMatcher>
+inline internal::NotMatcher<InnerMatcher> Not(InnerMatcher m) {
+ return internal::NotMatcher<InnerMatcher>(m);
+}
+
+// Returns a matcher that matches anything that satisfies the given
+// predicate. The predicate can be any unary function or functor
+// whose return type can be implicitly converted to bool.
+template <typename Predicate>
+inline PolymorphicMatcher<internal::TrulyMatcher<Predicate> >
+Truly(Predicate pred) {
+ return MakePolymorphicMatcher(internal::TrulyMatcher<Predicate>(pred));
+}
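+// A brief usage sketch (IsEven is a hypothetical predicate):
+//   bool IsEven(int n) { return n % 2 == 0; }
+//   EXPECT_THAT(4, Truly(IsEven));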
+
+// Returns a matcher that matches an equal container.
+// This matcher behaves like Eq(), but in the event of mismatch lists the
+// values that are included in one container but not the other. (Duplicate
+// values and order differences are not explained.)
+template <typename Container>
+inline PolymorphicMatcher<internal::ContainerEqMatcher< // NOLINT
+ GTEST_REMOVE_CONST_(Container)> >
+ ContainerEq(const Container& rhs) {
+  // The following line works around a bug in MSVC 8.0, which
+  // sometimes causes Container to be a const type.
+ typedef GTEST_REMOVE_CONST_(Container) RawContainer;
+ return MakePolymorphicMatcher(
+ internal::ContainerEqMatcher<RawContainer>(rhs));
+}
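+// A brief usage sketch (the vectors are hypothetical test data):
+//   std::vector<int> expected;  // e.g. 1, 2, 3
+//   std::vector<int> actual;    // e.g. 1, 2, 4
+//   EXPECT_THAT(actual, ContainerEq(expected));  // fails, explaining that 3
+//                                                // is missing and 4 is extra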
+
+// Matches an STL-style container or a native array that contains the
+// same number of elements as in rhs, where its i-th element and rhs's
+// i-th element (as a pair) satisfy the given pair matcher, for all i.
+// TupleMatcher must be able to be safely cast to Matcher<tuple<const
+// T1&, const T2&> >, where T1 and T2 are the types of elements in the
+// LHS container and the RHS container respectively.
+template <typename TupleMatcher, typename Container>
+inline internal::PointwiseMatcher<TupleMatcher,
+ GTEST_REMOVE_CONST_(Container)>
+Pointwise(const TupleMatcher& tuple_matcher, const Container& rhs) {
+  // The following line works around a bug in MSVC 8.0, which
+  // sometimes causes Container to be a const type.
+ typedef GTEST_REMOVE_CONST_(Container) RawContainer;
+ return internal::PointwiseMatcher<TupleMatcher, RawContainer>(
+ tuple_matcher, rhs);
+}
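+// A brief usage sketch (lhs and rhs are hypothetical containers of equal size):
+//   std::vector<double> lhs;  // e.g. 1.0, 2.0
+//   std::vector<double> rhs;  // e.g. 1.1, 2.1
+//   EXPECT_THAT(lhs, Pointwise(Le(), rhs));  // lhs[i] <= rhs[i] for all i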
+
+// Matches an STL-style container or a native array that contains at
+// least one element matching the given value or matcher.
+//
+// Examples:
+// ::std::set<int> page_ids;
+// page_ids.insert(3);
+// page_ids.insert(1);
+// EXPECT_THAT(page_ids, Contains(1));
+// EXPECT_THAT(page_ids, Contains(Gt(2)));
+// EXPECT_THAT(page_ids, Not(Contains(4)));
+//
+// ::std::map<int, size_t> page_lengths;
+// page_lengths[1] = 100;
+// EXPECT_THAT(page_lengths,
+// Contains(::std::pair<const int, size_t>(1, 100)));
+//
+// const char* user_ids[] = { "joe", "mike", "tom" };
+// EXPECT_THAT(user_ids, Contains(Eq(::std::string("tom"))));
+template <typename M>
+inline internal::ContainsMatcher<M> Contains(M matcher) {
+ return internal::ContainsMatcher<M>(matcher);
+}
+
+// Matches an STL-style container or a native array that contains only
+// elements matching the given value or matcher.
+//
+// Each(m) is semantically equivalent to Not(Contains(Not(m))). Only
+// the messages are different.
+//
+// Examples:
+// ::std::set<int> page_ids;
+// // Each(m) matches an empty container, regardless of what m is.
+// EXPECT_THAT(page_ids, Each(Eq(1)));
+// EXPECT_THAT(page_ids, Each(Eq(77)));
+//
+// page_ids.insert(3);
+// EXPECT_THAT(page_ids, Each(Gt(0)));
+// EXPECT_THAT(page_ids, Not(Each(Gt(4))));
+// page_ids.insert(1);
+// EXPECT_THAT(page_ids, Not(Each(Lt(2))));
+//
+// ::std::map<int, size_t> page_lengths;
+// page_lengths[1] = 100;
+// page_lengths[2] = 200;
+// page_lengths[3] = 300;
+// EXPECT_THAT(page_lengths, Not(Each(Pair(1, 100))));
+// EXPECT_THAT(page_lengths, Each(Key(Le(3))));
+//
+// const char* user_ids[] = { "joe", "mike", "tom" };
+// EXPECT_THAT(user_ids, Not(Each(Eq(::std::string("tom")))));
+template <typename M>
+inline internal::EachMatcher<M> Each(M matcher) {
+ return internal::EachMatcher<M>(matcher);
+}
+
+// Key(inner_matcher) matches an std::pair whose 'first' field matches
+// inner_matcher. For example, Contains(Key(Ge(5))) can be used to match an
+// std::map that contains at least one element whose key is >= 5.
+template <typename M>
+inline internal::KeyMatcher<M> Key(M inner_matcher) {
+ return internal::KeyMatcher<M>(inner_matcher);
+}
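+// A brief usage sketch (page_lengths is a hypothetical map):
+//   std::map<int, size_t> page_lengths;
+//   page_lengths[7] = 50;
+//   EXPECT_THAT(page_lengths, Contains(Key(7)));
+//   EXPECT_THAT(page_lengths, Each(Key(Ge(5))));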
+
+// Pair(first_matcher, second_matcher) matches a std::pair whose 'first' field
+// matches first_matcher and whose 'second' field matches second_matcher. For
+// example, EXPECT_THAT(map_type, ElementsAre(Pair(Ge(5), "foo"))) can be used
+// to match a std::map<int, string> that contains exactly one element whose key
+// is >= 5 and whose value equals "foo".
+template <typename FirstMatcher, typename SecondMatcher>
+inline internal::PairMatcher<FirstMatcher, SecondMatcher>
+Pair(FirstMatcher first_matcher, SecondMatcher second_matcher) {
+ return internal::PairMatcher<FirstMatcher, SecondMatcher>(
+ first_matcher, second_matcher);
+}
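+// A brief usage sketch (page_lengths is a hypothetical map):
+//   std::map<int, size_t> page_lengths;
+//   page_lengths[7] = 50;
+//   EXPECT_THAT(page_lengths, Contains(Pair(7, 50)));
+//   EXPECT_THAT(page_lengths, Contains(Pair(Ge(5), _)));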
+
+// Returns a predicate that is satisfied by anything that matches the
+// given matcher.
+template <typename M>
+inline internal::MatcherAsPredicate<M> Matches(M matcher) {
+ return internal::MatcherAsPredicate<M>(matcher);
+}
+
+// Returns true iff the value matches the matcher.
+template <typename T, typename M>
+inline bool Value(const T& value, M matcher) {
+ return testing::Matches(matcher)(value);
+}
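+// A brief usage sketch:
+//   EXPECT_TRUE(Matches(Gt(5))(7));  // the predicate form
+//   EXPECT_TRUE(Value(7, Gt(5)));    // equivalent, with value and matcher
+//                                    // passed directly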
+
+// Matches the value against the given matcher and explains the match
+// result to listener.
+template <typename T, typename M>
+inline bool ExplainMatchResult(
+ M matcher, const T& value, MatchResultListener* listener) {
+ return SafeMatcherCast<const T&>(matcher).MatchAndExplain(value, listener);
+}
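+// A brief usage sketch: ExplainMatchResult() is typically called inside a
+// custom matcher defined with the MATCHER_P macro (defined elsewhere in
+// Google Mock); SizeIs below is hypothetical:
+//   MATCHER_P(SizeIs, size_matcher, "") {
+//     return ExplainMatchResult(size_matcher, arg.size(), result_listener);
+//   }
+//   EXPECT_THAT(std::string("hello"), SizeIs(Ge(3u)));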
+
+// AllArgs(m) is a synonym of m. This is useful in
+//
+// EXPECT_CALL(foo, Bar(_, _)).With(AllArgs(Eq()));
+//
+// which is easier to read than
+//
+// EXPECT_CALL(foo, Bar(_, _)).With(Eq());
+template <typename InnerMatcher>
+inline InnerMatcher AllArgs(const InnerMatcher& matcher) { return matcher; }
+
+// These macros allow using matchers to check values in Google Test
+// tests. ASSERT_THAT(value, matcher) and EXPECT_THAT(value, matcher)
+// succeed iff the value matches the matcher. If the assertion fails,
+// the value and the description of the matcher will be printed.
+#define ASSERT_THAT(value, matcher) ASSERT_PRED_FORMAT1(\
+ ::testing::internal::MakePredicateFormatterFromMatcher(matcher), value)
+#define EXPECT_THAT(value, matcher) EXPECT_PRED_FORMAT1(\
+ ::testing::internal::MakePredicateFormatterFromMatcher(matcher), value)
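+
+// A brief usage sketch (GetGreeting and GetCount are hypothetical functions
+// under test):
+//   EXPECT_THAT(GetGreeting(), StartsWith("Hello"));  // non-fatal on failure
+//   ASSERT_THAT(GetCount(), Gt(0));                   // fatal on failure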
+
+} // namespace testing
+
+#endif // GMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_
+
+namespace testing {
+
+// An abstract handle of an expectation.
+class Expectation;
+
+// A set of expectation handles.
+class ExpectationSet;
+
+// Anything inside the 'internal' namespace IS INTERNAL IMPLEMENTATION
+// and MUST NOT BE USED IN USER CODE!!!
+namespace internal {
+
+// Implements a mock function.
+template <typename F> class FunctionMocker;
+
+// Base class for expectations.
+class ExpectationBase;
+
+// Implements an expectation.
+template <typename F> class TypedExpectation;
+
+// Helper class for testing the Expectation class template.
+class ExpectationTester;
+
+// Base class for function mockers.
+template <typename F> class FunctionMockerBase;
+
+// Protects the mock object registry (in class Mock), all function
+// mockers, and all expectations.
+//
+// The reason we don't use more fine-grained protection is: when a
+// mock function Foo() is called, it needs to consult its expectations
+// to see which one should be picked. If another thread is allowed to
+// call a mock function (either Foo() or a different one) at the same
+// time, it could affect the "retired" attributes of Foo()'s
+// expectations when InSequence() is used, and thus affect which
+// expectation gets picked. Therefore, we sequence all mock function
+// calls to ensure the integrity of the mock objects' states.
+GTEST_DECLARE_STATIC_MUTEX_(g_gmock_mutex);
+
+// Untyped base class for ActionResultHolder<R>.
+class UntypedActionResultHolderBase;
+
+// Abstract base class of FunctionMockerBase. This is the
+// type-agnostic part of the function mocker interface. Its pure
+// virtual methods are implemented by FunctionMockerBase.
+class UntypedFunctionMockerBase {
+ public:
+ UntypedFunctionMockerBase();
+ virtual ~UntypedFunctionMockerBase();
+
+ // Verifies that all expectations on this mock function have been
+ // satisfied. Reports one or more Google Test non-fatal failures
+ // and returns false if not.
+ // L >= g_gmock_mutex
+ bool VerifyAndClearExpectationsLocked();
+
+ // Clears the ON_CALL()s set on this mock function.
+ // L >= g_gmock_mutex
+ virtual void ClearDefaultActionsLocked() = 0;
+
+ // In all of the following Untyped* functions, it's the caller's
+ // responsibility to guarantee the correctness of the arguments'
+ // types.
+
+ // Performs the default action with the given arguments and returns
+ // the action's result. The call description string will be used in
+ // the error message to describe the call in the case the default
+ // action fails.
+ // L = *
+ virtual UntypedActionResultHolderBase* UntypedPerformDefaultAction(
+ const void* untyped_args,
+ const string& call_description) const = 0;
+
+ // Performs the given action with the given arguments and returns
+ // the action's result.
+ // L = *
+ virtual UntypedActionResultHolderBase* UntypedPerformAction(
+ const void* untyped_action,
+ const void* untyped_args) const = 0;
+
+ // Writes a message that the call is uninteresting (i.e. neither
+ // explicitly expected nor explicitly unexpected) to the given
+ // ostream.
+ // L < g_gmock_mutex
+ virtual void UntypedDescribeUninterestingCall(const void* untyped_args,
+ ::std::ostream* os) const = 0;
+
+ // Returns the expectation that matches the given function arguments
+  // (or NULL if there's no match); when a match is found,
+ // untyped_action is set to point to the action that should be
+ // performed (or NULL if the action is "do default"), and
+ // is_excessive is modified to indicate whether the call exceeds the
+ // expected number.
+ // L < g_gmock_mutex
+ virtual const ExpectationBase* UntypedFindMatchingExpectation(
+ const void* untyped_args,
+ const void** untyped_action, bool* is_excessive,
+ ::std::ostream* what, ::std::ostream* why) = 0;
+
+ // Prints the given function arguments to the ostream.
+ virtual void UntypedPrintArgs(const void* untyped_args,
+ ::std::ostream* os) const = 0;
+
+ // Sets the mock object this mock method belongs to, and registers
+ // this information in the global mock registry. Will be called
+ // whenever an EXPECT_CALL() or ON_CALL() is executed on this mock
+ // method.
+ // TODO(wan@google.com): rename to SetAndRegisterOwner().
+ // L < g_gmock_mutex
+ void RegisterOwner(const void* mock_obj);
+
+ // Sets the mock object this mock method belongs to, and sets the
+ // name of the mock function. Will be called upon each invocation
+ // of this mock function.
+ // L < g_gmock_mutex
+ void SetOwnerAndName(const void* mock_obj, const char* name);
+
+ // Returns the mock object this mock method belongs to. Must be
+ // called after RegisterOwner() or SetOwnerAndName() has been
+ // called.
+ // L < g_gmock_mutex
+ const void* MockObject() const;
+
+ // Returns the name of this mock method. Must be called after
+ // SetOwnerAndName() has been called.
+ // L < g_gmock_mutex
+ const char* Name() const;
+
+ // Returns the result of invoking this mock function with the given
+ // arguments. This function can be safely called from multiple
+ // threads concurrently. The caller is responsible for deleting the
+ // result.
+ // L < g_gmock_mutex
+ const UntypedActionResultHolderBase* UntypedInvokeWith(
+ const void* untyped_args);
+
+ protected:
+ typedef std::vector<const void*> UntypedOnCallSpecs;
+
+ typedef std::vector<internal::linked_ptr<ExpectationBase> >
+ UntypedExpectations;
+
+ // Returns an Expectation object that references and co-owns exp,
+ // which must be an expectation on this mock function.
+ Expectation GetHandleOf(ExpectationBase* exp);
+
+ // Address of the mock object this mock method belongs to. Only
+ // valid after this mock method has been called or
+ // ON_CALL/EXPECT_CALL has been invoked on it.
+ const void* mock_obj_; // Protected by g_gmock_mutex.
+
+ // Name of the function being mocked. Only valid after this mock
+ // method has been called.
+ const char* name_; // Protected by g_gmock_mutex.
+
+ // All default action specs for this function mocker.
+ UntypedOnCallSpecs untyped_on_call_specs_;
+
+ // All expectations for this function mocker.
+ UntypedExpectations untyped_expectations_;
+}; // class UntypedFunctionMockerBase
+
+// Untyped base class for OnCallSpec<F>.
+class UntypedOnCallSpecBase {
+ public:
+ // The arguments are the location of the ON_CALL() statement.
+ UntypedOnCallSpecBase(const char* a_file, int a_line)
+ : file_(a_file), line_(a_line), last_clause_(kNone) {}
+
+ // Where in the source file was the default action spec defined?
+ const char* file() const { return file_; }
+ int line() const { return line_; }
+
+ protected:
+ // Gives each clause in the ON_CALL() statement a name.
+ enum Clause {
+ // Do not change the order of the enum members! The run-time
+ // syntax checking relies on it.
+ kNone,
+ kWith,
+ kWillByDefault
+ };
+
+ // Asserts that the ON_CALL() statement has a certain property.
+ void AssertSpecProperty(bool property, const string& failure_message) const {
+ Assert(property, file_, line_, failure_message);
+ }
+
+ // Expects that the ON_CALL() statement has a certain property.
+ void ExpectSpecProperty(bool property, const string& failure_message) const {
+ Expect(property, file_, line_, failure_message);
+ }
+
+ const char* file_;
+ int line_;
+
+ // The last clause in the ON_CALL() statement as seen so far.
+ // Initially kNone and changes as the statement is parsed.
+ Clause last_clause_;
+}; // class UntypedOnCallSpecBase
+
+// This template class implements an ON_CALL spec.
+template <typename F>
+class OnCallSpec : public UntypedOnCallSpecBase {
+ public:
+ typedef typename Function<F>::ArgumentTuple ArgumentTuple;
+ typedef typename Function<F>::ArgumentMatcherTuple ArgumentMatcherTuple;
+
+ // Constructs an OnCallSpec object from the information inside
+ // the parenthesis of an ON_CALL() statement.
+ OnCallSpec(const char* a_file, int a_line,
+ const ArgumentMatcherTuple& matchers)
+ : UntypedOnCallSpecBase(a_file, a_line),
+ matchers_(matchers),
+ // By default, extra_matcher_ should match anything. However,
+ // we cannot initialize it with _ as that triggers a compiler
+ // bug in Symbian's C++ compiler (cannot decide between two
+ // overloaded constructors of Matcher<const ArgumentTuple&>).
+ extra_matcher_(A<const ArgumentTuple&>()) {
+ }
+
+ // Implements the .With() clause.
+ OnCallSpec& With(const Matcher<const ArgumentTuple&>& m) {
+ // Makes sure this is called at most once.
+ ExpectSpecProperty(last_clause_ < kWith,
+ ".With() cannot appear "
+ "more than once in an ON_CALL().");
+ last_clause_ = kWith;
+
+ extra_matcher_ = m;
+ return *this;
+ }
+
+ // Implements the .WillByDefault() clause.
+ OnCallSpec& WillByDefault(const Action<F>& action) {
+ ExpectSpecProperty(last_clause_ < kWillByDefault,
+ ".WillByDefault() must appear "
+ "exactly once in an ON_CALL().");
+ last_clause_ = kWillByDefault;
+
+ ExpectSpecProperty(!action.IsDoDefault(),
+ "DoDefault() cannot be used in ON_CALL().");
+ action_ = action;
+ return *this;
+ }
+
+ // Returns true iff the given arguments match the matchers.
+ bool Matches(const ArgumentTuple& args) const {
+ return TupleMatches(matchers_, args) && extra_matcher_.Matches(args);
+ }
+
+ // Returns the action specified by the user.
+ const Action<F>& GetAction() const {
+ AssertSpecProperty(last_clause_ == kWillByDefault,
+ ".WillByDefault() must appear exactly "
+ "once in an ON_CALL().");
+ return action_;
+ }
+
+ private:
+ // The information in statement
+ //
+ // ON_CALL(mock_object, Method(matchers))
+ // .With(multi-argument-matcher)
+ // .WillByDefault(action);
+ //
+ // is recorded in the data members like this:
+ //
+ // source file that contains the statement => file_
+ // line number of the statement => line_
+ // matchers => matchers_
+ // multi-argument-matcher => extra_matcher_
+ // action => action_
+ ArgumentMatcherTuple matchers_;
+ Matcher<const ArgumentTuple&> extra_matcher_;
+ Action<F> action_;
+}; // class OnCallSpec
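+
+// For reference, the user-facing statement that this class parses looks
+// like the following sketch (MockCalc and Add are hypothetical; Return is
+// a Google Mock action defined elsewhere):
+//   MockCalc calc;
+//   ON_CALL(calc, Add(_, _))
+//       .With(Lt())                 // handled by OnCallSpec::With()
+//       .WillByDefault(Return(0));  // handled by OnCallSpec::WillByDefault()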
+
+// Possible reactions to uninteresting calls.  TODO(wan@google.com):
+// rename the enum values to the kFoo style.
+enum CallReaction {
+ ALLOW,
+ WARN,
+ FAIL
+};
+
+} // namespace internal
+
+// Utilities for manipulating mock objects.
+class Mock {
+ public:
+ // The following public methods can be called concurrently.
+
+ // Tells Google Mock to ignore mock_obj when checking for leaked
+ // mock objects.
+ static void AllowLeak(const void* mock_obj);
+
+ // Verifies and clears all expectations on the given mock object.
+ // If the expectations aren't satisfied, generates one or more
+ // Google Test non-fatal failures and returns false.
+ static bool VerifyAndClearExpectations(void* mock_obj);
+
+ // Verifies all expectations on the given mock object and clears its
+ // default actions and expectations. Returns true iff the
+ // verification was successful.
+ static bool VerifyAndClear(void* mock_obj);
+ private:
+ friend class internal::UntypedFunctionMockerBase;
+
+ // Needed for a function mocker to register itself (so that we know
+ // how to clear a mock object).
+ template <typename F>
+ friend class internal::FunctionMockerBase;
+
+ template <typename M>
+ friend class NiceMock;
+
+ template <typename M>
+ friend class StrictMock;
+
+ // Tells Google Mock to allow uninteresting calls on the given mock
+ // object.
+ // L < g_gmock_mutex
+ static void AllowUninterestingCalls(const void* mock_obj);
+
+ // Tells Google Mock to warn the user about uninteresting calls on
+ // the given mock object.
+ // L < g_gmock_mutex
+ static void WarnUninterestingCalls(const void* mock_obj);
+
+ // Tells Google Mock to fail uninteresting calls on the given mock
+ // object.
+ // L < g_gmock_mutex
+ static void FailUninterestingCalls(const void* mock_obj);
+
+ // Tells Google Mock the given mock object is being destroyed and
+ // its entry in the call-reaction table should be removed.
+ // L < g_gmock_mutex
+ static void UnregisterCallReaction(const void* mock_obj);
+
+ // Returns the reaction Google Mock will have on uninteresting calls
+ // made on the given mock object.
+ // L < g_gmock_mutex
+ static internal::CallReaction GetReactionOnUninterestingCalls(
+ const void* mock_obj);
+
+ // Verifies that all expectations on the given mock object have been
+ // satisfied. Reports one or more Google Test non-fatal failures
+ // and returns false if not.
+ // L >= g_gmock_mutex
+ static bool VerifyAndClearExpectationsLocked(void* mock_obj);
+
+ // Clears all ON_CALL()s set on the given mock object.
+ // L >= g_gmock_mutex
+ static void ClearDefaultActionsLocked(void* mock_obj);
+
+ // Registers a mock object and a mock method it owns.
+ // L < g_gmock_mutex
+ static void Register(const void* mock_obj,
+ internal::UntypedFunctionMockerBase* mocker);
+
+ // Tells Google Mock where in the source code mock_obj is used in an
+ // ON_CALL or EXPECT_CALL. In case mock_obj is leaked, this
+ // information helps the user identify which object it is.
+ // L < g_gmock_mutex
+ static void RegisterUseByOnCallOrExpectCall(
+ const void* mock_obj, const char* file, int line);
+
+ // Unregisters a mock method; removes the owning mock object from
+ // the registry when the last mock method associated with it has
+ // been unregistered. This is called only in the destructor of
+ // FunctionMockerBase.
+ // L >= g_gmock_mutex
+ static void UnregisterLocked(internal::UntypedFunctionMockerBase* mocker);
+}; // class Mock
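+
+// A brief usage sketch (MockCalc and Add are hypothetical):
+//   MockCalc calc;
+//   EXPECT_CALL(calc, Add(1, 2));
+//   calc.Add(1, 2);
+//   // Verifies the expectation above and removes it, so later calls to
+//   // Add() are no longer checked against it.
+//   Mock::VerifyAndClearExpectations(&calc);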
+
+// An abstract handle of an expectation. Useful in the .After()
+// clause of EXPECT_CALL() for setting the (partial) order of
+// expectations. The syntax:
+//
+// Expectation e1 = EXPECT_CALL(...)...;
+// EXPECT_CALL(...).After(e1)...;
+//
+// sets two expectations where the latter can only be matched after
+// the former has been satisfied.
+//
+// Notes:
+// - This class is copyable and has value semantics.
+// - Constness is shallow: a const Expectation object itself cannot
+// be modified, but the mutable methods of the ExpectationBase
+// object it references can be called via expectation_base().
+// - The constructors and destructor are defined out-of-line because
+//   the Symbian WINSCW compiler would otherwise instantiate them
+// when it sees this class definition, at which point it doesn't have
+// ExpectationBase available yet, leading to incorrect destruction
+// in the linked_ptr (or compilation errors if using a checking
+// linked_ptr).
+class Expectation {
+ public:
+ // Constructs a null object that doesn't reference any expectation.
+ Expectation();
+
+ ~Expectation();
+
+ // This single-argument ctor must not be explicit, in order to support the
+ // Expectation e = EXPECT_CALL(...);
+ // syntax.
+ //
+ // A TypedExpectation object stores its pre-requisites as
+ // Expectation objects, and needs to call the non-const Retire()
+ // method on the ExpectationBase objects they reference. Therefore
+ // Expectation must receive a *non-const* reference to the
+ // ExpectationBase object.
+ Expectation(internal::ExpectationBase& exp); // NOLINT
+
+ // The compiler-generated copy ctor and operator= work exactly as
+ // intended, so we don't need to define our own.
+
+ // Returns true iff rhs references the same expectation as this object does.
+ bool operator==(const Expectation& rhs) const {
+ return expectation_base_ == rhs.expectation_base_;
+ }
+
+ bool operator!=(const Expectation& rhs) const { return !(*this == rhs); }
+
+ private:
+ friend class ExpectationSet;
+ friend class Sequence;
+ friend class ::testing::internal::ExpectationBase;
+ friend class ::testing::internal::UntypedFunctionMockerBase;
+
+ template <typename F>
+ friend class ::testing::internal::FunctionMockerBase;
+
+ template <typename F>
+ friend class ::testing::internal::TypedExpectation;
+
+ // This comparator is needed for putting Expectation objects into a set.
+ class Less {
+ public:
+ bool operator()(const Expectation& lhs, const Expectation& rhs) const {
+ return lhs.expectation_base_.get() < rhs.expectation_base_.get();
+ }
+ };
+
+ typedef ::std::set<Expectation, Less> Set;
+
+ Expectation(
+ const internal::linked_ptr<internal::ExpectationBase>& expectation_base);
+
+ // Returns the expectation this object references.
+ const internal::linked_ptr<internal::ExpectationBase>&
+ expectation_base() const {
+ return expectation_base_;
+ }
+
+ // A linked_ptr that co-owns the expectation this handle references.
+ internal::linked_ptr<internal::ExpectationBase> expectation_base_;
+};
+
+// A set of expectation handles. Useful in the .After() clause of
+// EXPECT_CALL() for setting the (partial) order of expectations. The
+// syntax:
+//
+// ExpectationSet es;
+// es += EXPECT_CALL(...)...;
+// es += EXPECT_CALL(...)...;
+// EXPECT_CALL(...).After(es)...;
+//
+// sets three expectations where the last one can only be matched
+// after the first two have both been satisfied.
+//
+// This class is copyable and has value semantics.
+class ExpectationSet {
+ public:
+ // A bidirectional iterator that can read a const element in the set.
+ typedef Expectation::Set::const_iterator const_iterator;
+
+ // An object stored in the set. This is an alias of Expectation.
+ typedef Expectation::Set::value_type value_type;
+
+ // Constructs an empty set.
+ ExpectationSet() {}
+
+ // This single-argument ctor must not be explicit, in order to support the
+ // ExpectationSet es = EXPECT_CALL(...);
+ // syntax.
+ ExpectationSet(internal::ExpectationBase& exp) { // NOLINT
+ *this += Expectation(exp);
+ }
+
+ // This single-argument ctor implements implicit conversion from
+ // Expectation and thus must not be explicit. This allows either an
+ // Expectation or an ExpectationSet to be used in .After().
+ ExpectationSet(const Expectation& e) { // NOLINT
+ *this += e;
+ }
+
+  // The compiler-generated copy ctor and operator= work exactly as
+ // intended, so we don't need to define our own.
+
+ // Returns true iff rhs contains the same set of Expectation objects
+ // as this does.
+ bool operator==(const ExpectationSet& rhs) const {
+ return expectations_ == rhs.expectations_;
+ }
+
+ bool operator!=(const ExpectationSet& rhs) const { return !(*this == rhs); }
+
+ // Implements the syntax
+ // expectation_set += EXPECT_CALL(...);
+ ExpectationSet& operator+=(const Expectation& e) {
+ expectations_.insert(e);
+ return *this;
+ }
+
+ int size() const { return static_cast<int>(expectations_.size()); }
+
+ const_iterator begin() const { return expectations_.begin(); }
+ const_iterator end() const { return expectations_.end(); }
+
+ private:
+ Expectation::Set expectations_;
+};
+
+
+// Sequence objects are used by a user to specify the relative order
+// in which the expectations should match. They are copyable (we rely
+// on the compiler-defined copy constructor and assignment operator).
+class Sequence {
+ public:
+ // Constructs an empty sequence.
+ Sequence() : last_expectation_(new Expectation) {}
+
+ // Adds an expectation to this sequence. The caller must ensure
+ // that no other thread is accessing this Sequence object.
+ void AddExpectation(const Expectation& expectation) const;
+
+ private:
+ // The last expectation in this sequence. We use a linked_ptr here
+ // because Sequence objects are copyable and we want the copies to
+ // be aliases. The linked_ptr allows the copies to co-own and share
+ // the same Expectation object.
+ internal::linked_ptr<Expectation> last_expectation_;
+}; // class Sequence
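+
+// A brief usage sketch (MockFile is a hypothetical mock class). Open() must
+// precede both Read() and Write(), but Read() and Write() may occur in
+// either order:
+//   Sequence s1, s2;
+//   MockFile file;
+//   EXPECT_CALL(file, Open()).InSequence(s1, s2);
+//   EXPECT_CALL(file, Read()).InSequence(s1);
+//   EXPECT_CALL(file, Write()).InSequence(s2);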
+
+// An object of this type causes all EXPECT_CALL() statements
+// encountered in its scope to be put in an anonymous sequence. The
+// work is done in the constructor and destructor. You should only
+// create an InSequence object on the stack.
+//
+// The sole purpose for this class is to support easy definition of
+// sequential expectations, e.g.
+//
+// {
+// InSequence dummy; // The name of the object doesn't matter.
+//
+// // The following expectations must match in the order they appear.
+// EXPECT_CALL(a, Bar())...;
+// EXPECT_CALL(a, Baz())...;
+// ...
+// EXPECT_CALL(b, Xyz())...;
+// }
+//
+// You can create InSequence objects in multiple threads, as long as
+// they are used to affect different mock objects. The idea is that
+// each thread can create and set up its own mocks as if it's the only
+// thread. However, for the clarity of your tests we recommend setting
+// up mocks in the main thread unless you have a good reason not to do
+// so.
+class InSequence {
+ public:
+ InSequence();
+ ~InSequence();
+ private:
+ bool sequence_created_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(InSequence); // NOLINT
+} GTEST_ATTRIBUTE_UNUSED_;
+
+namespace internal {
+
+// Points to the implicit sequence introduced by a living InSequence
+// object (if any) in the current thread or NULL.
+extern ThreadLocal<Sequence*> g_gmock_implicit_sequence;
+
+// Base class for implementing expectations.
+//
+// There are two reasons for having a type-agnostic base class for
+// Expectation:
+//
+// 1. We need to store collections of expectations of different
+// types (e.g. all pre-requisites of a particular expectation, all
+// expectations in a sequence). Therefore these expectation objects
+// must share a common base class.
+//
+// 2. We can avoid binary code bloat by moving methods not depending
+// on the template argument of Expectation to the base class.
+//
+// This class is internal and mustn't be used by user code directly.
+class ExpectationBase {
+ public:
+ // source_text is the EXPECT_CALL(...) source that created this Expectation.
+ ExpectationBase(const char* file, int line, const string& source_text);
+
+ virtual ~ExpectationBase();
+
+ // Where in the source file was the expectation spec defined?
+ const char* file() const { return file_; }
+ int line() const { return line_; }
+ const char* source_text() const { return source_text_.c_str(); }
+ // Returns the cardinality specified in the expectation spec.
+ const Cardinality& cardinality() const { return cardinality_; }
+
+ // Describes the source file location of this expectation.
+ void DescribeLocationTo(::std::ostream* os) const {
+ *os << FormatFileLocation(file(), line()) << " ";
+ }
+
+ // Describes how many times a function call matching this
+ // expectation has occurred.
+ // L >= g_gmock_mutex
+ void DescribeCallCountTo(::std::ostream* os) const;
+
+ // If this mock method has an extra matcher (i.e. .With(matcher)),
+ // describes it to the ostream.
+ virtual void MaybeDescribeExtraMatcherTo(::std::ostream* os) = 0;
+
+ protected:
+ friend class ::testing::Expectation;
+ friend class UntypedFunctionMockerBase;
+
+ enum Clause {
+ // Don't change the order of the enum members!
+ kNone,
+ kWith,
+ kTimes,
+ kInSequence,
+ kAfter,
+ kWillOnce,
+ kWillRepeatedly,
+ kRetiresOnSaturation
+ };
+
+ typedef std::vector<const void*> UntypedActions;
+
+ // Returns an Expectation object that references and co-owns this
+ // expectation.
+ virtual Expectation GetHandle() = 0;
+
+ // Asserts that the EXPECT_CALL() statement has the given property.
+ void AssertSpecProperty(bool property, const string& failure_message) const {
+ Assert(property, file_, line_, failure_message);
+ }
+
+ // Expects that the EXPECT_CALL() statement has the given property.
+ void ExpectSpecProperty(bool property, const string& failure_message) const {
+ Expect(property, file_, line_, failure_message);
+ }
+
+ // Explicitly specifies the cardinality of this expectation. Used
+ // by the subclasses to implement the .Times() clause.
+ void SpecifyCardinality(const Cardinality& cardinality);
+
+ // Returns true iff the user specified the cardinality explicitly
+ // using a .Times().
+ bool cardinality_specified() const { return cardinality_specified_; }
+
+ // Sets the cardinality of this expectation spec.
+ void set_cardinality(const Cardinality& a_cardinality) {
+ cardinality_ = a_cardinality;
+ }
+
+ // The following group of methods should only be called after the
+ // EXPECT_CALL() statement, and only when g_gmock_mutex is held by
+ // the current thread.
+
+ // Retires all pre-requisites of this expectation.
+ // L >= g_gmock_mutex
+ void RetireAllPreRequisites();
+
+ // Returns true iff this expectation is retired.
+ // L >= g_gmock_mutex
+ bool is_retired() const {
+ g_gmock_mutex.AssertHeld();
+ return retired_;
+ }
+
+ // Retires this expectation.
+ // L >= g_gmock_mutex
+ void Retire() {
+ g_gmock_mutex.AssertHeld();
+ retired_ = true;
+ }
+
+ // Returns true iff this expectation is satisfied.
+ // L >= g_gmock_mutex
+ bool IsSatisfied() const {
+ g_gmock_mutex.AssertHeld();
+ return cardinality().IsSatisfiedByCallCount(call_count_);
+ }
+
+ // Returns true iff this expectation is saturated.
+ // L >= g_gmock_mutex
+ bool IsSaturated() const {
+ g_gmock_mutex.AssertHeld();
+ return cardinality().IsSaturatedByCallCount(call_count_);
+ }
+
+ // Returns true iff this expectation is over-saturated.
+ // L >= g_gmock_mutex
+ bool IsOverSaturated() const {
+ g_gmock_mutex.AssertHeld();
+ return cardinality().IsOverSaturatedByCallCount(call_count_);
+ }
+
+ // Returns true iff all pre-requisites of this expectation are satisfied.
+ // L >= g_gmock_mutex
+ bool AllPrerequisitesAreSatisfied() const;
+
+ // Adds unsatisfied pre-requisites of this expectation to 'result'.
+ // L >= g_gmock_mutex
+ void FindUnsatisfiedPrerequisites(ExpectationSet* result) const;
+
+  // Returns the number of times this expectation has been invoked.
+ // L >= g_gmock_mutex
+ int call_count() const {
+ g_gmock_mutex.AssertHeld();
+ return call_count_;
+ }
+
+  // Increments the number of times this expectation has been invoked.
+ // L >= g_gmock_mutex
+ void IncrementCallCount() {
+ g_gmock_mutex.AssertHeld();
+ call_count_++;
+ }
+
+ // Checks the action count (i.e. the number of WillOnce() and
+ // WillRepeatedly() clauses) against the cardinality if this hasn't
+ // been done before. Prints a warning if there are too many or too
+ // few actions.
+ // L < mutex_
+ void CheckActionCountIfNotDone() const;
+
+ friend class ::testing::Sequence;
+ friend class ::testing::internal::ExpectationTester;
+
+ template <typename Function>
+ friend class TypedExpectation;
+
+ // Implements the .Times() clause.
+ void UntypedTimes(const Cardinality& a_cardinality);
+
+  // This group of fields is part of the spec and won't change after
+ // an EXPECT_CALL() statement finishes.
+ const char* file_; // The file that contains the expectation.
+ int line_; // The line number of the expectation.
+ const string source_text_; // The EXPECT_CALL(...) source text.
+ // True iff the cardinality is specified explicitly.
+ bool cardinality_specified_;
+ Cardinality cardinality_; // The cardinality of the expectation.
+ // The immediate pre-requisites (i.e. expectations that must be
+ // satisfied before this expectation can be matched) of this
+ // expectation. We use linked_ptr in the set because we want an
+ // Expectation object to be co-owned by its FunctionMocker and its
+ // successors. This allows multiple mock objects to be deleted at
+ // different times.
+ ExpectationSet immediate_prerequisites_;
+
+  // This group of fields holds the current state of the expectation,
+ // and can change as the mock function is called.
+ int call_count_; // How many times this expectation has been invoked.
+ bool retired_; // True iff this expectation has retired.
+ UntypedActions untyped_actions_;
+ bool extra_matcher_specified_;
+ bool repeated_action_specified_; // True if a WillRepeatedly() was specified.
+ bool retires_on_saturation_;
+ Clause last_clause_;
+ mutable bool action_count_checked_; // Under mutex_.
+ mutable Mutex mutex_; // Protects action_count_checked_.
+
+ GTEST_DISALLOW_ASSIGN_(ExpectationBase);
+}; // class ExpectationBase
+
+// Implements an expectation for the given function type.
+template <typename F>
+class TypedExpectation : public ExpectationBase {
+ public:
+ typedef typename Function<F>::ArgumentTuple ArgumentTuple;
+ typedef typename Function<F>::ArgumentMatcherTuple ArgumentMatcherTuple;
+ typedef typename Function<F>::Result Result;
+
+ TypedExpectation(FunctionMockerBase<F>* owner,
+ const char* a_file, int a_line, const string& a_source_text,
+ const ArgumentMatcherTuple& m)
+ : ExpectationBase(a_file, a_line, a_source_text),
+ owner_(owner),
+ matchers_(m),
+ // By default, extra_matcher_ should match anything. However,
+ // we cannot initialize it with _ as that triggers a compiler
+ // bug in Symbian's C++ compiler (cannot decide between two
+ // overloaded constructors of Matcher<const ArgumentTuple&>).
+ extra_matcher_(A<const ArgumentTuple&>()),
+ repeated_action_(DoDefault()) {}
+
+ virtual ~TypedExpectation() {
+ // Check the validity of the action count if it hasn't been done
+ // yet (for example, if the expectation was never used).
+ CheckActionCountIfNotDone();
+ for (UntypedActions::const_iterator it = untyped_actions_.begin();
+ it != untyped_actions_.end(); ++it) {
+ delete static_cast<const Action<F>*>(*it);
+ }
+ }
+
+ // Implements the .With() clause.
+ TypedExpectation& With(const Matcher<const ArgumentTuple&>& m) {
+ if (last_clause_ == kWith) {
+ ExpectSpecProperty(false,
+ ".With() cannot appear "
+ "more than once in an EXPECT_CALL().");
+ } else {
+ ExpectSpecProperty(last_clause_ < kWith,
+ ".With() must be the first "
+ "clause in an EXPECT_CALL().");
+ }
+ last_clause_ = kWith;
+
+ extra_matcher_ = m;
+ extra_matcher_specified_ = true;
+ return *this;
+ }
+
+ // Implements the .Times() clause.
+ TypedExpectation& Times(const Cardinality& a_cardinality) {
+ ExpectationBase::UntypedTimes(a_cardinality);
+ return *this;
+ }
+
+ // Implements the .Times() clause.
+ TypedExpectation& Times(int n) {
+ return Times(Exactly(n));
+ }
+
+ // Implements the .InSequence() clause.
+ TypedExpectation& InSequence(const Sequence& s) {
+ ExpectSpecProperty(last_clause_ <= kInSequence,
+ ".InSequence() cannot appear after .After(),"
+ " .WillOnce(), .WillRepeatedly(), or "
+ ".RetiresOnSaturation().");
+ last_clause_ = kInSequence;
+
+ s.AddExpectation(GetHandle());
+ return *this;
+ }
+ TypedExpectation& InSequence(const Sequence& s1, const Sequence& s2) {
+ return InSequence(s1).InSequence(s2);
+ }
+ TypedExpectation& InSequence(const Sequence& s1, const Sequence& s2,
+ const Sequence& s3) {
+ return InSequence(s1, s2).InSequence(s3);
+ }
+ TypedExpectation& InSequence(const Sequence& s1, const Sequence& s2,
+ const Sequence& s3, const Sequence& s4) {
+ return InSequence(s1, s2, s3).InSequence(s4);
+ }
+ TypedExpectation& InSequence(const Sequence& s1, const Sequence& s2,
+ const Sequence& s3, const Sequence& s4,
+ const Sequence& s5) {
+ return InSequence(s1, s2, s3, s4).InSequence(s5);
+ }
+
+  // Implements the .After() clause.
+ TypedExpectation& After(const ExpectationSet& s) {
+ ExpectSpecProperty(last_clause_ <= kAfter,
+ ".After() cannot appear after .WillOnce(),"
+ " .WillRepeatedly(), or "
+ ".RetiresOnSaturation().");
+ last_clause_ = kAfter;
+
+ for (ExpectationSet::const_iterator it = s.begin(); it != s.end(); ++it) {
+ immediate_prerequisites_ += *it;
+ }
+ return *this;
+ }
+ TypedExpectation& After(const ExpectationSet& s1, const ExpectationSet& s2) {
+ return After(s1).After(s2);
+ }
+ TypedExpectation& After(const ExpectationSet& s1, const ExpectationSet& s2,
+ const ExpectationSet& s3) {
+ return After(s1, s2).After(s3);
+ }
+ TypedExpectation& After(const ExpectationSet& s1, const ExpectationSet& s2,
+ const ExpectationSet& s3, const ExpectationSet& s4) {
+ return After(s1, s2, s3).After(s4);
+ }
+ TypedExpectation& After(const ExpectationSet& s1, const ExpectationSet& s2,
+ const ExpectationSet& s3, const ExpectationSet& s4,
+ const ExpectationSet& s5) {
+ return After(s1, s2, s3, s4).After(s5);
+ }
+
+ // Implements the .WillOnce() clause.
+ TypedExpectation& WillOnce(const Action<F>& action) {
+ ExpectSpecProperty(last_clause_ <= kWillOnce,
+ ".WillOnce() cannot appear after "
+ ".WillRepeatedly() or .RetiresOnSaturation().");
+ last_clause_ = kWillOnce;
+
+ untyped_actions_.push_back(new Action<F>(action));
+ if (!cardinality_specified()) {
+ set_cardinality(Exactly(static_cast<int>(untyped_actions_.size())));
+ }
+ return *this;
+ }
+
+ // Implements the .WillRepeatedly() clause.
+ TypedExpectation& WillRepeatedly(const Action<F>& action) {
+ if (last_clause_ == kWillRepeatedly) {
+ ExpectSpecProperty(false,
+ ".WillRepeatedly() cannot appear "
+ "more than once in an EXPECT_CALL().");
+ } else {
+ ExpectSpecProperty(last_clause_ < kWillRepeatedly,
+ ".WillRepeatedly() cannot appear "
+ "after .RetiresOnSaturation().");
+ }
+ last_clause_ = kWillRepeatedly;
+ repeated_action_specified_ = true;
+
+ repeated_action_ = action;
+ if (!cardinality_specified()) {
+ set_cardinality(AtLeast(static_cast<int>(untyped_actions_.size())));
+ }
+
+ // Now that no more action clauses can be specified, we check
+ // whether their count makes sense.
+ CheckActionCountIfNotDone();
+ return *this;
+ }
+
+ // Implements the .RetiresOnSaturation() clause.
+ TypedExpectation& RetiresOnSaturation() {
+ ExpectSpecProperty(last_clause_ < kRetiresOnSaturation,
+ ".RetiresOnSaturation() cannot appear "
+ "more than once.");
+ last_clause_ = kRetiresOnSaturation;
+ retires_on_saturation_ = true;
+
+ // Now that no more action clauses can be specified, we check
+ // whether their count makes sense.
+ CheckActionCountIfNotDone();
+ return *this;
+ }
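+
+  // For reference, a complete EXPECT_CALL() chain using the clauses above
+  // might look like the following sketch (calc and Add are hypothetical;
+  // Return is a Google Mock action defined elsewhere):
+  //   EXPECT_CALL(calc, Add(Gt(0), _))
+  //       .Times(2)
+  //       .WillOnce(Return(1))
+  //       .WillOnce(Return(2))
+  //       .RetiresOnSaturation();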
+
+ // Returns the matchers for the arguments as specified inside the
+ // EXPECT_CALL() macro.
+ const ArgumentMatcherTuple& matchers() const {
+ return matchers_;
+ }
+
+ // Returns the matcher specified by the .With() clause.
+ const Matcher<const ArgumentTuple&>& extra_matcher() const {
+ return extra_matcher_;
+ }
+
+ // Returns the action specified by the .WillRepeatedly() clause.
+ const Action<F>& repeated_action() const { return repeated_action_; }
+
+ // If this mock method has an extra matcher (i.e. .With(matcher)),
+ // describes it to the ostream.
+ virtual void MaybeDescribeExtraMatcherTo(::std::ostream* os) {
+ if (extra_matcher_specified_) {
+ *os << " Expected args: ";
+ extra_matcher_.DescribeTo(os);
+ *os << "\n";
+ }
+ }
+
+ private:
+ template <typename Function>
+ friend class FunctionMockerBase;
+
+ // Returns an Expectation object that references and co-owns this
+ // expectation.
+ virtual Expectation GetHandle() {
+ return owner_->GetHandleOf(this);
+ }
+
+ // The following methods will be called only after the EXPECT_CALL()
+ // statement finishes and when the current thread holds
+ // g_gmock_mutex.
+
+ // Returns true iff this expectation matches the given arguments.
+ // L >= g_gmock_mutex
+ bool Matches(const ArgumentTuple& args) const {
+ g_gmock_mutex.AssertHeld();
+ return TupleMatches(matchers_, args) && extra_matcher_.Matches(args);
+ }
+
+ // Returns true iff this expectation should handle the given arguments.
+ // L >= g_gmock_mutex
+ bool ShouldHandleArguments(const ArgumentTuple& args) const {
+ g_gmock_mutex.AssertHeld();
+
+ // In case the action count wasn't checked when the expectation
+ // was defined (e.g. if this expectation has no WillRepeatedly()
+ // or RetiresOnSaturation() clause), we check it when the
+ // expectation is used for the first time.
+ CheckActionCountIfNotDone();
+ return !is_retired() && AllPrerequisitesAreSatisfied() && Matches(args);
+ }
+
+ // Describes the result of matching the arguments against this
+ // expectation to the given ostream.
+ // L >= g_gmock_mutex
+ void ExplainMatchResultTo(const ArgumentTuple& args,
+ ::std::ostream* os) const {
+ g_gmock_mutex.AssertHeld();
+
+ if (is_retired()) {
+ *os << " Expected: the expectation is active\n"
+ << " Actual: it is retired\n";
+ } else if (!Matches(args)) {
+ if (!TupleMatches(matchers_, args)) {
+ ExplainMatchFailureTupleTo(matchers_, args, os);
+ }
+ StringMatchResultListener listener;
+ if (!extra_matcher_.MatchAndExplain(args, &listener)) {
+ *os << " Expected args: ";
+ extra_matcher_.DescribeTo(os);
+ *os << "\n Actual: don't match";
+
+ internal::PrintIfNotEmpty(listener.str(), os);
+ *os << "\n";
+ }
+ } else if (!AllPrerequisitesAreSatisfied()) {
+ *os << " Expected: all pre-requisites are satisfied\n"
+ << " Actual: the following immediate pre-requisites "
+ << "are not satisfied:\n";
+ ExpectationSet unsatisfied_prereqs;
+ FindUnsatisfiedPrerequisites(&unsatisfied_prereqs);
+ int i = 0;
+ for (ExpectationSet::const_iterator it = unsatisfied_prereqs.begin();
+ it != unsatisfied_prereqs.end(); ++it) {
+ it->expectation_base()->DescribeLocationTo(os);
+ *os << "pre-requisite #" << i++ << "\n";
+ }
+ *os << " (end of pre-requisites)\n";
+ } else {
+ // This line is here just for completeness' sake. It will never
+ // be executed as currently the ExplainMatchResultTo() function
+ // is called only when the mock function call does NOT match the
+ // expectation.
+ *os << "The call matches the expectation.\n";
+ }
+ }
+
+ // Returns the action that should be taken for the current invocation.
+ // L >= g_gmock_mutex
+ const Action<F>& GetCurrentAction(const FunctionMockerBase<F>* mocker,
+ const ArgumentTuple& args) const {
+ g_gmock_mutex.AssertHeld();
+ const int count = call_count();
+ Assert(count >= 1, __FILE__, __LINE__,
+ "call_count() is <= 0 when GetCurrentAction() is "
+ "called - this should never happen.");
+
+ const int action_count = static_cast<int>(untyped_actions_.size());
+ if (action_count > 0 && !repeated_action_specified_ &&
+ count > action_count) {
+ // If there is at least one WillOnce() and no WillRepeatedly(),
+ // we warn the user when the WillOnce() clauses ran out.
+ ::std::stringstream ss;
+ DescribeLocationTo(&ss);
+ ss << "Actions ran out in " << source_text() << "...\n"
+ << "Called " << count << " times, but only "
+ << action_count << " WillOnce()"
+ << (action_count == 1 ? " is" : "s are") << " specified - ";
+ mocker->DescribeDefaultActionTo(args, &ss);
+ Log(WARNING, ss.str(), 1);
+ }
+
+ return count <= action_count ?
+ *static_cast<const Action<F>*>(untyped_actions_[count - 1]) :
+ repeated_action();
+ }
+
+ // Given the arguments of a mock function call, if the call will
+ // over-saturate this expectation, returns the default action;
+ // otherwise, returns the next action in this expectation. Also
+ // describes *what* happened to 'what', and explains *why* Google
+ // Mock does it to 'why'. This method is not const as it calls
+ // IncrementCallCount(). A return value of NULL means the default
+ // action.
+ // L >= g_gmock_mutex
+ const Action<F>* GetActionForArguments(const FunctionMockerBase<F>* mocker,
+ const ArgumentTuple& args,
+ ::std::ostream* what,
+ ::std::ostream* why) {
+ g_gmock_mutex.AssertHeld();
+ if (IsSaturated()) {
+ // We have an excessive call.
+ IncrementCallCount();
+ *what << "Mock function called more times than expected - ";
+ mocker->DescribeDefaultActionTo(args, what);
+ DescribeCallCountTo(why);
+
+ // TODO(wan@google.com): allow the user to control whether
+ // unexpected calls should fail immediately or continue using a
+ // flag --gmock_unexpected_calls_are_fatal.
+ return NULL;
+ }
+
+ IncrementCallCount();
+ RetireAllPreRequisites();
+
+ if (retires_on_saturation_ && IsSaturated()) {
+ Retire();
+ }
+
+    // Must be done after IncrementCallCount()!
+    *what << "Mock function call matches " << source_text() << "...\n";
+ return &(GetCurrentAction(mocker, args));
+ }
+
+ // All the fields below won't change once the EXPECT_CALL()
+ // statement finishes.
+ FunctionMockerBase<F>* const owner_;
+ ArgumentMatcherTuple matchers_;
+ Matcher<const ArgumentTuple&> extra_matcher_;
+ Action<F> repeated_action_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TypedExpectation);
+}; // class TypedExpectation
+
+// A MockSpec object is used by ON_CALL() or EXPECT_CALL() for
+// specifying the default behavior of, or expectation on, a mock
+// function.
+
+// Note: class MockSpec really belongs to the ::testing namespace.
+// However if we define it in ::testing, MSVC will complain when
+// classes in ::testing::internal declare it as a friend class
+// template.  To work around this compiler bug, we define MockSpec in
+// ::testing::internal and import it into ::testing.
+
+// Logs a message including file and line number information.
+void LogWithLocation(testing::internal::LogSeverity severity,
+ const char* file, int line,
+ const string& message);
+
+template <typename F>
+class MockSpec {
+ public:
+ typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+ typedef typename internal::Function<F>::ArgumentMatcherTuple
+ ArgumentMatcherTuple;
+
+ // Constructs a MockSpec object, given the function mocker object
+ // that the spec is associated with.
+ explicit MockSpec(internal::FunctionMockerBase<F>* function_mocker)
+ : function_mocker_(function_mocker) {}
+
+ // Adds a new default action spec to the function mocker and returns
+ // the newly created spec.
+ internal::OnCallSpec<F>& InternalDefaultActionSetAt(
+ const char* file, int line, const char* obj, const char* call) {
+ LogWithLocation(internal::INFO, file, line,
+ string("ON_CALL(") + obj + ", " + call + ") invoked");
+ return function_mocker_->AddNewOnCallSpec(file, line, matchers_);
+ }
+
+ // Adds a new expectation spec to the function mocker and returns
+ // the newly created spec.
+ internal::TypedExpectation<F>& InternalExpectedAt(
+ const char* file, int line, const char* obj, const char* call) {
+ const string source_text(string("EXPECT_CALL(") + obj + ", " + call + ")");
+ LogWithLocation(internal::INFO, file, line, source_text + " invoked");
+ return function_mocker_->AddNewExpectation(
+ file, line, source_text, matchers_);
+ }
+
+ private:
+ template <typename Function>
+ friend class internal::FunctionMocker;
+
+ void SetMatchers(const ArgumentMatcherTuple& matchers) {
+ matchers_ = matchers;
+ }
+
+ // The function mocker that owns this spec.
+ internal::FunctionMockerBase<F>* const function_mocker_;
+ // The argument matchers specified in the spec.
+ ArgumentMatcherTuple matchers_;
+
+ GTEST_DISALLOW_ASSIGN_(MockSpec);
+}; // class MockSpec
+
+// MSVC warns about using 'this' in base member initializer list, so
+// we need to temporarily disable the warning. We have to do it for
+// the entire class to suppress the warning, even though it's about
+// the constructor only.
+
+#ifdef _MSC_VER
+# pragma warning(push) // Saves the current warning state.
+# pragma warning(disable:4355) // Temporarily disables warning 4355.
+#endif  // _MSC_VER
+
+// C++ treats the void type specially. For example, you cannot define
+// a void-typed variable or pass a void value to a function.
+// ActionResultHolder<T> holds a value of type T, where T must be a
+// copyable type or void (T doesn't need to be default-constructible).
+// It hides the syntactic difference between void and other types, and
+// is used to unify the code for invoking both void-returning and
+// non-void-returning mock functions.
+
+// Untyped base class for ActionResultHolder<T>.
+class UntypedActionResultHolderBase {
+ public:
+ virtual ~UntypedActionResultHolderBase() {}
+
+ // Prints the held value as an action's result to os.
+ virtual void PrintAsActionResult(::std::ostream* os) const = 0;
+};
+
+// This generic definition is used when T is not void.
+template <typename T>
+class ActionResultHolder : public UntypedActionResultHolderBase {
+ public:
+ explicit ActionResultHolder(T a_value) : value_(a_value) {}
+
+ // The compiler-generated copy constructor and assignment operator
+ // are exactly what we need, so we don't need to define them.
+
+ // Returns the held value and deletes this object.
+ T GetValueAndDelete() const {
+ T retval(value_);
+ delete this;
+ return retval;
+ }
+
+ // Prints the held value as an action's result to os.
+ virtual void PrintAsActionResult(::std::ostream* os) const {
+ *os << "\n Returns: ";
+ // T may be a reference type, so we don't use UniversalPrint().
+ UniversalPrinter<T>::Print(value_, os);
+ }
+
+ // Performs the given mock function's default action and returns the
+ // result in a new-ed ActionResultHolder.
+ template <typename F>
+ static ActionResultHolder* PerformDefaultAction(
+ const FunctionMockerBase<F>* func_mocker,
+ const typename Function<F>::ArgumentTuple& args,
+ const string& call_description) {
+ return new ActionResultHolder(
+ func_mocker->PerformDefaultAction(args, call_description));
+ }
+
+ // Performs the given action and returns the result in a new-ed
+ // ActionResultHolder.
+ template <typename F>
+ static ActionResultHolder*
+ PerformAction(const Action<F>& action,
+ const typename Function<F>::ArgumentTuple& args) {
+ return new ActionResultHolder(action.Perform(args));
+ }
+
+ private:
+ T value_;
+
+ // T could be a reference type, so = isn't supported.
+ GTEST_DISALLOW_ASSIGN_(ActionResultHolder);
+};
+
+// Specialization for T = void.
+template <>
+class ActionResultHolder<void> : public UntypedActionResultHolderBase {
+ public:
+ void GetValueAndDelete() const { delete this; }
+
+ virtual void PrintAsActionResult(::std::ostream* /* os */) const {}
+
+  // Performs the given mock function's default action and returns NULL.
+ template <typename F>
+ static ActionResultHolder* PerformDefaultAction(
+ const FunctionMockerBase<F>* func_mocker,
+ const typename Function<F>::ArgumentTuple& args,
+ const string& call_description) {
+ func_mocker->PerformDefaultAction(args, call_description);
+ return NULL;
+ }
+
+ // Performs the given action and returns NULL.
+ template <typename F>
+ static ActionResultHolder* PerformAction(
+ const Action<F>& action,
+ const typename Function<F>::ArgumentTuple& args) {
+ action.Perform(args);
+ return NULL;
+ }
+};
+
+// The base of the function mocker class for the given function type.
+// We put the methods in this class instead of its child to avoid code
+// bloat.
+template <typename F>
+class FunctionMockerBase : public UntypedFunctionMockerBase {
+ public:
+ typedef typename Function<F>::Result Result;
+ typedef typename Function<F>::ArgumentTuple ArgumentTuple;
+ typedef typename Function<F>::ArgumentMatcherTuple ArgumentMatcherTuple;
+
+ FunctionMockerBase() : current_spec_(this) {}
+
+ // The destructor verifies that all expectations on this mock
+ // function have been satisfied. If not, it will report Google Test
+ // non-fatal failures for the violations.
+ // L < g_gmock_mutex
+ virtual ~FunctionMockerBase() {
+ MutexLock l(&g_gmock_mutex);
+ VerifyAndClearExpectationsLocked();
+ Mock::UnregisterLocked(this);
+ ClearDefaultActionsLocked();
+ }
+
+ // Returns the ON_CALL spec that matches this mock function with the
+ // given arguments; returns NULL if no matching ON_CALL is found.
+ // L = *
+ const OnCallSpec<F>* FindOnCallSpec(
+ const ArgumentTuple& args) const {
+ for (UntypedOnCallSpecs::const_reverse_iterator it
+ = untyped_on_call_specs_.rbegin();
+ it != untyped_on_call_specs_.rend(); ++it) {
+ const OnCallSpec<F>* spec = static_cast<const OnCallSpec<F>*>(*it);
+ if (spec->Matches(args))
+ return spec;
+ }
+
+ return NULL;
+ }
+
+ // Performs the default action of this mock function on the given arguments
+  // and returns the result. Asserts with a helpful call description if there is
+ // no valid return value. This method doesn't depend on the mutable state of
+ // this object, and thus can be called concurrently without locking.
+ // L = *
+ Result PerformDefaultAction(const ArgumentTuple& args,
+ const string& call_description) const {
+ const OnCallSpec<F>* const spec =
+ this->FindOnCallSpec(args);
+ if (spec != NULL) {
+ return spec->GetAction().Perform(args);
+ }
+ Assert(DefaultValue<Result>::Exists(), "", -1,
+ call_description + "\n The mock function has no default action "
+ "set, and its return type has no default value set.");
+ return DefaultValue<Result>::Get();
+ }
+
+ // Performs the default action with the given arguments and returns
+ // the action's result. The call description string will be used in
+ // the error message to describe the call in the case the default
+ // action fails. The caller is responsible for deleting the result.
+ // L = *
+ virtual UntypedActionResultHolderBase* UntypedPerformDefaultAction(
+ const void* untyped_args, // must point to an ArgumentTuple
+ const string& call_description) const {
+ const ArgumentTuple& args =
+ *static_cast<const ArgumentTuple*>(untyped_args);
+ return ResultHolder::PerformDefaultAction(this, args, call_description);
+ }
+
+ // Performs the given action with the given arguments and returns
+ // the action's result. The caller is responsible for deleting the
+ // result.
+ // L = *
+ virtual UntypedActionResultHolderBase* UntypedPerformAction(
+ const void* untyped_action, const void* untyped_args) const {
+ // Make a copy of the action before performing it, in case the
+ // action deletes the mock object (and thus deletes itself).
+ const Action<F> action = *static_cast<const Action<F>*>(untyped_action);
+ const ArgumentTuple& args =
+ *static_cast<const ArgumentTuple*>(untyped_args);
+ return ResultHolder::PerformAction(action, args);
+ }
+
+ // Implements UntypedFunctionMockerBase::ClearDefaultActionsLocked():
+ // clears the ON_CALL()s set on this mock function.
+ // L >= g_gmock_mutex
+ virtual void ClearDefaultActionsLocked() {
+ g_gmock_mutex.AssertHeld();
+ for (UntypedOnCallSpecs::const_iterator it =
+ untyped_on_call_specs_.begin();
+ it != untyped_on_call_specs_.end(); ++it) {
+ delete static_cast<const OnCallSpec<F>*>(*it);
+ }
+ untyped_on_call_specs_.clear();
+ }
+
+ protected:
+ template <typename Function>
+ friend class MockSpec;
+
+ typedef ActionResultHolder<Result> ResultHolder;
+
+ // Returns the result of invoking this mock function with the given
+ // arguments. This function can be safely called from multiple
+ // threads concurrently.
+ // L < g_gmock_mutex
+ Result InvokeWith(const ArgumentTuple& args) {
+ return static_cast<const ResultHolder*>(
+ this->UntypedInvokeWith(&args))->GetValueAndDelete();
+ }
+
+ // Adds and returns a default action spec for this mock function.
+ // L < g_gmock_mutex
+ OnCallSpec<F>& AddNewOnCallSpec(
+ const char* file, int line,
+ const ArgumentMatcherTuple& m) {
+ Mock::RegisterUseByOnCallOrExpectCall(MockObject(), file, line);
+ OnCallSpec<F>* const on_call_spec = new OnCallSpec<F>(file, line, m);
+ untyped_on_call_specs_.push_back(on_call_spec);
+ return *on_call_spec;
+ }
+
+ // Adds and returns an expectation spec for this mock function.
+ // L < g_gmock_mutex
+ TypedExpectation<F>& AddNewExpectation(
+ const char* file,
+ int line,
+ const string& source_text,
+ const ArgumentMatcherTuple& m) {
+ Mock::RegisterUseByOnCallOrExpectCall(MockObject(), file, line);
+ TypedExpectation<F>* const expectation =
+ new TypedExpectation<F>(this, file, line, source_text, m);
+ const linked_ptr<ExpectationBase> untyped_expectation(expectation);
+ untyped_expectations_.push_back(untyped_expectation);
+
+ // Adds this expectation into the implicit sequence if there is one.
+ Sequence* const implicit_sequence = g_gmock_implicit_sequence.get();
+ if (implicit_sequence != NULL) {
+ implicit_sequence->AddExpectation(Expectation(untyped_expectation));
+ }
+
+ return *expectation;
+ }
+
+ // The current spec (either default action spec or expectation spec)
+ // being described on this function mocker.
+ MockSpec<F>& current_spec() { return current_spec_; }
+
+ private:
+ template <typename Func> friend class TypedExpectation;
+
+ // Some utilities needed for implementing UntypedInvokeWith().
+
+ // Describes what default action will be performed for the given
+ // arguments.
+ // L = *
+ void DescribeDefaultActionTo(const ArgumentTuple& args,
+ ::std::ostream* os) const {
+ const OnCallSpec<F>* const spec = FindOnCallSpec(args);
+
+ if (spec == NULL) {
+ *os << (internal::type_equals<Result, void>::value ?
+ "returning directly.\n" :
+ "returning default value.\n");
+ } else {
+ *os << "taking default action specified at:\n"
+ << FormatFileLocation(spec->file(), spec->line()) << "\n";
+ }
+ }
+
+ // Writes a message that the call is uninteresting (i.e. neither
+ // explicitly expected nor explicitly unexpected) to the given
+ // ostream.
+ // L < g_gmock_mutex
+ virtual void UntypedDescribeUninterestingCall(const void* untyped_args,
+ ::std::ostream* os) const {
+ const ArgumentTuple& args =
+ *static_cast<const ArgumentTuple*>(untyped_args);
+ *os << "Uninteresting mock function call - ";
+ DescribeDefaultActionTo(args, os);
+ *os << " Function call: " << Name();
+ UniversalPrint(args, os);
+ }
+
+ // Returns the expectation that matches the given function arguments
+  // (or NULL if there's no match); when a match is found,
+ // untyped_action is set to point to the action that should be
+ // performed (or NULL if the action is "do default"), and
+ // is_excessive is modified to indicate whether the call exceeds the
+ // expected number.
+ //
+ // Critical section: We must find the matching expectation and the
+ // corresponding action that needs to be taken in an ATOMIC
+ // transaction. Otherwise another thread may call this mock
+ // method in the middle and mess up the state.
+ //
+ // However, performing the action has to be left out of the critical
+ // section. The reason is that we have no control on what the
+ // action does (it can invoke an arbitrary user function or even a
+  // mock function) and excessive locking could cause a deadlock.
+ // L < g_gmock_mutex
+ virtual const ExpectationBase* UntypedFindMatchingExpectation(
+ const void* untyped_args,
+ const void** untyped_action, bool* is_excessive,
+ ::std::ostream* what, ::std::ostream* why) {
+ const ArgumentTuple& args =
+ *static_cast<const ArgumentTuple*>(untyped_args);
+ MutexLock l(&g_gmock_mutex);
+ TypedExpectation<F>* exp = this->FindMatchingExpectationLocked(args);
+ if (exp == NULL) { // A match wasn't found.
+ this->FormatUnexpectedCallMessageLocked(args, what, why);
+ return NULL;
+ }
+
+    // This must be done before calling GetActionForArguments(),
+ // which will increment the call count for *exp and thus affect
+ // its saturation status.
+ *is_excessive = exp->IsSaturated();
+ const Action<F>* action = exp->GetActionForArguments(this, args, what, why);
+ if (action != NULL && action->IsDoDefault())
+ action = NULL; // Normalize "do default" to NULL.
+ *untyped_action = action;
+ return exp;
+ }
+
+ // Prints the given function arguments to the ostream.
+ virtual void UntypedPrintArgs(const void* untyped_args,
+ ::std::ostream* os) const {
+ const ArgumentTuple& args =
+ *static_cast<const ArgumentTuple*>(untyped_args);
+ UniversalPrint(args, os);
+ }
+
+ // Returns the expectation that matches the arguments, or NULL if no
+ // expectation matches them.
+ // L >= g_gmock_mutex
+ TypedExpectation<F>* FindMatchingExpectationLocked(
+ const ArgumentTuple& args) const {
+ g_gmock_mutex.AssertHeld();
+ for (typename UntypedExpectations::const_reverse_iterator it =
+ untyped_expectations_.rbegin();
+ it != untyped_expectations_.rend(); ++it) {
+ TypedExpectation<F>* const exp =
+ static_cast<TypedExpectation<F>*>(it->get());
+ if (exp->ShouldHandleArguments(args)) {
+ return exp;
+ }
+ }
+ return NULL;
+ }
+
+  // Writes a message that the arguments don't match any expectation.
+ // L >= g_gmock_mutex
+ void FormatUnexpectedCallMessageLocked(const ArgumentTuple& args,
+ ::std::ostream* os,
+ ::std::ostream* why) const {
+ g_gmock_mutex.AssertHeld();
+ *os << "\nUnexpected mock function call - ";
+ DescribeDefaultActionTo(args, os);
+ PrintTriedExpectationsLocked(args, why);
+ }
+
+ // Prints a list of expectations that have been tried against the
+ // current mock function call.
+ // L >= g_gmock_mutex
+ void PrintTriedExpectationsLocked(const ArgumentTuple& args,
+ ::std::ostream* why) const {
+ g_gmock_mutex.AssertHeld();
+ const int count = static_cast<int>(untyped_expectations_.size());
+ *why << "Google Mock tried the following " << count << " "
+ << (count == 1 ? "expectation, but it didn't match" :
+ "expectations, but none matched")
+ << ":\n";
+ for (int i = 0; i < count; i++) {
+ TypedExpectation<F>* const expectation =
+ static_cast<TypedExpectation<F>*>(untyped_expectations_[i].get());
+ *why << "\n";
+ expectation->DescribeLocationTo(why);
+ if (count > 1) {
+ *why << "tried expectation #" << i << ": ";
+ }
+ *why << expectation->source_text() << "...\n";
+ expectation->ExplainMatchResultTo(args, why);
+ expectation->DescribeCallCountTo(why);
+ }
+ }
+
+ // The current spec (either default action spec or expectation spec)
+ // being described on this function mocker.
+ MockSpec<F> current_spec_;
+
+ // There is no generally useful and implementable semantics of
+ // copying a mock object, so copying a mock is usually a user error.
+ // Thus we disallow copying function mockers. If the user really
+ // wants to copy a mock object, he should implement his own copy
+ // operation, for example:
+ //
+ // class MockFoo : public Foo {
+ // public:
+ // // Defines a copy constructor explicitly.
+ // MockFoo(const MockFoo& src) {}
+ // ...
+ // };
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(FunctionMockerBase);
+}; // class FunctionMockerBase
+
+#ifdef _MSC_VER
+# pragma warning(pop) // Restores the warning state.
+#endif // _MSC_VER
+
+// Implements methods of FunctionMockerBase.
+
+// Verifies that all expectations on this mock function have been
+// satisfied. Reports one or more Google Test non-fatal failures and
+// returns false if not.
+// L >= g_gmock_mutex
+
+// Reports an uninteresting call (whose description is in msg) in the
+// manner specified by 'reaction'.
+void ReportUninterestingCall(CallReaction reaction, const string& msg);
+
+} // namespace internal
+
+// The style guide prohibits "using" statements in a namespace scope
+// inside a header file. However, the MockSpec class template is
+// meant to be defined in the ::testing namespace. The following line
+// is just a trick for working around a bug in MSVC 8.0, which cannot
+// handle it if we define MockSpec in ::testing.
+using internal::MockSpec;
+
+// Const(x) is a convenient function for obtaining a const reference
+// to x. This is useful for setting expectations on an overloaded
+// const mock method, e.g.
+//
+// class MockFoo : public FooInterface {
+// public:
+// MOCK_METHOD0(Bar, int());
+// MOCK_CONST_METHOD0(Bar, int&());
+// };
+//
+// MockFoo foo;
+// // Expects a call to non-const MockFoo::Bar().
+// EXPECT_CALL(foo, Bar());
+// // Expects a call to const MockFoo::Bar().
+// EXPECT_CALL(Const(foo), Bar());
+template <typename T>
+inline const T& Const(const T& x) { return x; }
+
+// Constructs an Expectation object that references and co-owns exp.
+inline Expectation::Expectation(internal::ExpectationBase& exp) // NOLINT
+ : expectation_base_(exp.GetHandle().expectation_base()) {}
+
+} // namespace testing
+
+// A separate macro is required to avoid compile errors when the name
+// of the method used in call is a result of macro expansion.
+// See CompilesWithMethodNameExpandedFromMacro tests in
+// internal/gmock-spec-builders_test.cc for more details.
+#define GMOCK_ON_CALL_IMPL_(obj, call) \
+ ((obj).gmock_##call).InternalDefaultActionSetAt(__FILE__, __LINE__, \
+ #obj, #call)
+#define ON_CALL(obj, call) GMOCK_ON_CALL_IMPL_(obj, call)
+
+#define GMOCK_EXPECT_CALL_IMPL_(obj, call) \
+ ((obj).gmock_##call).InternalExpectedAt(__FILE__, __LINE__, #obj, #call)
+#define EXPECT_CALL(obj, call) GMOCK_EXPECT_CALL_IMPL_(obj, call)
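+
+// A minimal usage sketch (MockFoo, its Bar() method, and the values
+// are hypothetical; Return() and the _ wildcard matcher come from
+// other gmock headers):
+//
+//   class MockFoo : public Foo {
+//    public:
+//     MOCK_METHOD1(Bar, int(int n));
+//   };
+//
+//   MockFoo foo;
+//   // Default action taken when no expectation matches.
+//   ON_CALL(foo, Bar(_)).WillByDefault(Return(0));
+//   // Expect exactly one call with argument 5, returning 42.
+//   EXPECT_CALL(foo, Bar(5)).WillOnce(Return(42));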
+
+#endif // GMOCK_INCLUDE_GMOCK_GMOCK_SPEC_BUILDERS_H_
+
+namespace testing {
+namespace internal {
+
+template <typename F>
+class FunctionMockerBase;
+
+// Note: class FunctionMocker really belongs to the ::testing
+// namespace. However if we define it in ::testing, MSVC will
+// complain when classes in ::testing::internal declare it as a
+// friend class template. To work around this compiler bug, we define
+// FunctionMocker in ::testing::internal and import it into ::testing.
+template <typename F>
+class FunctionMocker;
+
+template <typename R>
+class FunctionMocker<R()> : public
+ internal::FunctionMockerBase<R()> {
+ public:
+ typedef R F();
+ typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+ MockSpec<F>& With() {
+ return this->current_spec();
+ }
+
+ R Invoke() {
+ // Even though gcc and MSVC don't enforce it, 'this->' is required
+ // by the C++ standard [14.6.4] here, as the base class type is
+ // dependent on the template argument (and thus shouldn't be
+ // looked into when resolving InvokeWith).
+ return this->InvokeWith(ArgumentTuple());
+ }
+};
+
+template <typename R, typename A1>
+class FunctionMocker<R(A1)> : public
+ internal::FunctionMockerBase<R(A1)> {
+ public:
+ typedef R F(A1);
+ typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+ MockSpec<F>& With(const Matcher<A1>& m1) {
+ this->current_spec().SetMatchers(::std::tr1::make_tuple(m1));
+ return this->current_spec();
+ }
+
+ R Invoke(A1 a1) {
+ // Even though gcc and MSVC don't enforce it, 'this->' is required
+ // by the C++ standard [14.6.4] here, as the base class type is
+ // dependent on the template argument (and thus shouldn't be
+ // looked into when resolving InvokeWith).
+ return this->InvokeWith(ArgumentTuple(a1));
+ }
+};
+
+template <typename R, typename A1, typename A2>
+class FunctionMocker<R(A1, A2)> : public
+ internal::FunctionMockerBase<R(A1, A2)> {
+ public:
+ typedef R F(A1, A2);
+ typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+ MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2) {
+ this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2));
+ return this->current_spec();
+ }
+
+ R Invoke(A1 a1, A2 a2) {
+ // Even though gcc and MSVC don't enforce it, 'this->' is required
+ // by the C++ standard [14.6.4] here, as the base class type is
+ // dependent on the template argument (and thus shouldn't be
+ // looked into when resolving InvokeWith).
+ return this->InvokeWith(ArgumentTuple(a1, a2));
+ }
+};
+
+template <typename R, typename A1, typename A2, typename A3>
+class FunctionMocker<R(A1, A2, A3)> : public
+ internal::FunctionMockerBase<R(A1, A2, A3)> {
+ public:
+ typedef R F(A1, A2, A3);
+ typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+ MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2,
+ const Matcher<A3>& m3) {
+ this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2, m3));
+ return this->current_spec();
+ }
+
+ R Invoke(A1 a1, A2 a2, A3 a3) {
+ // Even though gcc and MSVC don't enforce it, 'this->' is required
+ // by the C++ standard [14.6.4] here, as the base class type is
+ // dependent on the template argument (and thus shouldn't be
+ // looked into when resolving InvokeWith).
+ return this->InvokeWith(ArgumentTuple(a1, a2, a3));
+ }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4>
+class FunctionMocker<R(A1, A2, A3, A4)> : public
+ internal::FunctionMockerBase<R(A1, A2, A3, A4)> {
+ public:
+ typedef R F(A1, A2, A3, A4);
+ typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+ MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2,
+ const Matcher<A3>& m3, const Matcher<A4>& m4) {
+ this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2, m3, m4));
+ return this->current_spec();
+ }
+
+ R Invoke(A1 a1, A2 a2, A3 a3, A4 a4) {
+ // Even though gcc and MSVC don't enforce it, 'this->' is required
+ // by the C++ standard [14.6.4] here, as the base class type is
+ // dependent on the template argument (and thus shouldn't be
+ // looked into when resolving InvokeWith).
+ return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4));
+ }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5>
+class FunctionMocker<R(A1, A2, A3, A4, A5)> : public
+ internal::FunctionMockerBase<R(A1, A2, A3, A4, A5)> {
+ public:
+ typedef R F(A1, A2, A3, A4, A5);
+ typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+ MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2,
+ const Matcher<A3>& m3, const Matcher<A4>& m4, const Matcher<A5>& m5) {
+ this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2, m3, m4,
+ m5));
+ return this->current_spec();
+ }
+
+ R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5) {
+ // Even though gcc and MSVC don't enforce it, 'this->' is required
+ // by the C++ standard [14.6.4] here, as the base class type is
+ // dependent on the template argument (and thus shouldn't be
+ // looked into when resolving InvokeWith).
+ return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5));
+ }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6>
+class FunctionMocker<R(A1, A2, A3, A4, A5, A6)> : public
+ internal::FunctionMockerBase<R(A1, A2, A3, A4, A5, A6)> {
+ public:
+ typedef R F(A1, A2, A3, A4, A5, A6);
+ typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+ MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2,
+ const Matcher<A3>& m3, const Matcher<A4>& m4, const Matcher<A5>& m5,
+ const Matcher<A6>& m6) {
+ this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2, m3, m4, m5,
+ m6));
+ return this->current_spec();
+ }
+
+ R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6) {
+ // Even though gcc and MSVC don't enforce it, 'this->' is required
+ // by the C++ standard [14.6.4] here, as the base class type is
+ // dependent on the template argument (and thus shouldn't be
+ // looked into when resolving InvokeWith).
+ return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5, a6));
+ }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6, typename A7>
+class FunctionMocker<R(A1, A2, A3, A4, A5, A6, A7)> : public
+ internal::FunctionMockerBase<R(A1, A2, A3, A4, A5, A6, A7)> {
+ public:
+ typedef R F(A1, A2, A3, A4, A5, A6, A7);
+ typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+ MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2,
+ const Matcher<A3>& m3, const Matcher<A4>& m4, const Matcher<A5>& m5,
+ const Matcher<A6>& m6, const Matcher<A7>& m7) {
+ this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2, m3, m4, m5,
+ m6, m7));
+ return this->current_spec();
+ }
+
+ R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7) {
+ // Even though gcc and MSVC don't enforce it, 'this->' is required
+ // by the C++ standard [14.6.4] here, as the base class type is
+ // dependent on the template argument (and thus shouldn't be
+ // looked into when resolving InvokeWith).
+ return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5, a6, a7));
+ }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6, typename A7, typename A8>
+class FunctionMocker<R(A1, A2, A3, A4, A5, A6, A7, A8)> : public
+ internal::FunctionMockerBase<R(A1, A2, A3, A4, A5, A6, A7, A8)> {
+ public:
+ typedef R F(A1, A2, A3, A4, A5, A6, A7, A8);
+ typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+ MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2,
+ const Matcher<A3>& m3, const Matcher<A4>& m4, const Matcher<A5>& m5,
+ const Matcher<A6>& m6, const Matcher<A7>& m7, const Matcher<A8>& m8) {
+ this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2, m3, m4, m5,
+ m6, m7, m8));
+ return this->current_spec();
+ }
+
+ R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7, A8 a8) {
+ // Even though gcc and MSVC don't enforce it, 'this->' is required
+ // by the C++ standard [14.6.4] here, as the base class type is
+ // dependent on the template argument (and thus shouldn't be
+ // looked into when resolving InvokeWith).
+ return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5, a6, a7, a8));
+ }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6, typename A7, typename A8, typename A9>
+class FunctionMocker<R(A1, A2, A3, A4, A5, A6, A7, A8, A9)> : public
+ internal::FunctionMockerBase<R(A1, A2, A3, A4, A5, A6, A7, A8, A9)> {
+ public:
+ typedef R F(A1, A2, A3, A4, A5, A6, A7, A8, A9);
+ typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+ MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2,
+ const Matcher<A3>& m3, const Matcher<A4>& m4, const Matcher<A5>& m5,
+ const Matcher<A6>& m6, const Matcher<A7>& m7, const Matcher<A8>& m8,
+ const Matcher<A9>& m9) {
+ this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2, m3, m4, m5,
+ m6, m7, m8, m9));
+ return this->current_spec();
+ }
+
+ R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7, A8 a8, A9 a9) {
+ // Even though gcc and MSVC don't enforce it, 'this->' is required
+ // by the C++ standard [14.6.4] here, as the base class type is
+ // dependent on the template argument (and thus shouldn't be
+ // looked into when resolving InvokeWith).
+ return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5, a6, a7, a8, a9));
+ }
+};
+
+template <typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5, typename A6, typename A7, typename A8, typename A9,
+ typename A10>
+class FunctionMocker<R(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)> : public
+ internal::FunctionMockerBase<R(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)> {
+ public:
+ typedef R F(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10);
+ typedef typename internal::Function<F>::ArgumentTuple ArgumentTuple;
+
+ MockSpec<F>& With(const Matcher<A1>& m1, const Matcher<A2>& m2,
+ const Matcher<A3>& m3, const Matcher<A4>& m4, const Matcher<A5>& m5,
+ const Matcher<A6>& m6, const Matcher<A7>& m7, const Matcher<A8>& m8,
+ const Matcher<A9>& m9, const Matcher<A10>& m10) {
+ this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2, m3, m4, m5,
+ m6, m7, m8, m9, m10));
+ return this->current_spec();
+ }
+
+ R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7, A8 a8, A9 a9,
+ A10 a10) {
+ // Even though gcc and MSVC don't enforce it, 'this->' is required
+ // by the C++ standard [14.6.4] here, as the base class type is
+ // dependent on the template argument (and thus shouldn't be
+ // looked into when resolving InvokeWith).
+ return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5, a6, a7, a8, a9,
+ a10));
+ }
+};
+
+} // namespace internal
+
+// The style guide prohibits "using" statements in a namespace scope
+// inside a header file. However, the FunctionMocker class template
+// is meant to be defined in the ::testing namespace. The following
+// line is just a trick for working around a bug in MSVC 8.0, which
+// cannot handle it if we define FunctionMocker in ::testing.
+using internal::FunctionMocker;
+
+// The result type of function type F.
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_RESULT_(tn, F) tn ::testing::internal::Function<F>::Result
+
+// The type of argument N of function type F.
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_ARG_(tn, F, N) tn ::testing::internal::Function<F>::Argument##N
+
+// The matcher type for argument N of function type F.
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_MATCHER_(tn, F, N) const ::testing::Matcher<GMOCK_ARG_(tn, F, N)>&
+
+// The variable for mocking the given method.
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_MOCKER_(arity, constness, Method) \
+ GTEST_CONCAT_TOKEN_(gmock##constness##arity##_##Method##_, __LINE__)
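+
+// For example, if the macro is expanded on line 123 of a mock class,
+// GMOCK_MOCKER_(1, const, Bar) names the member gmockconst1_Bar_123
+// and GMOCK_MOCKER_(1, , Bar) names gmock1_Bar_123. Folding __LINE__
+// into the name is presumably what keeps mockers for same-named
+// overloads declared on different lines from colliding.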
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD0_(tn, constness, ct, Method, F) \
+ GMOCK_RESULT_(tn, F) ct Method() constness { \
+ GTEST_COMPILE_ASSERT_(::std::tr1::tuple_size< \
+ tn ::testing::internal::Function<F>::ArgumentTuple>::value == 0, \
+ this_method_does_not_take_0_arguments); \
+ GMOCK_MOCKER_(0, constness, Method).SetOwnerAndName(this, #Method); \
+ return GMOCK_MOCKER_(0, constness, Method).Invoke(); \
+ } \
+ ::testing::MockSpec<F>& \
+ gmock_##Method() constness { \
+ GMOCK_MOCKER_(0, constness, Method).RegisterOwner(this); \
+ return GMOCK_MOCKER_(0, constness, Method).With(); \
+ } \
+ mutable ::testing::FunctionMocker<F> GMOCK_MOCKER_(0, constness, Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD1_(tn, constness, ct, Method, F) \
+ GMOCK_RESULT_(tn, F) ct Method(GMOCK_ARG_(tn, F, 1) gmock_a1) constness { \
+ GTEST_COMPILE_ASSERT_(::std::tr1::tuple_size< \
+ tn ::testing::internal::Function<F>::ArgumentTuple>::value == 1, \
+ this_method_does_not_take_1_argument); \
+ GMOCK_MOCKER_(1, constness, Method).SetOwnerAndName(this, #Method); \
+ return GMOCK_MOCKER_(1, constness, Method).Invoke(gmock_a1); \
+ } \
+ ::testing::MockSpec<F>& \
+ gmock_##Method(GMOCK_MATCHER_(tn, F, 1) gmock_a1) constness { \
+ GMOCK_MOCKER_(1, constness, Method).RegisterOwner(this); \
+ return GMOCK_MOCKER_(1, constness, Method).With(gmock_a1); \
+ } \
+ mutable ::testing::FunctionMocker<F> GMOCK_MOCKER_(1, constness, Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD2_(tn, constness, ct, Method, F) \
+ GMOCK_RESULT_(tn, F) ct Method(GMOCK_ARG_(tn, F, 1) gmock_a1, \
+ GMOCK_ARG_(tn, F, 2) gmock_a2) constness { \
+ GTEST_COMPILE_ASSERT_(::std::tr1::tuple_size< \
+ tn ::testing::internal::Function<F>::ArgumentTuple>::value == 2, \
+ this_method_does_not_take_2_arguments); \
+ GMOCK_MOCKER_(2, constness, Method).SetOwnerAndName(this, #Method); \
+ return GMOCK_MOCKER_(2, constness, Method).Invoke(gmock_a1, gmock_a2); \
+ } \
+ ::testing::MockSpec<F>& \
+ gmock_##Method(GMOCK_MATCHER_(tn, F, 1) gmock_a1, \
+ GMOCK_MATCHER_(tn, F, 2) gmock_a2) constness { \
+ GMOCK_MOCKER_(2, constness, Method).RegisterOwner(this); \
+ return GMOCK_MOCKER_(2, constness, Method).With(gmock_a1, gmock_a2); \
+ } \
+ mutable ::testing::FunctionMocker<F> GMOCK_MOCKER_(2, constness, Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD3_(tn, constness, ct, Method, F) \
+ GMOCK_RESULT_(tn, F) ct Method(GMOCK_ARG_(tn, F, 1) gmock_a1, \
+ GMOCK_ARG_(tn, F, 2) gmock_a2, \
+ GMOCK_ARG_(tn, F, 3) gmock_a3) constness { \
+ GTEST_COMPILE_ASSERT_(::std::tr1::tuple_size< \
+ tn ::testing::internal::Function<F>::ArgumentTuple>::value == 3, \
+ this_method_does_not_take_3_arguments); \
+ GMOCK_MOCKER_(3, constness, Method).SetOwnerAndName(this, #Method); \
+ return GMOCK_MOCKER_(3, constness, Method).Invoke(gmock_a1, gmock_a2, \
+ gmock_a3); \
+ } \
+ ::testing::MockSpec<F>& \
+ gmock_##Method(GMOCK_MATCHER_(tn, F, 1) gmock_a1, \
+ GMOCK_MATCHER_(tn, F, 2) gmock_a2, \
+ GMOCK_MATCHER_(tn, F, 3) gmock_a3) constness { \
+ GMOCK_MOCKER_(3, constness, Method).RegisterOwner(this); \
+ return GMOCK_MOCKER_(3, constness, Method).With(gmock_a1, gmock_a2, \
+ gmock_a3); \
+ } \
+ mutable ::testing::FunctionMocker<F> GMOCK_MOCKER_(3, constness, Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD4_(tn, constness, ct, Method, F) \
+ GMOCK_RESULT_(tn, F) ct Method(GMOCK_ARG_(tn, F, 1) gmock_a1, \
+ GMOCK_ARG_(tn, F, 2) gmock_a2, \
+ GMOCK_ARG_(tn, F, 3) gmock_a3, \
+ GMOCK_ARG_(tn, F, 4) gmock_a4) constness { \
+ GTEST_COMPILE_ASSERT_(::std::tr1::tuple_size< \
+ tn ::testing::internal::Function<F>::ArgumentTuple>::value == 4, \
+ this_method_does_not_take_4_arguments); \
+ GMOCK_MOCKER_(4, constness, Method).SetOwnerAndName(this, #Method); \
+ return GMOCK_MOCKER_(4, constness, Method).Invoke(gmock_a1, gmock_a2, \
+ gmock_a3, gmock_a4); \
+ } \
+ ::testing::MockSpec<F>& \
+ gmock_##Method(GMOCK_MATCHER_(tn, F, 1) gmock_a1, \
+ GMOCK_MATCHER_(tn, F, 2) gmock_a2, \
+ GMOCK_MATCHER_(tn, F, 3) gmock_a3, \
+ GMOCK_MATCHER_(tn, F, 4) gmock_a4) constness { \
+ GMOCK_MOCKER_(4, constness, Method).RegisterOwner(this); \
+ return GMOCK_MOCKER_(4, constness, Method).With(gmock_a1, gmock_a2, \
+ gmock_a3, gmock_a4); \
+ } \
+ mutable ::testing::FunctionMocker<F> GMOCK_MOCKER_(4, constness, Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD5_(tn, constness, ct, Method, F) \
+ GMOCK_RESULT_(tn, F) ct Method(GMOCK_ARG_(tn, F, 1) gmock_a1, \
+ GMOCK_ARG_(tn, F, 2) gmock_a2, \
+ GMOCK_ARG_(tn, F, 3) gmock_a3, \
+ GMOCK_ARG_(tn, F, 4) gmock_a4, \
+ GMOCK_ARG_(tn, F, 5) gmock_a5) constness { \
+ GTEST_COMPILE_ASSERT_(::std::tr1::tuple_size< \
+ tn ::testing::internal::Function<F>::ArgumentTuple>::value == 5, \
+ this_method_does_not_take_5_arguments); \
+ GMOCK_MOCKER_(5, constness, Method).SetOwnerAndName(this, #Method); \
+ return GMOCK_MOCKER_(5, constness, Method).Invoke(gmock_a1, gmock_a2, \
+ gmock_a3, gmock_a4, gmock_a5); \
+ } \
+ ::testing::MockSpec<F>& \
+ gmock_##Method(GMOCK_MATCHER_(tn, F, 1) gmock_a1, \
+ GMOCK_MATCHER_(tn, F, 2) gmock_a2, \
+ GMOCK_MATCHER_(tn, F, 3) gmock_a3, \
+ GMOCK_MATCHER_(tn, F, 4) gmock_a4, \
+ GMOCK_MATCHER_(tn, F, 5) gmock_a5) constness { \
+ GMOCK_MOCKER_(5, constness, Method).RegisterOwner(this); \
+ return GMOCK_MOCKER_(5, constness, Method).With(gmock_a1, gmock_a2, \
+ gmock_a3, gmock_a4, gmock_a5); \
+ } \
+ mutable ::testing::FunctionMocker<F> GMOCK_MOCKER_(5, constness, Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD6_(tn, constness, ct, Method, F) \
+ GMOCK_RESULT_(tn, F) ct Method(GMOCK_ARG_(tn, F, 1) gmock_a1, \
+ GMOCK_ARG_(tn, F, 2) gmock_a2, \
+ GMOCK_ARG_(tn, F, 3) gmock_a3, \
+ GMOCK_ARG_(tn, F, 4) gmock_a4, \
+ GMOCK_ARG_(tn, F, 5) gmock_a5, \
+ GMOCK_ARG_(tn, F, 6) gmock_a6) constness { \
+ GTEST_COMPILE_ASSERT_(::std::tr1::tuple_size< \
+ tn ::testing::internal::Function<F>::ArgumentTuple>::value == 6, \
+ this_method_does_not_take_6_arguments); \
+ GMOCK_MOCKER_(6, constness, Method).SetOwnerAndName(this, #Method); \
+ return GMOCK_MOCKER_(6, constness, Method).Invoke(gmock_a1, gmock_a2, \
+ gmock_a3, gmock_a4, gmock_a5, gmock_a6); \
+ } \
+ ::testing::MockSpec<F>& \
+ gmock_##Method(GMOCK_MATCHER_(tn, F, 1) gmock_a1, \
+ GMOCK_MATCHER_(tn, F, 2) gmock_a2, \
+ GMOCK_MATCHER_(tn, F, 3) gmock_a3, \
+ GMOCK_MATCHER_(tn, F, 4) gmock_a4, \
+ GMOCK_MATCHER_(tn, F, 5) gmock_a5, \
+ GMOCK_MATCHER_(tn, F, 6) gmock_a6) constness { \
+ GMOCK_MOCKER_(6, constness, Method).RegisterOwner(this); \
+ return GMOCK_MOCKER_(6, constness, Method).With(gmock_a1, gmock_a2, \
+ gmock_a3, gmock_a4, gmock_a5, gmock_a6); \
+ } \
+ mutable ::testing::FunctionMocker<F> GMOCK_MOCKER_(6, constness, Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD7_(tn, constness, ct, Method, F) \
+ GMOCK_RESULT_(tn, F) ct Method(GMOCK_ARG_(tn, F, 1) gmock_a1, \
+ GMOCK_ARG_(tn, F, 2) gmock_a2, \
+ GMOCK_ARG_(tn, F, 3) gmock_a3, \
+ GMOCK_ARG_(tn, F, 4) gmock_a4, \
+ GMOCK_ARG_(tn, F, 5) gmock_a5, \
+ GMOCK_ARG_(tn, F, 6) gmock_a6, \
+ GMOCK_ARG_(tn, F, 7) gmock_a7) constness { \
+ GTEST_COMPILE_ASSERT_(::std::tr1::tuple_size< \
+ tn ::testing::internal::Function<F>::ArgumentTuple>::value == 7, \
+ this_method_does_not_take_7_arguments); \
+ GMOCK_MOCKER_(7, constness, Method).SetOwnerAndName(this, #Method); \
+ return GMOCK_MOCKER_(7, constness, Method).Invoke(gmock_a1, gmock_a2, \
+ gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7); \
+ } \
+ ::testing::MockSpec<F>& \
+ gmock_##Method(GMOCK_MATCHER_(tn, F, 1) gmock_a1, \
+ GMOCK_MATCHER_(tn, F, 2) gmock_a2, \
+ GMOCK_MATCHER_(tn, F, 3) gmock_a3, \
+ GMOCK_MATCHER_(tn, F, 4) gmock_a4, \
+ GMOCK_MATCHER_(tn, F, 5) gmock_a5, \
+ GMOCK_MATCHER_(tn, F, 6) gmock_a6, \
+ GMOCK_MATCHER_(tn, F, 7) gmock_a7) constness { \
+ GMOCK_MOCKER_(7, constness, Method).RegisterOwner(this); \
+ return GMOCK_MOCKER_(7, constness, Method).With(gmock_a1, gmock_a2, \
+ gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7); \
+ } \
+ mutable ::testing::FunctionMocker<F> GMOCK_MOCKER_(7, constness, Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD8_(tn, constness, ct, Method, F) \
+ GMOCK_RESULT_(tn, F) ct Method(GMOCK_ARG_(tn, F, 1) gmock_a1, \
+ GMOCK_ARG_(tn, F, 2) gmock_a2, \
+ GMOCK_ARG_(tn, F, 3) gmock_a3, \
+ GMOCK_ARG_(tn, F, 4) gmock_a4, \
+ GMOCK_ARG_(tn, F, 5) gmock_a5, \
+ GMOCK_ARG_(tn, F, 6) gmock_a6, \
+ GMOCK_ARG_(tn, F, 7) gmock_a7, \
+ GMOCK_ARG_(tn, F, 8) gmock_a8) constness { \
+ GTEST_COMPILE_ASSERT_(::std::tr1::tuple_size< \
+ tn ::testing::internal::Function<F>::ArgumentTuple>::value == 8, \
+ this_method_does_not_take_8_arguments); \
+ GMOCK_MOCKER_(8, constness, Method).SetOwnerAndName(this, #Method); \
+ return GMOCK_MOCKER_(8, constness, Method).Invoke(gmock_a1, gmock_a2, \
+ gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8); \
+ } \
+ ::testing::MockSpec<F>& \
+ gmock_##Method(GMOCK_MATCHER_(tn, F, 1) gmock_a1, \
+ GMOCK_MATCHER_(tn, F, 2) gmock_a2, \
+ GMOCK_MATCHER_(tn, F, 3) gmock_a3, \
+ GMOCK_MATCHER_(tn, F, 4) gmock_a4, \
+ GMOCK_MATCHER_(tn, F, 5) gmock_a5, \
+ GMOCK_MATCHER_(tn, F, 6) gmock_a6, \
+ GMOCK_MATCHER_(tn, F, 7) gmock_a7, \
+ GMOCK_MATCHER_(tn, F, 8) gmock_a8) constness { \
+ GMOCK_MOCKER_(8, constness, Method).RegisterOwner(this); \
+ return GMOCK_MOCKER_(8, constness, Method).With(gmock_a1, gmock_a2, \
+ gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8); \
+ } \
+ mutable ::testing::FunctionMocker<F> GMOCK_MOCKER_(8, constness, Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD9_(tn, constness, ct, Method, F) \
+ GMOCK_RESULT_(tn, F) ct Method(GMOCK_ARG_(tn, F, 1) gmock_a1, \
+ GMOCK_ARG_(tn, F, 2) gmock_a2, \
+ GMOCK_ARG_(tn, F, 3) gmock_a3, \
+ GMOCK_ARG_(tn, F, 4) gmock_a4, \
+ GMOCK_ARG_(tn, F, 5) gmock_a5, \
+ GMOCK_ARG_(tn, F, 6) gmock_a6, \
+ GMOCK_ARG_(tn, F, 7) gmock_a7, \
+ GMOCK_ARG_(tn, F, 8) gmock_a8, \
+ GMOCK_ARG_(tn, F, 9) gmock_a9) constness { \
+ GTEST_COMPILE_ASSERT_(::std::tr1::tuple_size< \
+ tn ::testing::internal::Function<F>::ArgumentTuple>::value == 9, \
+ this_method_does_not_take_9_arguments); \
+ GMOCK_MOCKER_(9, constness, Method).SetOwnerAndName(this, #Method); \
+ return GMOCK_MOCKER_(9, constness, Method).Invoke(gmock_a1, gmock_a2, \
+ gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8, \
+ gmock_a9); \
+ } \
+ ::testing::MockSpec<F>& \
+ gmock_##Method(GMOCK_MATCHER_(tn, F, 1) gmock_a1, \
+ GMOCK_MATCHER_(tn, F, 2) gmock_a2, \
+ GMOCK_MATCHER_(tn, F, 3) gmock_a3, \
+ GMOCK_MATCHER_(tn, F, 4) gmock_a4, \
+ GMOCK_MATCHER_(tn, F, 5) gmock_a5, \
+ GMOCK_MATCHER_(tn, F, 6) gmock_a6, \
+ GMOCK_MATCHER_(tn, F, 7) gmock_a7, \
+ GMOCK_MATCHER_(tn, F, 8) gmock_a8, \
+ GMOCK_MATCHER_(tn, F, 9) gmock_a9) constness { \
+ GMOCK_MOCKER_(9, constness, Method).RegisterOwner(this); \
+ return GMOCK_MOCKER_(9, constness, Method).With(gmock_a1, gmock_a2, \
+ gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8, \
+ gmock_a9); \
+ } \
+ mutable ::testing::FunctionMocker<F> GMOCK_MOCKER_(9, constness, Method)
+
+// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!!
+#define GMOCK_METHOD10_(tn, constness, ct, Method, F) \
+ GMOCK_RESULT_(tn, F) ct Method(GMOCK_ARG_(tn, F, 1) gmock_a1, \
+ GMOCK_ARG_(tn, F, 2) gmock_a2, \
+ GMOCK_ARG_(tn, F, 3) gmock_a3, \
+ GMOCK_ARG_(tn, F, 4) gmock_a4, \
+ GMOCK_ARG_(tn, F, 5) gmock_a5, \
+ GMOCK_ARG_(tn, F, 6) gmock_a6, \
+ GMOCK_ARG_(tn, F, 7) gmock_a7, \
+ GMOCK_ARG_(tn, F, 8) gmock_a8, \
+ GMOCK_ARG_(tn, F, 9) gmock_a9, \
+ GMOCK_ARG_(tn, F, 10) gmock_a10) constness { \
+ GTEST_COMPILE_ASSERT_(::std::tr1::tuple_size< \
+ tn ::testing::internal::Function<F>::ArgumentTuple>::value == 10, \
+ this_method_does_not_take_10_arguments); \
+ GMOCK_MOCKER_(10, constness, Method).SetOwnerAndName(this, #Method); \
+ return GMOCK_MOCKER_(10, constness, Method).Invoke(gmock_a1, gmock_a2, \
+ gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8, gmock_a9, \
+ gmock_a10); \
+ } \
+ ::testing::MockSpec<F>& \
+ gmock_##Method(GMOCK_MATCHER_(tn, F, 1) gmock_a1, \
+ GMOCK_MATCHER_(tn, F, 2) gmock_a2, \
+ GMOCK_MATCHER_(tn, F, 3) gmock_a3, \
+ GMOCK_MATCHER_(tn, F, 4) gmock_a4, \
+ GMOCK_MATCHER_(tn, F, 5) gmock_a5, \
+ GMOCK_MATCHER_(tn, F, 6) gmock_a6, \
+ GMOCK_MATCHER_(tn, F, 7) gmock_a7, \
+ GMOCK_MATCHER_(tn, F, 8) gmock_a8, \
+ GMOCK_MATCHER_(tn, F, 9) gmock_a9, \
+ GMOCK_MATCHER_(tn, F, 10) gmock_a10) constness { \
+ GMOCK_MOCKER_(10, constness, Method).RegisterOwner(this); \
+ return GMOCK_MOCKER_(10, constness, Method).With(gmock_a1, gmock_a2, \
+ gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8, gmock_a9, \
+ gmock_a10); \
+ } \
+ mutable ::testing::FunctionMocker<F> GMOCK_MOCKER_(10, constness, Method)
+
+#define MOCK_METHOD0(m, F) GMOCK_METHOD0_(, , , m, F)
+#define MOCK_METHOD1(m, F) GMOCK_METHOD1_(, , , m, F)
+#define MOCK_METHOD2(m, F) GMOCK_METHOD2_(, , , m, F)
+#define MOCK_METHOD3(m, F) GMOCK_METHOD3_(, , , m, F)
+#define MOCK_METHOD4(m, F) GMOCK_METHOD4_(, , , m, F)
+#define MOCK_METHOD5(m, F) GMOCK_METHOD5_(, , , m, F)
+#define MOCK_METHOD6(m, F) GMOCK_METHOD6_(, , , m, F)
+#define MOCK_METHOD7(m, F) GMOCK_METHOD7_(, , , m, F)
+#define MOCK_METHOD8(m, F) GMOCK_METHOD8_(, , , m, F)
+#define MOCK_METHOD9(m, F) GMOCK_METHOD9_(, , , m, F)
+#define MOCK_METHOD10(m, F) GMOCK_METHOD10_(, , , m, F)
+
+#define MOCK_CONST_METHOD0(m, F) GMOCK_METHOD0_(, const, , m, F)
+#define MOCK_CONST_METHOD1(m, F) GMOCK_METHOD1_(, const, , m, F)
+#define MOCK_CONST_METHOD2(m, F) GMOCK_METHOD2_(, const, , m, F)
+#define MOCK_CONST_METHOD3(m, F) GMOCK_METHOD3_(, const, , m, F)
+#define MOCK_CONST_METHOD4(m, F) GMOCK_METHOD4_(, const, , m, F)
+#define MOCK_CONST_METHOD5(m, F) GMOCK_METHOD5_(, const, , m, F)
+#define MOCK_CONST_METHOD6(m, F) GMOCK_METHOD6_(, const, , m, F)
+#define MOCK_CONST_METHOD7(m, F) GMOCK_METHOD7_(, const, , m, F)
+#define MOCK_CONST_METHOD8(m, F) GMOCK_METHOD8_(, const, , m, F)
+#define MOCK_CONST_METHOD9(m, F) GMOCK_METHOD9_(, const, , m, F)
+#define MOCK_CONST_METHOD10(m, F) GMOCK_METHOD10_(, const, , m, F)
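+
+// A minimal usage sketch (MockTurtle and the Turtle interface are
+// hypothetical; this only illustrates where the MOCK_METHODn /
+// MOCK_CONST_METHODn macros go inside a mock class):
+//
+//   class MockTurtle : public Turtle {
+//    public:
+//     MOCK_METHOD0(PenDown, void());
+//     MOCK_METHOD2(GoTo, void(int x, int y));
+//     MOCK_CONST_METHOD0(GetX, int());
+//   };
+//
+// Each macro expands to the mocked method itself, a gmock_Method()
+// overload used by ON_CALL()/EXPECT_CALL(), and a mutable
+// FunctionMocker<F> data member (see GMOCK_METHODn_ above).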
+
+#define MOCK_METHOD0_T(m, F) GMOCK_METHOD0_(typename, , , m, F)
+#define MOCK_METHOD1_T(m, F) GMOCK_METHOD1_(typename, , , m, F)
+#define MOCK_METHOD2_T(m, F) GMOCK_METHOD2_(typename, , , m, F)
+#define MOCK_METHOD3_T(m, F) GMOCK_METHOD3_(typename, , , m, F)
+#define MOCK_METHOD4_T(m, F) GMOCK_METHOD4_(typename, , , m, F)
+#define MOCK_METHOD5_T(m, F) GMOCK_METHOD5_(typename, , , m, F)
+#define MOCK_METHOD6_T(m, F) GMOCK_METHOD6_(typename, , , m, F)
+#define MOCK_METHOD7_T(m, F) GMOCK_METHOD7_(typename, , , m, F)
+#define MOCK_METHOD8_T(m, F) GMOCK_METHOD8_(typename, , , m, F)
+#define MOCK_METHOD9_T(m, F) GMOCK_METHOD9_(typename, , , m, F)
+#define MOCK_METHOD10_T(m, F) GMOCK_METHOD10_(typename, , , m, F)
+
+#define MOCK_CONST_METHOD0_T(m, F) GMOCK_METHOD0_(typename, const, , m, F)
+#define MOCK_CONST_METHOD1_T(m, F) GMOCK_METHOD1_(typename, const, , m, F)
+#define MOCK_CONST_METHOD2_T(m, F) GMOCK_METHOD2_(typename, const, , m, F)
+#define MOCK_CONST_METHOD3_T(m, F) GMOCK_METHOD3_(typename, const, , m, F)
+#define MOCK_CONST_METHOD4_T(m, F) GMOCK_METHOD4_(typename, const, , m, F)
+#define MOCK_CONST_METHOD5_T(m, F) GMOCK_METHOD5_(typename, const, , m, F)
+#define MOCK_CONST_METHOD6_T(m, F) GMOCK_METHOD6_(typename, const, , m, F)
+#define MOCK_CONST_METHOD7_T(m, F) GMOCK_METHOD7_(typename, const, , m, F)
+#define MOCK_CONST_METHOD8_T(m, F) GMOCK_METHOD8_(typename, const, , m, F)
+#define MOCK_CONST_METHOD9_T(m, F) GMOCK_METHOD9_(typename, const, , m, F)
+#define MOCK_CONST_METHOD10_T(m, F) GMOCK_METHOD10_(typename, const, , m, F)
+
+#define MOCK_METHOD0_WITH_CALLTYPE(ct, m, F) GMOCK_METHOD0_(, , ct, m, F)
+#define MOCK_METHOD1_WITH_CALLTYPE(ct, m, F) GMOCK_METHOD1_(, , ct, m, F)
+#define MOCK_METHOD2_WITH_CALLTYPE(ct, m, F) GMOCK_METHOD2_(, , ct, m, F)
+#define MOCK_METHOD3_WITH_CALLTYPE(ct, m, F) GMOCK_METHOD3_(, , ct, m, F)
+#define MOCK_METHOD4_WITH_CALLTYPE(ct, m, F) GMOCK_METHOD4_(, , ct, m, F)
+#define MOCK_METHOD5_WITH_CALLTYPE(ct, m, F) GMOCK_METHOD5_(, , ct, m, F)
+#define MOCK_METHOD6_WITH_CALLTYPE(ct, m, F) GMOCK_METHOD6_(, , ct, m, F)
+#define MOCK_METHOD7_WITH_CALLTYPE(ct, m, F) GMOCK_METHOD7_(, , ct, m, F)
+#define MOCK_METHOD8_WITH_CALLTYPE(ct, m, F) GMOCK_METHOD8_(, , ct, m, F)
+#define MOCK_METHOD9_WITH_CALLTYPE(ct, m, F) GMOCK_METHOD9_(, , ct, m, F)
+#define MOCK_METHOD10_WITH_CALLTYPE(ct, m, F) GMOCK_METHOD10_(, , ct, m, F)
+
+#define MOCK_CONST_METHOD0_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD0_(, const, ct, m, F)
+#define MOCK_CONST_METHOD1_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD1_(, const, ct, m, F)
+#define MOCK_CONST_METHOD2_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD2_(, const, ct, m, F)
+#define MOCK_CONST_METHOD3_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD3_(, const, ct, m, F)
+#define MOCK_CONST_METHOD4_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD4_(, const, ct, m, F)
+#define MOCK_CONST_METHOD5_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD5_(, const, ct, m, F)
+#define MOCK_CONST_METHOD6_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD6_(, const, ct, m, F)
+#define MOCK_CONST_METHOD7_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD7_(, const, ct, m, F)
+#define MOCK_CONST_METHOD8_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD8_(, const, ct, m, F)
+#define MOCK_CONST_METHOD9_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD9_(, const, ct, m, F)
+#define MOCK_CONST_METHOD10_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD10_(, const, ct, m, F)
+
+#define MOCK_METHOD0_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD0_(typename, , ct, m, F)
+#define MOCK_METHOD1_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD1_(typename, , ct, m, F)
+#define MOCK_METHOD2_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD2_(typename, , ct, m, F)
+#define MOCK_METHOD3_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD3_(typename, , ct, m, F)
+#define MOCK_METHOD4_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD4_(typename, , ct, m, F)
+#define MOCK_METHOD5_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD5_(typename, , ct, m, F)
+#define MOCK_METHOD6_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD6_(typename, , ct, m, F)
+#define MOCK_METHOD7_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD7_(typename, , ct, m, F)
+#define MOCK_METHOD8_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD8_(typename, , ct, m, F)
+#define MOCK_METHOD9_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD9_(typename, , ct, m, F)
+#define MOCK_METHOD10_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD10_(typename, , ct, m, F)
+
+#define MOCK_CONST_METHOD0_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD0_(typename, const, ct, m, F)
+#define MOCK_CONST_METHOD1_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD1_(typename, const, ct, m, F)
+#define MOCK_CONST_METHOD2_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD2_(typename, const, ct, m, F)
+#define MOCK_CONST_METHOD3_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD3_(typename, const, ct, m, F)
+#define MOCK_CONST_METHOD4_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD4_(typename, const, ct, m, F)
+#define MOCK_CONST_METHOD5_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD5_(typename, const, ct, m, F)
+#define MOCK_CONST_METHOD6_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD6_(typename, const, ct, m, F)
+#define MOCK_CONST_METHOD7_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD7_(typename, const, ct, m, F)
+#define MOCK_CONST_METHOD8_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD8_(typename, const, ct, m, F)
+#define MOCK_CONST_METHOD9_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD9_(typename, const, ct, m, F)
+#define MOCK_CONST_METHOD10_T_WITH_CALLTYPE(ct, m, F) \
+ GMOCK_METHOD10_(typename, const, ct, m, F)
+
+// A MockFunction<F> class has one mock method whose type is F. It is
+// useful when you just want your test code to emit some messages and
+// have Google Mock verify the right messages are sent (and perhaps at
+// the right times). For example, if you are exercising code:
+//
+// Foo(1);
+// Foo(2);
+// Foo(3);
+//
+// and want to verify that Foo(1) and Foo(3) both invoke
+// mock.Bar("a"), but Foo(2) doesn't invoke anything, you can write:
+//
+// TEST(FooTest, InvokesBarCorrectly) {
+// MyMock mock;
+// MockFunction<void(string check_point_name)> check;
+// {
+// InSequence s;
+//
+// EXPECT_CALL(mock, Bar("a"));
+// EXPECT_CALL(check, Call("1"));
+// EXPECT_CALL(check, Call("2"));
+// EXPECT_CALL(mock, Bar("a"));
+// }
+// Foo(1);
+// check.Call("1");
+// Foo(2);
+// check.Call("2");
+// Foo(3);
+// }
+//
+// The expectation spec says that the first Bar("a") must happen
+// before check point "1", the second Bar("a") must happen after check
+// point "2", and nothing should happen between the two check
+// points. The explicit check points make it easy to tell which
+// Bar("a") is called by which call to Foo().
+template <typename F>
+class MockFunction;
+
+template <typename R>
+class MockFunction<R()> {
+ public:
+ MockFunction() {}
+
+ MOCK_METHOD0_T(Call, R());
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0>
+class MockFunction<R(A0)> {
+ public:
+ MockFunction() {}
+
+ MOCK_METHOD1_T(Call, R(A0));
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1>
+class MockFunction<R(A0, A1)> {
+ public:
+ MockFunction() {}
+
+ MOCK_METHOD2_T(Call, R(A0, A1));
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1, typename A2>
+class MockFunction<R(A0, A1, A2)> {
+ public:
+ MockFunction() {}
+
+ MOCK_METHOD3_T(Call, R(A0, A1, A2));
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1, typename A2, typename A3>
+class MockFunction<R(A0, A1, A2, A3)> {
+ public:
+ MockFunction() {}
+
+ MOCK_METHOD4_T(Call, R(A0, A1, A2, A3));
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1, typename A2, typename A3,
+ typename A4>
+class MockFunction<R(A0, A1, A2, A3, A4)> {
+ public:
+ MockFunction() {}
+
+ MOCK_METHOD5_T(Call, R(A0, A1, A2, A3, A4));
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1, typename A2, typename A3,
+ typename A4, typename A5>
+class MockFunction<R(A0, A1, A2, A3, A4, A5)> {
+ public:
+ MockFunction() {}
+
+ MOCK_METHOD6_T(Call, R(A0, A1, A2, A3, A4, A5));
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1, typename A2, typename A3,
+ typename A4, typename A5, typename A6>
+class MockFunction<R(A0, A1, A2, A3, A4, A5, A6)> {
+ public:
+ MockFunction() {}
+
+ MOCK_METHOD7_T(Call, R(A0, A1, A2, A3, A4, A5, A6));
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1, typename A2, typename A3,
+ typename A4, typename A5, typename A6, typename A7>
+class MockFunction<R(A0, A1, A2, A3, A4, A5, A6, A7)> {
+ public:
+ MockFunction() {}
+
+ MOCK_METHOD8_T(Call, R(A0, A1, A2, A3, A4, A5, A6, A7));
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1, typename A2, typename A3,
+ typename A4, typename A5, typename A6, typename A7, typename A8>
+class MockFunction<R(A0, A1, A2, A3, A4, A5, A6, A7, A8)> {
+ public:
+ MockFunction() {}
+
+ MOCK_METHOD9_T(Call, R(A0, A1, A2, A3, A4, A5, A6, A7, A8));
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+template <typename R, typename A0, typename A1, typename A2, typename A3,
+ typename A4, typename A5, typename A6, typename A7, typename A8,
+ typename A9>
+class MockFunction<R(A0, A1, A2, A3, A4, A5, A6, A7, A8, A9)> {
+ public:
+ MockFunction() {}
+
+ MOCK_METHOD10_T(Call, R(A0, A1, A2, A3, A4, A5, A6, A7, A8, A9));
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction);
+};
+
+} // namespace testing
+
+#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_
+// This file was GENERATED by command:
+// pump.py gmock-generated-matchers.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements some commonly used variadic matchers.
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_
+
+#include <sstream>
+#include <string>
+#include <vector>
+
+namespace testing {
+namespace internal {
+
+// The type of the i-th (0-based) field of Tuple.
+#define GMOCK_FIELD_TYPE_(Tuple, i) \
+ typename ::std::tr1::tuple_element<i, Tuple>::type
+
+// TupleFields<Tuple, k0, ..., kn> is for selecting fields from a
+// tuple of type Tuple. It has two members:
+//
+// type: a tuple type whose i-th field is the ki-th field of Tuple.
+// GetSelectedFields(t): returns fields k0, ..., and kn of t as a tuple.
+//
+// For example, in class TupleFields<tuple<bool, char, int>, 2, 0>, we have:
+//
+// type is tuple<int, bool>, and
+// GetSelectedFields(make_tuple(true, 'a', 42)) is (42, true).
+
+template <class Tuple, int k0 = -1, int k1 = -1, int k2 = -1, int k3 = -1,
+ int k4 = -1, int k5 = -1, int k6 = -1, int k7 = -1, int k8 = -1,
+ int k9 = -1>
+class TupleFields;
+
+// This generic version is used when there are 10 selectors.
+template <class Tuple, int k0, int k1, int k2, int k3, int k4, int k5, int k6,
+ int k7, int k8, int k9>
+class TupleFields {
+ public:
+ typedef ::std::tr1::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+ GMOCK_FIELD_TYPE_(Tuple, k1), GMOCK_FIELD_TYPE_(Tuple, k2),
+ GMOCK_FIELD_TYPE_(Tuple, k3), GMOCK_FIELD_TYPE_(Tuple, k4),
+ GMOCK_FIELD_TYPE_(Tuple, k5), GMOCK_FIELD_TYPE_(Tuple, k6),
+ GMOCK_FIELD_TYPE_(Tuple, k7), GMOCK_FIELD_TYPE_(Tuple, k8),
+ GMOCK_FIELD_TYPE_(Tuple, k9)> type;
+ static type GetSelectedFields(const Tuple& t) {
+ using ::std::tr1::get;
+ return type(get<k0>(t), get<k1>(t), get<k2>(t), get<k3>(t), get<k4>(t),
+ get<k5>(t), get<k6>(t), get<k7>(t), get<k8>(t), get<k9>(t));
+ }
+};
+
+// The following specializations are used for 0 ~ 9 selectors.
+
+template <class Tuple>
+class TupleFields<Tuple, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1> {
+ public:
+ typedef ::std::tr1::tuple<> type;
+ static type GetSelectedFields(const Tuple& /* t */) {
+ using ::std::tr1::get;
+ return type();
+ }
+};
+
+template <class Tuple, int k0>
+class TupleFields<Tuple, k0, -1, -1, -1, -1, -1, -1, -1, -1, -1> {
+ public:
+ typedef ::std::tr1::tuple<GMOCK_FIELD_TYPE_(Tuple, k0)> type;
+ static type GetSelectedFields(const Tuple& t) {
+ using ::std::tr1::get;
+ return type(get<k0>(t));
+ }
+};
+
+template <class Tuple, int k0, int k1>
+class TupleFields<Tuple, k0, k1, -1, -1, -1, -1, -1, -1, -1, -1> {
+ public:
+ typedef ::std::tr1::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+ GMOCK_FIELD_TYPE_(Tuple, k1)> type;
+ static type GetSelectedFields(const Tuple& t) {
+ using ::std::tr1::get;
+ return type(get<k0>(t), get<k1>(t));
+ }
+};
+
+template <class Tuple, int k0, int k1, int k2>
+class TupleFields<Tuple, k0, k1, k2, -1, -1, -1, -1, -1, -1, -1> {
+ public:
+ typedef ::std::tr1::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+ GMOCK_FIELD_TYPE_(Tuple, k1), GMOCK_FIELD_TYPE_(Tuple, k2)> type;
+ static type GetSelectedFields(const Tuple& t) {
+ using ::std::tr1::get;
+ return type(get<k0>(t), get<k1>(t), get<k2>(t));
+ }
+};
+
+template <class Tuple, int k0, int k1, int k2, int k3>
+class TupleFields<Tuple, k0, k1, k2, k3, -1, -1, -1, -1, -1, -1> {
+ public:
+ typedef ::std::tr1::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+ GMOCK_FIELD_TYPE_(Tuple, k1), GMOCK_FIELD_TYPE_(Tuple, k2),
+ GMOCK_FIELD_TYPE_(Tuple, k3)> type;
+ static type GetSelectedFields(const Tuple& t) {
+ using ::std::tr1::get;
+ return type(get<k0>(t), get<k1>(t), get<k2>(t), get<k3>(t));
+ }
+};
+
+template <class Tuple, int k0, int k1, int k2, int k3, int k4>
+class TupleFields<Tuple, k0, k1, k2, k3, k4, -1, -1, -1, -1, -1> {
+ public:
+ typedef ::std::tr1::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+ GMOCK_FIELD_TYPE_(Tuple, k1), GMOCK_FIELD_TYPE_(Tuple, k2),
+ GMOCK_FIELD_TYPE_(Tuple, k3), GMOCK_FIELD_TYPE_(Tuple, k4)> type;
+ static type GetSelectedFields(const Tuple& t) {
+ using ::std::tr1::get;
+ return type(get<k0>(t), get<k1>(t), get<k2>(t), get<k3>(t), get<k4>(t));
+ }
+};
+
+template <class Tuple, int k0, int k1, int k2, int k3, int k4, int k5>
+class TupleFields<Tuple, k0, k1, k2, k3, k4, k5, -1, -1, -1, -1> {
+ public:
+ typedef ::std::tr1::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+ GMOCK_FIELD_TYPE_(Tuple, k1), GMOCK_FIELD_TYPE_(Tuple, k2),
+ GMOCK_FIELD_TYPE_(Tuple, k3), GMOCK_FIELD_TYPE_(Tuple, k4),
+ GMOCK_FIELD_TYPE_(Tuple, k5)> type;
+ static type GetSelectedFields(const Tuple& t) {
+ using ::std::tr1::get;
+ return type(get<k0>(t), get<k1>(t), get<k2>(t), get<k3>(t), get<k4>(t),
+ get<k5>(t));
+ }
+};
+
+template <class Tuple, int k0, int k1, int k2, int k3, int k4, int k5, int k6>
+class TupleFields<Tuple, k0, k1, k2, k3, k4, k5, k6, -1, -1, -1> {
+ public:
+ typedef ::std::tr1::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+ GMOCK_FIELD_TYPE_(Tuple, k1), GMOCK_FIELD_TYPE_(Tuple, k2),
+ GMOCK_FIELD_TYPE_(Tuple, k3), GMOCK_FIELD_TYPE_(Tuple, k4),
+ GMOCK_FIELD_TYPE_(Tuple, k5), GMOCK_FIELD_TYPE_(Tuple, k6)> type;
+ static type GetSelectedFields(const Tuple& t) {
+ using ::std::tr1::get;
+ return type(get<k0>(t), get<k1>(t), get<k2>(t), get<k3>(t), get<k4>(t),
+ get<k5>(t), get<k6>(t));
+ }
+};
+
+template <class Tuple, int k0, int k1, int k2, int k3, int k4, int k5, int k6,
+ int k7>
+class TupleFields<Tuple, k0, k1, k2, k3, k4, k5, k6, k7, -1, -1> {
+ public:
+ typedef ::std::tr1::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+ GMOCK_FIELD_TYPE_(Tuple, k1), GMOCK_FIELD_TYPE_(Tuple, k2),
+ GMOCK_FIELD_TYPE_(Tuple, k3), GMOCK_FIELD_TYPE_(Tuple, k4),
+ GMOCK_FIELD_TYPE_(Tuple, k5), GMOCK_FIELD_TYPE_(Tuple, k6),
+ GMOCK_FIELD_TYPE_(Tuple, k7)> type;
+ static type GetSelectedFields(const Tuple& t) {
+ using ::std::tr1::get;
+ return type(get<k0>(t), get<k1>(t), get<k2>(t), get<k3>(t), get<k4>(t),
+ get<k5>(t), get<k6>(t), get<k7>(t));
+ }
+};
+
+template <class Tuple, int k0, int k1, int k2, int k3, int k4, int k5, int k6,
+ int k7, int k8>
+class TupleFields<Tuple, k0, k1, k2, k3, k4, k5, k6, k7, k8, -1> {
+ public:
+ typedef ::std::tr1::tuple<GMOCK_FIELD_TYPE_(Tuple, k0),
+ GMOCK_FIELD_TYPE_(Tuple, k1), GMOCK_FIELD_TYPE_(Tuple, k2),
+ GMOCK_FIELD_TYPE_(Tuple, k3), GMOCK_FIELD_TYPE_(Tuple, k4),
+ GMOCK_FIELD_TYPE_(Tuple, k5), GMOCK_FIELD_TYPE_(Tuple, k6),
+ GMOCK_FIELD_TYPE_(Tuple, k7), GMOCK_FIELD_TYPE_(Tuple, k8)> type;
+ static type GetSelectedFields(const Tuple& t) {
+ using ::std::tr1::get;
+ return type(get<k0>(t), get<k1>(t), get<k2>(t), get<k3>(t), get<k4>(t),
+ get<k5>(t), get<k6>(t), get<k7>(t), get<k8>(t));
+ }
+};
+
+#undef GMOCK_FIELD_TYPE_
+
+// Implements the Args() matcher.
+template <class ArgsTuple, int k0 = -1, int k1 = -1, int k2 = -1, int k3 = -1,
+ int k4 = -1, int k5 = -1, int k6 = -1, int k7 = -1, int k8 = -1,
+ int k9 = -1>
+class ArgsMatcherImpl : public MatcherInterface<ArgsTuple> {
+ public:
+ // ArgsTuple may have top-level const or reference modifiers.
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(ArgsTuple) RawArgsTuple;
+ typedef typename internal::TupleFields<RawArgsTuple, k0, k1, k2, k3, k4, k5,
+ k6, k7, k8, k9>::type SelectedArgs;
+ typedef Matcher<const SelectedArgs&> MonomorphicInnerMatcher;
+
+ template <typename InnerMatcher>
+ explicit ArgsMatcherImpl(const InnerMatcher& inner_matcher)
+ : inner_matcher_(SafeMatcherCast<const SelectedArgs&>(inner_matcher)) {}
+
+ virtual bool MatchAndExplain(ArgsTuple args,
+ MatchResultListener* listener) const {
+ const SelectedArgs& selected_args = GetSelectedArgs(args);
+ if (!listener->IsInterested())
+ return inner_matcher_.Matches(selected_args);
+
+ PrintIndices(listener->stream());
+ *listener << "are " << PrintToString(selected_args);
+
+ StringMatchResultListener inner_listener;
+ const bool match = inner_matcher_.MatchAndExplain(selected_args,
+ &inner_listener);
+ PrintIfNotEmpty(inner_listener.str(), listener->stream());
+ return match;
+ }
+
+ virtual void DescribeTo(::std::ostream* os) const {
+ *os << "are a tuple ";
+ PrintIndices(os);
+ inner_matcher_.DescribeTo(os);
+ }
+
+ virtual void DescribeNegationTo(::std::ostream* os) const {
+ *os << "are a tuple ";
+ PrintIndices(os);
+ inner_matcher_.DescribeNegationTo(os);
+ }
+
+ private:
+ static SelectedArgs GetSelectedArgs(ArgsTuple args) {
+ return TupleFields<RawArgsTuple, k0, k1, k2, k3, k4, k5, k6, k7, k8,
+ k9>::GetSelectedFields(args);
+ }
+
+ // Prints the indices of the selected fields.
+ static void PrintIndices(::std::ostream* os) {
+ *os << "whose fields (";
+ const int indices[10] = { k0, k1, k2, k3, k4, k5, k6, k7, k8, k9 };
+ for (int i = 0; i < 10; i++) {
+ if (indices[i] < 0)
+ break;
+
+ if (i >= 1)
+ *os << ", ";
+
+ *os << "#" << indices[i];
+ }
+ *os << ") ";
+ }
+
+ const MonomorphicInnerMatcher inner_matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(ArgsMatcherImpl);
+};
+
+template <class InnerMatcher, int k0 = -1, int k1 = -1, int k2 = -1,
+ int k3 = -1, int k4 = -1, int k5 = -1, int k6 = -1, int k7 = -1,
+ int k8 = -1, int k9 = -1>
+class ArgsMatcher {
+ public:
+ explicit ArgsMatcher(const InnerMatcher& inner_matcher)
+ : inner_matcher_(inner_matcher) {}
+
+ template <typename ArgsTuple>
+ operator Matcher<ArgsTuple>() const {
+ return MakeMatcher(new ArgsMatcherImpl<ArgsTuple, k0, k1, k2, k3, k4, k5,
+ k6, k7, k8, k9>(inner_matcher_));
+ }
+
+ private:
+ const InnerMatcher inner_matcher_;
+
+ GTEST_DISALLOW_ASSIGN_(ArgsMatcher);
+};
+
+// Implements ElementsAre() of 1-10 arguments.
+
+template <typename T1>
+class ElementsAreMatcher1 {
+ public:
+ explicit ElementsAreMatcher1(const T1& e1) : e1_(e1) {}
+
+ template <typename Container>
+ operator Matcher<Container>() const {
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+ typedef typename internal::StlContainerView<RawContainer>::type::value_type
+ Element;
+
+ // Nokia's Symbian Compiler has a nasty bug where the object put
+ // in a one-element local array is not destructed when the array
+ // goes out of scope. This leads to obvious badness as we've
+ // added the linked_ptr in it to our other linked_ptrs list.
+ // Hence we implement ElementsAreMatcher1 specially to avoid using
+ // a local array.
+ const Matcher<const Element&> matcher =
+ MatcherCast<const Element&>(e1_);
+ return MakeMatcher(new ElementsAreMatcherImpl<Container>(&matcher, 1));
+ }
+
+ private:
+ const T1& e1_;
+
+ GTEST_DISALLOW_ASSIGN_(ElementsAreMatcher1);
+};
+
+template <typename T1, typename T2>
+class ElementsAreMatcher2 {
+ public:
+ ElementsAreMatcher2(const T1& e1, const T2& e2) : e1_(e1), e2_(e2) {}
+
+ template <typename Container>
+ operator Matcher<Container>() const {
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+ typedef typename internal::StlContainerView<RawContainer>::type::value_type
+ Element;
+
+ const Matcher<const Element&> matchers[] = {
+ MatcherCast<const Element&>(e1_),
+ MatcherCast<const Element&>(e2_),
+ };
+
+ return MakeMatcher(new ElementsAreMatcherImpl<Container>(matchers, 2));
+ }
+
+ private:
+ const T1& e1_;
+ const T2& e2_;
+
+ GTEST_DISALLOW_ASSIGN_(ElementsAreMatcher2);
+};
+
+template <typename T1, typename T2, typename T3>
+class ElementsAreMatcher3 {
+ public:
+ ElementsAreMatcher3(const T1& e1, const T2& e2, const T3& e3) : e1_(e1),
+ e2_(e2), e3_(e3) {}
+
+ template <typename Container>
+ operator Matcher<Container>() const {
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+ typedef typename internal::StlContainerView<RawContainer>::type::value_type
+ Element;
+
+ const Matcher<const Element&> matchers[] = {
+ MatcherCast<const Element&>(e1_),
+ MatcherCast<const Element&>(e2_),
+ MatcherCast<const Element&>(e3_),
+ };
+
+ return MakeMatcher(new ElementsAreMatcherImpl<Container>(matchers, 3));
+ }
+
+ private:
+ const T1& e1_;
+ const T2& e2_;
+ const T3& e3_;
+
+ GTEST_DISALLOW_ASSIGN_(ElementsAreMatcher3);
+};
+
+template <typename T1, typename T2, typename T3, typename T4>
+class ElementsAreMatcher4 {
+ public:
+ ElementsAreMatcher4(const T1& e1, const T2& e2, const T3& e3,
+ const T4& e4) : e1_(e1), e2_(e2), e3_(e3), e4_(e4) {}
+
+ template <typename Container>
+ operator Matcher<Container>() const {
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+ typedef typename internal::StlContainerView<RawContainer>::type::value_type
+ Element;
+
+ const Matcher<const Element&> matchers[] = {
+ MatcherCast<const Element&>(e1_),
+ MatcherCast<const Element&>(e2_),
+ MatcherCast<const Element&>(e3_),
+ MatcherCast<const Element&>(e4_),
+ };
+
+ return MakeMatcher(new ElementsAreMatcherImpl<Container>(matchers, 4));
+ }
+
+ private:
+ const T1& e1_;
+ const T2& e2_;
+ const T3& e3_;
+ const T4& e4_;
+
+ GTEST_DISALLOW_ASSIGN_(ElementsAreMatcher4);
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+class ElementsAreMatcher5 {
+ public:
+ ElementsAreMatcher5(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+ const T5& e5) : e1_(e1), e2_(e2), e3_(e3), e4_(e4), e5_(e5) {}
+
+ template <typename Container>
+ operator Matcher<Container>() const {
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+ typedef typename internal::StlContainerView<RawContainer>::type::value_type
+ Element;
+
+ const Matcher<const Element&> matchers[] = {
+ MatcherCast<const Element&>(e1_),
+ MatcherCast<const Element&>(e2_),
+ MatcherCast<const Element&>(e3_),
+ MatcherCast<const Element&>(e4_),
+ MatcherCast<const Element&>(e5_),
+ };
+
+ return MakeMatcher(new ElementsAreMatcherImpl<Container>(matchers, 5));
+ }
+
+ private:
+ const T1& e1_;
+ const T2& e2_;
+ const T3& e3_;
+ const T4& e4_;
+ const T5& e5_;
+
+ GTEST_DISALLOW_ASSIGN_(ElementsAreMatcher5);
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+class ElementsAreMatcher6 {
+ public:
+ ElementsAreMatcher6(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+ const T5& e5, const T6& e6) : e1_(e1), e2_(e2), e3_(e3), e4_(e4),
+ e5_(e5), e6_(e6) {}
+
+ template <typename Container>
+ operator Matcher<Container>() const {
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+ typedef typename internal::StlContainerView<RawContainer>::type::value_type
+ Element;
+
+ const Matcher<const Element&> matchers[] = {
+ MatcherCast<const Element&>(e1_),
+ MatcherCast<const Element&>(e2_),
+ MatcherCast<const Element&>(e3_),
+ MatcherCast<const Element&>(e4_),
+ MatcherCast<const Element&>(e5_),
+ MatcherCast<const Element&>(e6_),
+ };
+
+ return MakeMatcher(new ElementsAreMatcherImpl<Container>(matchers, 6));
+ }
+
+ private:
+ const T1& e1_;
+ const T2& e2_;
+ const T3& e3_;
+ const T4& e4_;
+ const T5& e5_;
+ const T6& e6_;
+
+ GTEST_DISALLOW_ASSIGN_(ElementsAreMatcher6);
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+class ElementsAreMatcher7 {
+ public:
+ ElementsAreMatcher7(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+ const T5& e5, const T6& e6, const T7& e7) : e1_(e1), e2_(e2), e3_(e3),
+ e4_(e4), e5_(e5), e6_(e6), e7_(e7) {}
+
+ template <typename Container>
+ operator Matcher<Container>() const {
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+ typedef typename internal::StlContainerView<RawContainer>::type::value_type
+ Element;
+
+ const Matcher<const Element&> matchers[] = {
+ MatcherCast<const Element&>(e1_),
+ MatcherCast<const Element&>(e2_),
+ MatcherCast<const Element&>(e3_),
+ MatcherCast<const Element&>(e4_),
+ MatcherCast<const Element&>(e5_),
+ MatcherCast<const Element&>(e6_),
+ MatcherCast<const Element&>(e7_),
+ };
+
+ return MakeMatcher(new ElementsAreMatcherImpl<Container>(matchers, 7));
+ }
+
+ private:
+ const T1& e1_;
+ const T2& e2_;
+ const T3& e3_;
+ const T4& e4_;
+ const T5& e5_;
+ const T6& e6_;
+ const T7& e7_;
+
+ GTEST_DISALLOW_ASSIGN_(ElementsAreMatcher7);
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+class ElementsAreMatcher8 {
+ public:
+ ElementsAreMatcher8(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+ const T5& e5, const T6& e6, const T7& e7, const T8& e8) : e1_(e1),
+ e2_(e2), e3_(e3), e4_(e4), e5_(e5), e6_(e6), e7_(e7), e8_(e8) {}
+
+ template <typename Container>
+ operator Matcher<Container>() const {
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+ typedef typename internal::StlContainerView<RawContainer>::type::value_type
+ Element;
+
+ const Matcher<const Element&> matchers[] = {
+ MatcherCast<const Element&>(e1_),
+ MatcherCast<const Element&>(e2_),
+ MatcherCast<const Element&>(e3_),
+ MatcherCast<const Element&>(e4_),
+ MatcherCast<const Element&>(e5_),
+ MatcherCast<const Element&>(e6_),
+ MatcherCast<const Element&>(e7_),
+ MatcherCast<const Element&>(e8_),
+ };
+
+ return MakeMatcher(new ElementsAreMatcherImpl<Container>(matchers, 8));
+ }
+
+ private:
+ const T1& e1_;
+ const T2& e2_;
+ const T3& e3_;
+ const T4& e4_;
+ const T5& e5_;
+ const T6& e6_;
+ const T7& e7_;
+ const T8& e8_;
+
+ GTEST_DISALLOW_ASSIGN_(ElementsAreMatcher8);
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+class ElementsAreMatcher9 {
+ public:
+ ElementsAreMatcher9(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+ const T5& e5, const T6& e6, const T7& e7, const T8& e8,
+ const T9& e9) : e1_(e1), e2_(e2), e3_(e3), e4_(e4), e5_(e5), e6_(e6),
+ e7_(e7), e8_(e8), e9_(e9) {}
+
+ template <typename Container>
+ operator Matcher<Container>() const {
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+ typedef typename internal::StlContainerView<RawContainer>::type::value_type
+ Element;
+
+ const Matcher<const Element&> matchers[] = {
+ MatcherCast<const Element&>(e1_),
+ MatcherCast<const Element&>(e2_),
+ MatcherCast<const Element&>(e3_),
+ MatcherCast<const Element&>(e4_),
+ MatcherCast<const Element&>(e5_),
+ MatcherCast<const Element&>(e6_),
+ MatcherCast<const Element&>(e7_),
+ MatcherCast<const Element&>(e8_),
+ MatcherCast<const Element&>(e9_),
+ };
+
+ return MakeMatcher(new ElementsAreMatcherImpl<Container>(matchers, 9));
+ }
+
+ private:
+ const T1& e1_;
+ const T2& e2_;
+ const T3& e3_;
+ const T4& e4_;
+ const T5& e5_;
+ const T6& e6_;
+ const T7& e7_;
+ const T8& e8_;
+ const T9& e9_;
+
+ GTEST_DISALLOW_ASSIGN_(ElementsAreMatcher9);
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+class ElementsAreMatcher10 {
+ public:
+ ElementsAreMatcher10(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+ const T5& e5, const T6& e6, const T7& e7, const T8& e8, const T9& e9,
+ const T10& e10) : e1_(e1), e2_(e2), e3_(e3), e4_(e4), e5_(e5), e6_(e6),
+ e7_(e7), e8_(e8), e9_(e9), e10_(e10) {}
+
+ template <typename Container>
+ operator Matcher<Container>() const {
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
+ typedef typename internal::StlContainerView<RawContainer>::type::value_type
+ Element;
+
+ const Matcher<const Element&> matchers[] = {
+ MatcherCast<const Element&>(e1_),
+ MatcherCast<const Element&>(e2_),
+ MatcherCast<const Element&>(e3_),
+ MatcherCast<const Element&>(e4_),
+ MatcherCast<const Element&>(e5_),
+ MatcherCast<const Element&>(e6_),
+ MatcherCast<const Element&>(e7_),
+ MatcherCast<const Element&>(e8_),
+ MatcherCast<const Element&>(e9_),
+ MatcherCast<const Element&>(e10_),
+ };
+
+ return MakeMatcher(new ElementsAreMatcherImpl<Container>(matchers, 10));
+ }
+
+ private:
+ const T1& e1_;
+ const T2& e2_;
+ const T3& e3_;
+ const T4& e4_;
+ const T5& e5_;
+ const T6& e6_;
+ const T7& e7_;
+ const T8& e8_;
+ const T9& e9_;
+ const T10& e10_;
+
+ GTEST_DISALLOW_ASSIGN_(ElementsAreMatcher10);
+};
+
+} // namespace internal
+
+// Args<N1, N2, ..., Nk>(a_matcher) matches a tuple if the selected
+// fields of it match a_matcher. C++ doesn't support default
+// arguments for function templates, so we have to overload it.
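+//
+// A minimal usage sketch (illustrative only; the mock object and Blah()
+// are hypothetical, declared in a mock class):
+//
+//   MOCK_METHOD3(Blah, void(int x, int y, int z));
+//   ...
+//   // Expects Blah() to be called with its 1st argument less than its 3rd.
+//   EXPECT_CALL(mock, Blah(_, _, _)).With(Args<0, 2>(Lt()));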
+template <typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher>
+Args(const InnerMatcher& matcher) {
+ return internal::ArgsMatcher<InnerMatcher>(matcher);
+}
+
+template <int k1, typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1>
+Args(const InnerMatcher& matcher) {
+ return internal::ArgsMatcher<InnerMatcher, k1>(matcher);
+}
+
+template <int k1, int k2, typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2>
+Args(const InnerMatcher& matcher) {
+ return internal::ArgsMatcher<InnerMatcher, k1, k2>(matcher);
+}
+
+template <int k1, int k2, int k3, typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2, k3>
+Args(const InnerMatcher& matcher) {
+ return internal::ArgsMatcher<InnerMatcher, k1, k2, k3>(matcher);
+}
+
+template <int k1, int k2, int k3, int k4, typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4>
+Args(const InnerMatcher& matcher) {
+ return internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4>(matcher);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5>
+Args(const InnerMatcher& matcher) {
+ return internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5>(matcher);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6>
+Args(const InnerMatcher& matcher) {
+ return internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6>(matcher);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, int k7,
+ typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6, k7>
+Args(const InnerMatcher& matcher) {
+ return internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6,
+ k7>(matcher);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, int k7, int k8,
+ typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6, k7, k8>
+Args(const InnerMatcher& matcher) {
+ return internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6, k7,
+ k8>(matcher);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, int k7, int k8,
+ int k9, typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6, k7, k8, k9>
+Args(const InnerMatcher& matcher) {
+ return internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6, k7, k8,
+ k9>(matcher);
+}
+
+template <int k1, int k2, int k3, int k4, int k5, int k6, int k7, int k8,
+ int k9, int k10, typename InnerMatcher>
+inline internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6, k7, k8, k9,
+ k10>
+Args(const InnerMatcher& matcher) {
+ return internal::ArgsMatcher<InnerMatcher, k1, k2, k3, k4, k5, k6, k7, k8,
+ k9, k10>(matcher);
+}
+
+// ElementsAre(e0, e1, ..., e_n) matches an STL-style container with
+// (n + 1) elements, where the i-th element in the container must
+// match the i-th argument in the list. Each argument of
+// ElementsAre() can be either a value or a matcher. We support up to
+// 10 arguments.
+//
+// NOTE: Since ElementsAre() cares about the order of the elements, it
+// must not be used with containers whose element order is
+// undefined (e.g. hash_map).
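+//
+// A usage sketch (illustrative only):
+//
+//   std::vector<int> v;
+//   v.push_back(1); v.push_back(2); v.push_back(3);
+//   // Passes iff v has exactly three elements: 1, something greater
+//   // than 1, and anything at all.
+//   EXPECT_THAT(v, ElementsAre(1, Gt(1), _));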
+
+inline internal::ElementsAreMatcher0 ElementsAre() {
+ return internal::ElementsAreMatcher0();
+}
+
+template <typename T1>
+inline internal::ElementsAreMatcher1<T1> ElementsAre(const T1& e1) {
+ return internal::ElementsAreMatcher1<T1>(e1);
+}
+
+template <typename T1, typename T2>
+inline internal::ElementsAreMatcher2<T1, T2> ElementsAre(const T1& e1,
+ const T2& e2) {
+ return internal::ElementsAreMatcher2<T1, T2>(e1, e2);
+}
+
+template <typename T1, typename T2, typename T3>
+inline internal::ElementsAreMatcher3<T1, T2, T3> ElementsAre(const T1& e1,
+ const T2& e2, const T3& e3) {
+ return internal::ElementsAreMatcher3<T1, T2, T3>(e1, e2, e3);
+}
+
+template <typename T1, typename T2, typename T3, typename T4>
+inline internal::ElementsAreMatcher4<T1, T2, T3, T4> ElementsAre(const T1& e1,
+ const T2& e2, const T3& e3, const T4& e4) {
+ return internal::ElementsAreMatcher4<T1, T2, T3, T4>(e1, e2, e3, e4);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+inline internal::ElementsAreMatcher5<T1, T2, T3, T4,
+ T5> ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+ const T5& e5) {
+ return internal::ElementsAreMatcher5<T1, T2, T3, T4, T5>(e1, e2, e3, e4, e5);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+inline internal::ElementsAreMatcher6<T1, T2, T3, T4, T5,
+ T6> ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+ const T5& e5, const T6& e6) {
+ return internal::ElementsAreMatcher6<T1, T2, T3, T4, T5, T6>(e1, e2, e3, e4,
+ e5, e6);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+inline internal::ElementsAreMatcher7<T1, T2, T3, T4, T5, T6,
+ T7> ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+ const T5& e5, const T6& e6, const T7& e7) {
+ return internal::ElementsAreMatcher7<T1, T2, T3, T4, T5, T6, T7>(e1, e2, e3,
+ e4, e5, e6, e7);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+inline internal::ElementsAreMatcher8<T1, T2, T3, T4, T5, T6, T7,
+ T8> ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+ const T5& e5, const T6& e6, const T7& e7, const T8& e8) {
+ return internal::ElementsAreMatcher8<T1, T2, T3, T4, T5, T6, T7, T8>(e1, e2,
+ e3, e4, e5, e6, e7, e8);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+inline internal::ElementsAreMatcher9<T1, T2, T3, T4, T5, T6, T7, T8,
+ T9> ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+ const T5& e5, const T6& e6, const T7& e7, const T8& e8, const T9& e9) {
+ return internal::ElementsAreMatcher9<T1, T2, T3, T4, T5, T6, T7, T8, T9>(e1,
+ e2, e3, e4, e5, e6, e7, e8, e9);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+inline internal::ElementsAreMatcher10<T1, T2, T3, T4, T5, T6, T7, T8, T9,
+ T10> ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4,
+ const T5& e5, const T6& e6, const T7& e7, const T8& e8, const T9& e9,
+ const T10& e10) {
+ return internal::ElementsAreMatcher10<T1, T2, T3, T4, T5, T6, T7, T8, T9,
+ T10>(e1, e2, e3, e4, e5, e6, e7, e8, e9, e10);
+}
+
+// ElementsAreArray(array) and ElementsAreArray(array, count) are like
+// ElementsAre(), except that they take an array of values or
+// matchers. The former form infers the size of 'array', which must
+// be a static C-style array. In the latter form, 'array' can either
+// be a static array or a pointer to a dynamically created array.
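+//
+// A usage sketch (illustrative only; actual_vector is hypothetical):
+//
+//   const int expected[] = { 1, 2, 3 };
+//   EXPECT_THAT(actual_vector, ElementsAreArray(expected));     // size inferred
+//   EXPECT_THAT(actual_vector, ElementsAreArray(expected, 2));  // first 2 values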
+
+template <typename T>
+inline internal::ElementsAreArrayMatcher<T> ElementsAreArray(
+ const T* first, size_t count) {
+ return internal::ElementsAreArrayMatcher<T>(first, count);
+}
+
+template <typename T, size_t N>
+inline internal::ElementsAreArrayMatcher<T>
+ElementsAreArray(const T (&array)[N]) {
+ return internal::ElementsAreArrayMatcher<T>(array, N);
+}
+
+// AllOf(m1, m2, ..., mk) matches any value that matches all of the given
+// sub-matchers. AllOf is called fully qualified to prevent ADL from firing.
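+//
+// A usage sketch (illustrative only; 'value' is hypothetical):
+//
+//   // Matches any value in the open interval (0, 10).
+//   EXPECT_THAT(value, AllOf(Gt(0), Lt(10)));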
+
+template <typename Matcher1, typename Matcher2>
+inline internal::BothOfMatcher<Matcher1, Matcher2>
+AllOf(Matcher1 m1, Matcher2 m2) {
+ return internal::BothOfMatcher<Matcher1, Matcher2>(m1, m2);
+}
+
+template <typename Matcher1, typename Matcher2, typename Matcher3>
+inline internal::BothOfMatcher<Matcher1, internal::BothOfMatcher<Matcher2,
+ Matcher3> >
+AllOf(Matcher1 m1, Matcher2 m2, Matcher3 m3) {
+ return ::testing::AllOf(m1, ::testing::AllOf(m2, m3));
+}
+
+template <typename Matcher1, typename Matcher2, typename Matcher3,
+ typename Matcher4>
+inline internal::BothOfMatcher<Matcher1, internal::BothOfMatcher<Matcher2,
+ internal::BothOfMatcher<Matcher3, Matcher4> > >
+AllOf(Matcher1 m1, Matcher2 m2, Matcher3 m3, Matcher4 m4) {
+ return ::testing::AllOf(m1, ::testing::AllOf(m2, m3, m4));
+}
+
+template <typename Matcher1, typename Matcher2, typename Matcher3,
+ typename Matcher4, typename Matcher5>
+inline internal::BothOfMatcher<Matcher1, internal::BothOfMatcher<Matcher2,
+ internal::BothOfMatcher<Matcher3, internal::BothOfMatcher<Matcher4,
+ Matcher5> > > >
+AllOf(Matcher1 m1, Matcher2 m2, Matcher3 m3, Matcher4 m4, Matcher5 m5) {
+ return ::testing::AllOf(m1, ::testing::AllOf(m2, m3, m4, m5));
+}
+
+template <typename Matcher1, typename Matcher2, typename Matcher3,
+ typename Matcher4, typename Matcher5, typename Matcher6>
+inline internal::BothOfMatcher<Matcher1, internal::BothOfMatcher<Matcher2,
+ internal::BothOfMatcher<Matcher3, internal::BothOfMatcher<Matcher4,
+ internal::BothOfMatcher<Matcher5, Matcher6> > > > >
+AllOf(Matcher1 m1, Matcher2 m2, Matcher3 m3, Matcher4 m4, Matcher5 m5,
+ Matcher6 m6) {
+ return ::testing::AllOf(m1, ::testing::AllOf(m2, m3, m4, m5, m6));
+}
+
+template <typename Matcher1, typename Matcher2, typename Matcher3,
+ typename Matcher4, typename Matcher5, typename Matcher6, typename Matcher7>
+inline internal::BothOfMatcher<Matcher1, internal::BothOfMatcher<Matcher2,
+ internal::BothOfMatcher<Matcher3, internal::BothOfMatcher<Matcher4,
+ internal::BothOfMatcher<Matcher5, internal::BothOfMatcher<Matcher6,
+ Matcher7> > > > > >
+AllOf(Matcher1 m1, Matcher2 m2, Matcher3 m3, Matcher4 m4, Matcher5 m5,
+ Matcher6 m6, Matcher7 m7) {
+ return ::testing::AllOf(m1, ::testing::AllOf(m2, m3, m4, m5, m6, m7));
+}
+
+template <typename Matcher1, typename Matcher2, typename Matcher3,
+ typename Matcher4, typename Matcher5, typename Matcher6, typename Matcher7,
+ typename Matcher8>
+inline internal::BothOfMatcher<Matcher1, internal::BothOfMatcher<Matcher2,
+ internal::BothOfMatcher<Matcher3, internal::BothOfMatcher<Matcher4,
+ internal::BothOfMatcher<Matcher5, internal::BothOfMatcher<Matcher6,
+ internal::BothOfMatcher<Matcher7, Matcher8> > > > > > >
+AllOf(Matcher1 m1, Matcher2 m2, Matcher3 m3, Matcher4 m4, Matcher5 m5,
+ Matcher6 m6, Matcher7 m7, Matcher8 m8) {
+ return ::testing::AllOf(m1, ::testing::AllOf(m2, m3, m4, m5, m6, m7, m8));
+}
+
+template <typename Matcher1, typename Matcher2, typename Matcher3,
+ typename Matcher4, typename Matcher5, typename Matcher6, typename Matcher7,
+ typename Matcher8, typename Matcher9>
+inline internal::BothOfMatcher<Matcher1, internal::BothOfMatcher<Matcher2,
+ internal::BothOfMatcher<Matcher3, internal::BothOfMatcher<Matcher4,
+ internal::BothOfMatcher<Matcher5, internal::BothOfMatcher<Matcher6,
+ internal::BothOfMatcher<Matcher7, internal::BothOfMatcher<Matcher8,
+ Matcher9> > > > > > > >
+AllOf(Matcher1 m1, Matcher2 m2, Matcher3 m3, Matcher4 m4, Matcher5 m5,
+ Matcher6 m6, Matcher7 m7, Matcher8 m8, Matcher9 m9) {
+ return ::testing::AllOf(m1, ::testing::AllOf(m2, m3, m4, m5, m6, m7, m8, m9));
+}
+
+template <typename Matcher1, typename Matcher2, typename Matcher3,
+ typename Matcher4, typename Matcher5, typename Matcher6, typename Matcher7,
+ typename Matcher8, typename Matcher9, typename Matcher10>
+inline internal::BothOfMatcher<Matcher1, internal::BothOfMatcher<Matcher2,
+ internal::BothOfMatcher<Matcher3, internal::BothOfMatcher<Matcher4,
+ internal::BothOfMatcher<Matcher5, internal::BothOfMatcher<Matcher6,
+ internal::BothOfMatcher<Matcher7, internal::BothOfMatcher<Matcher8,
+ internal::BothOfMatcher<Matcher9, Matcher10> > > > > > > > >
+AllOf(Matcher1 m1, Matcher2 m2, Matcher3 m3, Matcher4 m4, Matcher5 m5,
+ Matcher6 m6, Matcher7 m7, Matcher8 m8, Matcher9 m9, Matcher10 m10) {
+ return ::testing::AllOf(m1, ::testing::AllOf(m2, m3, m4, m5, m6, m7, m8, m9,
+ m10));
+}
+
+// AnyOf(m1, m2, ..., mk) matches any value that matches any of the given
+// sub-matchers. AnyOf is called fully qualified to prevent ADL from firing.
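+//
+// A usage sketch (illustrative only; 'x' is hypothetical):
+//
+//   // Matches x if it is either negative or greater than 100.
+//   EXPECT_THAT(x, AnyOf(Lt(0), Gt(100)));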
+
+template <typename Matcher1, typename Matcher2>
+inline internal::EitherOfMatcher<Matcher1, Matcher2>
+AnyOf(Matcher1 m1, Matcher2 m2) {
+ return internal::EitherOfMatcher<Matcher1, Matcher2>(m1, m2);
+}
+
+template <typename Matcher1, typename Matcher2, typename Matcher3>
+inline internal::EitherOfMatcher<Matcher1, internal::EitherOfMatcher<Matcher2,
+ Matcher3> >
+AnyOf(Matcher1 m1, Matcher2 m2, Matcher3 m3) {
+ return ::testing::AnyOf(m1, ::testing::AnyOf(m2, m3));
+}
+
+template <typename Matcher1, typename Matcher2, typename Matcher3,
+ typename Matcher4>
+inline internal::EitherOfMatcher<Matcher1, internal::EitherOfMatcher<Matcher2,
+ internal::EitherOfMatcher<Matcher3, Matcher4> > >
+AnyOf(Matcher1 m1, Matcher2 m2, Matcher3 m3, Matcher4 m4) {
+ return ::testing::AnyOf(m1, ::testing::AnyOf(m2, m3, m4));
+}
+
+template <typename Matcher1, typename Matcher2, typename Matcher3,
+ typename Matcher4, typename Matcher5>
+inline internal::EitherOfMatcher<Matcher1, internal::EitherOfMatcher<Matcher2,
+ internal::EitherOfMatcher<Matcher3, internal::EitherOfMatcher<Matcher4,
+ Matcher5> > > >
+AnyOf(Matcher1 m1, Matcher2 m2, Matcher3 m3, Matcher4 m4, Matcher5 m5) {
+ return ::testing::AnyOf(m1, ::testing::AnyOf(m2, m3, m4, m5));
+}
+
+template <typename Matcher1, typename Matcher2, typename Matcher3,
+ typename Matcher4, typename Matcher5, typename Matcher6>
+inline internal::EitherOfMatcher<Matcher1, internal::EitherOfMatcher<Matcher2,
+ internal::EitherOfMatcher<Matcher3, internal::EitherOfMatcher<Matcher4,
+ internal::EitherOfMatcher<Matcher5, Matcher6> > > > >
+AnyOf(Matcher1 m1, Matcher2 m2, Matcher3 m3, Matcher4 m4, Matcher5 m5,
+ Matcher6 m6) {
+ return ::testing::AnyOf(m1, ::testing::AnyOf(m2, m3, m4, m5, m6));
+}
+
+template <typename Matcher1, typename Matcher2, typename Matcher3,
+ typename Matcher4, typename Matcher5, typename Matcher6, typename Matcher7>
+inline internal::EitherOfMatcher<Matcher1, internal::EitherOfMatcher<Matcher2,
+ internal::EitherOfMatcher<Matcher3, internal::EitherOfMatcher<Matcher4,
+ internal::EitherOfMatcher<Matcher5, internal::EitherOfMatcher<Matcher6,
+ Matcher7> > > > > >
+AnyOf(Matcher1 m1, Matcher2 m2, Matcher3 m3, Matcher4 m4, Matcher5 m5,
+ Matcher6 m6, Matcher7 m7) {
+ return ::testing::AnyOf(m1, ::testing::AnyOf(m2, m3, m4, m5, m6, m7));
+}
+
+template <typename Matcher1, typename Matcher2, typename Matcher3,
+ typename Matcher4, typename Matcher5, typename Matcher6, typename Matcher7,
+ typename Matcher8>
+inline internal::EitherOfMatcher<Matcher1, internal::EitherOfMatcher<Matcher2,
+ internal::EitherOfMatcher<Matcher3, internal::EitherOfMatcher<Matcher4,
+ internal::EitherOfMatcher<Matcher5, internal::EitherOfMatcher<Matcher6,
+ internal::EitherOfMatcher<Matcher7, Matcher8> > > > > > >
+AnyOf(Matcher1 m1, Matcher2 m2, Matcher3 m3, Matcher4 m4, Matcher5 m5,
+ Matcher6 m6, Matcher7 m7, Matcher8 m8) {
+ return ::testing::AnyOf(m1, ::testing::AnyOf(m2, m3, m4, m5, m6, m7, m8));
+}
+
+template <typename Matcher1, typename Matcher2, typename Matcher3,
+ typename Matcher4, typename Matcher5, typename Matcher6, typename Matcher7,
+ typename Matcher8, typename Matcher9>
+inline internal::EitherOfMatcher<Matcher1, internal::EitherOfMatcher<Matcher2,
+ internal::EitherOfMatcher<Matcher3, internal::EitherOfMatcher<Matcher4,
+ internal::EitherOfMatcher<Matcher5, internal::EitherOfMatcher<Matcher6,
+ internal::EitherOfMatcher<Matcher7, internal::EitherOfMatcher<Matcher8,
+ Matcher9> > > > > > > >
+AnyOf(Matcher1 m1, Matcher2 m2, Matcher3 m3, Matcher4 m4, Matcher5 m5,
+ Matcher6 m6, Matcher7 m7, Matcher8 m8, Matcher9 m9) {
+ return ::testing::AnyOf(m1, ::testing::AnyOf(m2, m3, m4, m5, m6, m7, m8, m9));
+}
+
+template <typename Matcher1, typename Matcher2, typename Matcher3,
+ typename Matcher4, typename Matcher5, typename Matcher6, typename Matcher7,
+ typename Matcher8, typename Matcher9, typename Matcher10>
+inline internal::EitherOfMatcher<Matcher1, internal::EitherOfMatcher<Matcher2,
+ internal::EitherOfMatcher<Matcher3, internal::EitherOfMatcher<Matcher4,
+ internal::EitherOfMatcher<Matcher5, internal::EitherOfMatcher<Matcher6,
+ internal::EitherOfMatcher<Matcher7, internal::EitherOfMatcher<Matcher8,
+ internal::EitherOfMatcher<Matcher9, Matcher10> > > > > > > > >
+AnyOf(Matcher1 m1, Matcher2 m2, Matcher3 m3, Matcher4 m4, Matcher5 m5,
+ Matcher6 m6, Matcher7 m7, Matcher8 m8, Matcher9 m9, Matcher10 m10) {
+ return ::testing::AnyOf(m1, ::testing::AnyOf(m2, m3, m4, m5, m6, m7, m8, m9,
+ m10));
+}
+
+} // namespace testing
+
+
+// The MATCHER* family of macros can be used in a namespace scope to
+// define custom matchers easily.
+//
+// Basic Usage
+// ===========
+//
+// The syntax
+//
+// MATCHER(name, description_string) { statements; }
+//
+// defines a matcher with the given name that executes the statements,
+// which must return a bool to indicate if the match succeeds. Inside
+// the statements, you can refer to the value being matched by 'arg',
+// and refer to its type by 'arg_type'.
+//
+// The description string documents what the matcher does, and is used
+// to generate the failure message when the match fails. Since a
+// MATCHER() is usually defined in a header file shared by multiple
+// C++ source files, we require the description to be a C-string
+// literal to avoid possible side effects. It can be empty, in which
+// case we'll use the sequence of words in the matcher name as the
+// description.
+//
+// For example:
+//
+// MATCHER(IsEven, "") { return (arg % 2) == 0; }
+//
+// allows you to write
+//
+// // Expects mock_foo.Bar(n) to be called where n is even.
+// EXPECT_CALL(mock_foo, Bar(IsEven()));
+//
+// or,
+//
+// // Verifies that the value of some_expression is even.
+// EXPECT_THAT(some_expression, IsEven());
+//
+// If the above assertion fails, it will print something like:
+//
+// Value of: some_expression
+// Expected: is even
+// Actual: 7
+//
+// where the description "is even" is automatically calculated from the
+// matcher name IsEven.
+//
+// Argument Type
+// =============
+//
+// Note that the type of the value being matched (arg_type) is
+// determined by the context in which you use the matcher and is
+// supplied to you by the compiler, so you don't need to worry about
+// declaring it (nor can you). This allows the matcher to be
+// polymorphic. For example, IsEven() can be used to match any type
+// where the value of "(arg % 2) == 0" can be implicitly converted to
+// a bool. In the "Bar(IsEven())" example above, if method Bar()
+// takes an int, 'arg_type' will be int; if it takes an unsigned long,
+// 'arg_type' will be unsigned long; and so on.
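+//
+// For instance (illustrative only), given the IsEven() matcher above and
+// two hypothetical mock methods
+//
+//   MOCK_METHOD1(Bar, void(int n));
+//   MOCK_METHOD1(Baz, void(unsigned long n));
+//
+// the same IsEven() works in both expectations below; arg_type is deduced
+// as int in the first and as unsigned long in the second:
+//
+//   EXPECT_CALL(mock_foo, Bar(IsEven()));
+//   EXPECT_CALL(mock_foo, Baz(IsEven()));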
+//
+// Parameterizing Matchers
+// =======================
+//
+// Sometimes you'll want to parameterize the matcher. For that you
+// can use another macro:
+//
+// MATCHER_P(name, param_name, description_string) { statements; }
+//
+// For example:
+//
+// MATCHER_P(HasAbsoluteValue, value, "") { return abs(arg) == value; }
+//
+// will allow you to write:
+//
+// EXPECT_THAT(Blah("a"), HasAbsoluteValue(n));
+//
+// which may lead to this message (assuming n is 10):
+//
+// Value of: Blah("a")
+// Expected: has absolute value 10
+// Actual: -9
+//
+// Note that both the matcher description and its parameter are
+// printed, making the message human-friendly.
+//
+// In the matcher definition body, you can write 'foo_type' to
+// reference the type of a parameter named 'foo'. For example, in the
+// body of MATCHER_P(HasAbsoluteValue, value) above, you can write
+// 'value_type' to refer to the type of 'value'.
+//
+// We also provide MATCHER_P2, MATCHER_P3, ..., up to MATCHER_P10 to
+// support multi-parameter matchers.
+//
+// Describing Parameterized Matchers
+// =================================
+//
+// The last argument to MATCHER*() is a string-typed expression. The
+// expression can reference all of the matcher's parameters and a
+// special bool-typed variable named 'negation'. When 'negation' is
+// false, the expression should evaluate to the matcher's description;
+// otherwise it should evaluate to the description of the negation of
+// the matcher. For example,
+//
+// using testing::PrintToString;
+//
+// MATCHER_P2(InClosedRange, low, hi,
+// string(negation ? "is not" : "is") + " in range [" +
+// PrintToString(low) + ", " + PrintToString(hi) + "]") {
+// return low <= arg && arg <= hi;
+// }
+// ...
+// EXPECT_THAT(3, InClosedRange(4, 6));
+// EXPECT_THAT(3, Not(InClosedRange(2, 4)));
+//
+// would generate two failures that contain the text:
+//
+// Expected: is in range [4, 6]
+// ...
+// Expected: is not in range [2, 4]
+//
+// If you specify "" as the description, the failure message will
+// contain the sequence of words in the matcher name followed by the
+// parameter values printed as a tuple. For example,
+//
+// MATCHER_P2(InClosedRange, low, hi, "") { ... }
+// ...
+// EXPECT_THAT(3, InClosedRange(4, 6));
+// EXPECT_THAT(3, Not(InClosedRange(2, 4)));
+//
+// would generate two failures that contain the text:
+//
+// Expected: in closed range (4, 6)
+// ...
+// Expected: not (in closed range (2, 4))
+//
+// Types of Matcher Parameters
+// ===========================
+//
+// For the purpose of typing, you can view
+//
+// MATCHER_Pk(Foo, p1, ..., pk, description_string) { ... }
+//
+// as shorthand for
+//
+// template <typename p1_type, ..., typename pk_type>
+// FooMatcherPk<p1_type, ..., pk_type>
+// Foo(p1_type p1, ..., pk_type pk) { ... }
+//
+// When you write Foo(v1, ..., vk), the compiler infers the types of
+// the parameters v1, ..., and vk for you. If you are not happy with
+// the result of the type inference, you can specify the types by
+// explicitly instantiating the template, as in Foo<long, bool>(5,
+// false). As said earlier, you don't get to (or need to) specify
+// 'arg_type' as that's determined by the context in which the matcher
+// is used. You can assign the result of expression Foo(p1, ..., pk)
+// to a variable of type FooMatcherPk<p1_type, ..., pk_type>. This
+// can be useful when composing matchers.
+//
+// While you can instantiate a matcher template with reference types,
+// passing the parameters by pointer usually makes your code more
+// readable. If, however, you still want to pass a parameter by
+// reference, be aware that in the failure message generated by the
+// matcher you will see the value of the referenced object but not its
+// address.
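+//
+// A sketch (illustrative only), reusing the InClosedRange example above:
+//
+//   // The matcher object can be stored in a variable of the generated
+//   // type and reused in several assertions.
+//   InClosedRangeMatcherP2<int, int> m = InClosedRange(4, 6);
+//   EXPECT_THAT(5, m);
+//   EXPECT_THAT(7, Not(m));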
+//
+// Explaining Match Results
+// ========================
+//
+// Sometimes the matcher description alone isn't enough to explain why
+// the match has failed or succeeded. For example, when expecting a
+// long string, it can be very helpful to also print the diff between
+// the expected string and the actual one. To achieve that, you can
+// optionally stream additional information to a special variable
+// named result_listener, whose type is a pointer to class
+// MatchResultListener:
+//
+// MATCHER_P(EqualsLongString, str, "") {
+// if (arg == str) return true;
+//
+// *result_listener << "the difference: "
+//                      << DiffStrings(str, arg);
+// return false;
+// }
+//
+// Overloading Matchers
+// ====================
+//
+// You can overload matchers with different numbers of parameters:
+//
+// MATCHER_P(Blah, a, description_string1) { ... }
+// MATCHER_P2(Blah, a, b, description_string2) { ... }
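+//
+// The call site picks the overload by the number of arguments
+// (illustrative only; 'x' is hypothetical):
+//
+//   EXPECT_THAT(x, Blah(1));     // uses the one-parameter overload
+//   EXPECT_THAT(x, Blah(1, 2));  // uses the two-parameter overload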
+//
+// Caveats
+// =======
+//
+// When defining a new matcher, you should also consider implementing
+// MatcherInterface or using MakePolymorphicMatcher(). These
+// approaches require more work than the MATCHER* macros, but also
+// give you more control over the types of the value being matched and
+// the matcher parameters, which may lead to better compiler error
+// messages when the matcher is used incorrectly. They also allow
+// overloading matchers based on parameter types (as opposed to just
+// based on the number of parameters).
+//
+// MATCHER*() can only be used in a namespace scope. The reason is
+// that C++ doesn't yet allow function-local types to be used to
+// instantiate templates. The upcoming C++0x standard will fix this.
+// Once that's done, we'll consider supporting using MATCHER*() inside
+// a function.
+//
+// More Information
+// ================
+//
+// To learn more about using these macros, please search for 'MATCHER'
+// on http://code.google.com/p/googlemock/wiki/CookBook.
+
+#define MATCHER(name, description)\
+ class name##Matcher {\
+ public:\
+ template <typename arg_type>\
+ class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+ public:\
+ gmock_Impl()\
+ {}\
+ virtual bool MatchAndExplain(\
+ arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+ virtual void DescribeTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(false);\
+ }\
+ virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(true);\
+ }\
+ private:\
+ ::testing::internal::string FormatDescription(bool negation) const {\
+ const ::testing::internal::string gmock_description = (description);\
+ if (!gmock_description.empty())\
+ return gmock_description;\
+ return ::testing::internal::FormatMatcherDescription(\
+ negation, #name,\
+ ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+ ::std::tr1::tuple<>()));\
+ }\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename arg_type>\
+ operator ::testing::Matcher<arg_type>() const {\
+ return ::testing::Matcher<arg_type>(\
+ new gmock_Impl<arg_type>());\
+ }\
+ name##Matcher() {\
+ }\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##Matcher);\
+ };\
+ inline name##Matcher name() {\
+ return name##Matcher();\
+ }\
+ template <typename arg_type>\
+ bool name##Matcher::gmock_Impl<arg_type>::MatchAndExplain(\
+ arg_type arg,\
+ ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+ const
+
+#define MATCHER_P(name, p0, description)\
+ template <typename p0##_type>\
+ class name##MatcherP {\
+ public:\
+ template <typename arg_type>\
+ class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+ public:\
+ explicit gmock_Impl(p0##_type gmock_p0)\
+ : p0(gmock_p0) {}\
+ virtual bool MatchAndExplain(\
+ arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+ virtual void DescribeTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(false);\
+ }\
+ virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(true);\
+ }\
+ p0##_type p0;\
+ private:\
+ ::testing::internal::string FormatDescription(bool negation) const {\
+ const ::testing::internal::string gmock_description = (description);\
+ if (!gmock_description.empty())\
+ return gmock_description;\
+ return ::testing::internal::FormatMatcherDescription(\
+ negation, #name,\
+ ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+ ::std::tr1::tuple<p0##_type>(p0)));\
+ }\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename arg_type>\
+ operator ::testing::Matcher<arg_type>() const {\
+ return ::testing::Matcher<arg_type>(\
+ new gmock_Impl<arg_type>(p0));\
+ }\
+ name##MatcherP(p0##_type gmock_p0) : p0(gmock_p0) {\
+ }\
+ p0##_type p0;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##MatcherP);\
+ };\
+ template <typename p0##_type>\
+ inline name##MatcherP<p0##_type> name(p0##_type p0) {\
+ return name##MatcherP<p0##_type>(p0);\
+ }\
+ template <typename p0##_type>\
+ template <typename arg_type>\
+ bool name##MatcherP<p0##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+ arg_type arg,\
+ ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+ const
+
+#define MATCHER_P2(name, p0, p1, description)\
+ template <typename p0##_type, typename p1##_type>\
+ class name##MatcherP2 {\
+ public:\
+ template <typename arg_type>\
+ class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+ public:\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1)\
+ : p0(gmock_p0), p1(gmock_p1) {}\
+ virtual bool MatchAndExplain(\
+ arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+ virtual void DescribeTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(false);\
+ }\
+ virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(true);\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ private:\
+ ::testing::internal::string FormatDescription(bool negation) const {\
+ const ::testing::internal::string gmock_description = (description);\
+ if (!gmock_description.empty())\
+ return gmock_description;\
+ return ::testing::internal::FormatMatcherDescription(\
+ negation, #name,\
+ ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+ ::std::tr1::tuple<p0##_type, p1##_type>(p0, p1)));\
+ }\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename arg_type>\
+ operator ::testing::Matcher<arg_type>() const {\
+ return ::testing::Matcher<arg_type>(\
+ new gmock_Impl<arg_type>(p0, p1));\
+ }\
+ name##MatcherP2(p0##_type gmock_p0, p1##_type gmock_p1) : p0(gmock_p0), \
+ p1(gmock_p1) {\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##MatcherP2);\
+ };\
+ template <typename p0##_type, typename p1##_type>\
+ inline name##MatcherP2<p0##_type, p1##_type> name(p0##_type p0, \
+ p1##_type p1) {\
+ return name##MatcherP2<p0##_type, p1##_type>(p0, p1);\
+ }\
+ template <typename p0##_type, typename p1##_type>\
+ template <typename arg_type>\
+ bool name##MatcherP2<p0##_type, \
+ p1##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+ arg_type arg,\
+ ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+ const
+
+#define MATCHER_P3(name, p0, p1, p2, description)\
+ template <typename p0##_type, typename p1##_type, typename p2##_type>\
+ class name##MatcherP3 {\
+ public:\
+ template <typename arg_type>\
+ class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+ public:\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2)\
+ : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2) {}\
+ virtual bool MatchAndExplain(\
+ arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+ virtual void DescribeTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(false);\
+ }\
+ virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(true);\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ private:\
+ ::testing::internal::string FormatDescription(bool negation) const {\
+ const ::testing::internal::string gmock_description = (description);\
+ if (!gmock_description.empty())\
+ return gmock_description;\
+ return ::testing::internal::FormatMatcherDescription(\
+ negation, #name,\
+ ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+ ::std::tr1::tuple<p0##_type, p1##_type, p2##_type>(p0, p1, \
+ p2)));\
+ }\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename arg_type>\
+ operator ::testing::Matcher<arg_type>() const {\
+ return ::testing::Matcher<arg_type>(\
+ new gmock_Impl<arg_type>(p0, p1, p2));\
+ }\
+ name##MatcherP3(p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2) {\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##MatcherP3);\
+ };\
+ template <typename p0##_type, typename p1##_type, typename p2##_type>\
+ inline name##MatcherP3<p0##_type, p1##_type, p2##_type> name(p0##_type p0, \
+ p1##_type p1, p2##_type p2) {\
+ return name##MatcherP3<p0##_type, p1##_type, p2##_type>(p0, p1, p2);\
+ }\
+ template <typename p0##_type, typename p1##_type, typename p2##_type>\
+ template <typename arg_type>\
+ bool name##MatcherP3<p0##_type, p1##_type, \
+ p2##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+ arg_type arg,\
+ ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+ const
+
+#define MATCHER_P4(name, p0, p1, p2, p3, description)\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type>\
+ class name##MatcherP4 {\
+ public:\
+ template <typename arg_type>\
+ class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+ public:\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3)\
+ : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3) {}\
+ virtual bool MatchAndExplain(\
+ arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+ virtual void DescribeTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(false);\
+ }\
+ virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(true);\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ private:\
+ ::testing::internal::string FormatDescription(bool negation) const {\
+ const ::testing::internal::string gmock_description = (description);\
+ if (!gmock_description.empty())\
+ return gmock_description;\
+ return ::testing::internal::FormatMatcherDescription(\
+ negation, #name,\
+ ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+ ::std::tr1::tuple<p0##_type, p1##_type, p2##_type, \
+ p3##_type>(p0, p1, p2, p3)));\
+ }\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename arg_type>\
+ operator ::testing::Matcher<arg_type>() const {\
+ return ::testing::Matcher<arg_type>(\
+ new gmock_Impl<arg_type>(p0, p1, p2, p3));\
+ }\
+ name##MatcherP4(p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2, p3##_type gmock_p3) : p0(gmock_p0), p1(gmock_p1), \
+ p2(gmock_p2), p3(gmock_p3) {\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##MatcherP4);\
+ };\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type>\
+ inline name##MatcherP4<p0##_type, p1##_type, p2##_type, \
+ p3##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, \
+ p3##_type p3) {\
+ return name##MatcherP4<p0##_type, p1##_type, p2##_type, p3##_type>(p0, \
+ p1, p2, p3);\
+ }\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type>\
+ template <typename arg_type>\
+ bool name##MatcherP4<p0##_type, p1##_type, p2##_type, \
+ p3##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+ arg_type arg,\
+ ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+ const
+
+#define MATCHER_P5(name, p0, p1, p2, p3, p4, description)\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type>\
+ class name##MatcherP5 {\
+ public:\
+ template <typename arg_type>\
+ class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+ public:\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4)\
+ : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \
+ p4(gmock_p4) {}\
+ virtual bool MatchAndExplain(\
+ arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+ virtual void DescribeTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(false);\
+ }\
+ virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(true);\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ private:\
+ ::testing::internal::string FormatDescription(bool negation) const {\
+ const ::testing::internal::string gmock_description = (description);\
+ if (!gmock_description.empty())\
+ return gmock_description;\
+ return ::testing::internal::FormatMatcherDescription(\
+ negation, #name,\
+ ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+ ::std::tr1::tuple<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type>(p0, p1, p2, p3, p4)));\
+ }\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename arg_type>\
+ operator ::testing::Matcher<arg_type>() const {\
+ return ::testing::Matcher<arg_type>(\
+ new gmock_Impl<arg_type>(p0, p1, p2, p3, p4));\
+ }\
+ name##MatcherP5(p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2, p3##_type gmock_p3, \
+ p4##_type gmock_p4) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3), p4(gmock_p4) {\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##MatcherP5);\
+ };\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type>\
+ inline name##MatcherP5<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \
+ p4##_type p4) {\
+ return name##MatcherP5<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type>(p0, p1, p2, p3, p4);\
+ }\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type>\
+ template <typename arg_type>\
+ bool name##MatcherP5<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+ arg_type arg,\
+ ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+ const
+
+#define MATCHER_P6(name, p0, p1, p2, p3, p4, p5, description)\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type>\
+ class name##MatcherP6 {\
+ public:\
+ template <typename arg_type>\
+ class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+ public:\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5)\
+ : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \
+ p4(gmock_p4), p5(gmock_p5) {}\
+ virtual bool MatchAndExplain(\
+ arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+ virtual void DescribeTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(false);\
+ }\
+ virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(true);\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ private:\
+ ::testing::internal::string FormatDescription(bool negation) const {\
+ const ::testing::internal::string gmock_description = (description);\
+ if (!gmock_description.empty())\
+ return gmock_description;\
+ return ::testing::internal::FormatMatcherDescription(\
+ negation, #name,\
+ ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+ ::std::tr1::tuple<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type>(p0, p1, p2, p3, p4, p5)));\
+ }\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename arg_type>\
+ operator ::testing::Matcher<arg_type>() const {\
+ return ::testing::Matcher<arg_type>(\
+ new gmock_Impl<arg_type>(p0, p1, p2, p3, p4, p5));\
+ }\
+ name##MatcherP6(p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+ p5##_type gmock_p5) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3), p4(gmock_p4), p5(gmock_p5) {\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##MatcherP6);\
+ };\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type>\
+ inline name##MatcherP6<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, \
+ p3##_type p3, p4##_type p4, p5##_type p5) {\
+ return name##MatcherP6<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type>(p0, p1, p2, p3, p4, p5);\
+ }\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type>\
+ template <typename arg_type>\
+ bool name##MatcherP6<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+ p5##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+ arg_type arg,\
+ ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+ const
+
+#define MATCHER_P7(name, p0, p1, p2, p3, p4, p5, p6, description)\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type>\
+ class name##MatcherP7 {\
+ public:\
+ template <typename arg_type>\
+ class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+ public:\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+ p6##_type gmock_p6)\
+ : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \
+ p4(gmock_p4), p5(gmock_p5), p6(gmock_p6) {}\
+ virtual bool MatchAndExplain(\
+ arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+ virtual void DescribeTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(false);\
+ }\
+ virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(true);\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ p6##_type p6;\
+ private:\
+ ::testing::internal::string FormatDescription(bool negation) const {\
+ const ::testing::internal::string gmock_description = (description);\
+ if (!gmock_description.empty())\
+ return gmock_description;\
+ return ::testing::internal::FormatMatcherDescription(\
+ negation, #name,\
+ ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+ ::std::tr1::tuple<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type>(p0, p1, p2, p3, p4, p5, \
+ p6)));\
+ }\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename arg_type>\
+ operator ::testing::Matcher<arg_type>() const {\
+ return ::testing::Matcher<arg_type>(\
+ new gmock_Impl<arg_type>(p0, p1, p2, p3, p4, p5, p6));\
+ }\
+ name##MatcherP7(p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+ p5##_type gmock_p5, p6##_type gmock_p6) : p0(gmock_p0), p1(gmock_p1), \
+ p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), \
+ p6(gmock_p6) {\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ p6##_type p6;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##MatcherP7);\
+ };\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type>\
+ inline name##MatcherP7<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type> name(p0##_type p0, p1##_type p1, \
+ p2##_type p2, p3##_type p3, p4##_type p4, p5##_type p5, \
+ p6##_type p6) {\
+ return name##MatcherP7<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type>(p0, p1, p2, p3, p4, p5, p6);\
+ }\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type>\
+ template <typename arg_type>\
+ bool name##MatcherP7<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+ p5##_type, p6##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+ arg_type arg,\
+ ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+ const
+
+#define MATCHER_P8(name, p0, p1, p2, p3, p4, p5, p6, p7, description)\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type>\
+ class name##MatcherP8 {\
+ public:\
+ template <typename arg_type>\
+ class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+ public:\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+ p6##_type gmock_p6, p7##_type gmock_p7)\
+ : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \
+ p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7) {}\
+ virtual bool MatchAndExplain(\
+ arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+ virtual void DescribeTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(false);\
+ }\
+ virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(true);\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ p6##_type p6;\
+ p7##_type p7;\
+ private:\
+ ::testing::internal::string FormatDescription(bool negation) const {\
+ const ::testing::internal::string gmock_description = (description);\
+ if (!gmock_description.empty())\
+ return gmock_description;\
+ return ::testing::internal::FormatMatcherDescription(\
+ negation, #name,\
+ ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+ ::std::tr1::tuple<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type, p7##_type>(p0, p1, p2, \
+ p3, p4, p5, p6, p7)));\
+ }\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename arg_type>\
+ operator ::testing::Matcher<arg_type>() const {\
+ return ::testing::Matcher<arg_type>(\
+ new gmock_Impl<arg_type>(p0, p1, p2, p3, p4, p5, p6, p7));\
+ }\
+ name##MatcherP8(p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+ p5##_type gmock_p5, p6##_type gmock_p6, \
+ p7##_type gmock_p7) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \
+ p7(gmock_p7) {\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ p6##_type p6;\
+ p7##_type p7;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##MatcherP8);\
+ };\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type>\
+ inline name##MatcherP8<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type, p7##_type> name(p0##_type p0, \
+ p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, p5##_type p5, \
+ p6##_type p6, p7##_type p7) {\
+ return name##MatcherP8<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type, p7##_type>(p0, p1, p2, p3, p4, p5, \
+ p6, p7);\
+ }\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type>\
+ template <typename arg_type>\
+ bool name##MatcherP8<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+ p5##_type, p6##_type, \
+ p7##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+ arg_type arg,\
+ ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+ const
+
+#define MATCHER_P9(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, description)\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type, typename p8##_type>\
+ class name##MatcherP9 {\
+ public:\
+ template <typename arg_type>\
+ class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+ public:\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+ p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8)\
+ : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \
+ p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \
+ p8(gmock_p8) {}\
+ virtual bool MatchAndExplain(\
+ arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+ virtual void DescribeTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(false);\
+ }\
+ virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(true);\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ p6##_type p6;\
+ p7##_type p7;\
+ p8##_type p8;\
+ private:\
+ ::testing::internal::string FormatDescription(bool negation) const {\
+ const ::testing::internal::string gmock_description = (description);\
+ if (!gmock_description.empty())\
+ return gmock_description;\
+ return ::testing::internal::FormatMatcherDescription(\
+ negation, #name,\
+ ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+ ::std::tr1::tuple<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type, p7##_type, \
+ p8##_type>(p0, p1, p2, p3, p4, p5, p6, p7, p8)));\
+ }\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename arg_type>\
+ operator ::testing::Matcher<arg_type>() const {\
+ return ::testing::Matcher<arg_type>(\
+ new gmock_Impl<arg_type>(p0, p1, p2, p3, p4, p5, p6, p7, p8));\
+ }\
+ name##MatcherP9(p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+ p5##_type gmock_p5, p6##_type gmock_p6, p7##_type gmock_p7, \
+ p8##_type gmock_p8) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \
+ p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \
+ p8(gmock_p8) {\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ p6##_type p6;\
+ p7##_type p7;\
+ p8##_type p8;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##MatcherP9);\
+ };\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type, typename p8##_type>\
+ inline name##MatcherP9<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type, p7##_type, \
+ p8##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \
+ p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, \
+ p8##_type p8) {\
+ return name##MatcherP9<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type, p7##_type, p8##_type>(p0, p1, p2, \
+ p3, p4, p5, p6, p7, p8);\
+ }\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type, typename p8##_type>\
+ template <typename arg_type>\
+ bool name##MatcherP9<p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \
+ p5##_type, p6##_type, p7##_type, \
+ p8##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+ arg_type arg,\
+ ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+ const
+
+#define MATCHER_P10(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, description)\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type, typename p8##_type, \
+ typename p9##_type>\
+ class name##MatcherP10 {\
+ public:\
+ template <typename arg_type>\
+ class gmock_Impl : public ::testing::MatcherInterface<arg_type> {\
+ public:\
+ gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \
+ p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \
+ p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8, \
+ p9##_type gmock_p9)\
+ : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \
+ p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \
+ p8(gmock_p8), p9(gmock_p9) {}\
+ virtual bool MatchAndExplain(\
+ arg_type arg, ::testing::MatchResultListener* result_listener) const;\
+ virtual void DescribeTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(false);\
+ }\
+ virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\
+ *gmock_os << FormatDescription(true);\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ p6##_type p6;\
+ p7##_type p7;\
+ p8##_type p8;\
+ p9##_type p9;\
+ private:\
+ ::testing::internal::string FormatDescription(bool negation) const {\
+ const ::testing::internal::string gmock_description = (description);\
+ if (!gmock_description.empty())\
+ return gmock_description;\
+ return ::testing::internal::FormatMatcherDescription(\
+ negation, #name,\
+ ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\
+ ::std::tr1::tuple<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type, p7##_type, p8##_type, \
+ p9##_type>(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)));\
+ }\
+ GTEST_DISALLOW_ASSIGN_(gmock_Impl);\
+ };\
+ template <typename arg_type>\
+ operator ::testing::Matcher<arg_type>() const {\
+ return ::testing::Matcher<arg_type>(\
+ new gmock_Impl<arg_type>(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9));\
+ }\
+ name##MatcherP10(p0##_type gmock_p0, p1##_type gmock_p1, \
+ p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \
+ p5##_type gmock_p5, p6##_type gmock_p6, p7##_type gmock_p7, \
+ p8##_type gmock_p8, p9##_type gmock_p9) : p0(gmock_p0), p1(gmock_p1), \
+ p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \
+ p7(gmock_p7), p8(gmock_p8), p9(gmock_p9) {\
+ }\
+ p0##_type p0;\
+ p1##_type p1;\
+ p2##_type p2;\
+ p3##_type p3;\
+ p4##_type p4;\
+ p5##_type p5;\
+ p6##_type p6;\
+ p7##_type p7;\
+ p8##_type p8;\
+ p9##_type p9;\
+ private:\
+ GTEST_DISALLOW_ASSIGN_(name##MatcherP10);\
+ };\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type, typename p8##_type, \
+ typename p9##_type>\
+ inline name##MatcherP10<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type, p7##_type, p8##_type, \
+ p9##_type> name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \
+ p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8, \
+ p9##_type p9) {\
+ return name##MatcherP10<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type, p7##_type, p8##_type, p9##_type>(p0, \
+ p1, p2, p3, p4, p5, p6, p7, p8, p9);\
+ }\
+ template <typename p0##_type, typename p1##_type, typename p2##_type, \
+ typename p3##_type, typename p4##_type, typename p5##_type, \
+ typename p6##_type, typename p7##_type, typename p8##_type, \
+ typename p9##_type>\
+ template <typename arg_type>\
+ bool name##MatcherP10<p0##_type, p1##_type, p2##_type, p3##_type, \
+ p4##_type, p5##_type, p6##_type, p7##_type, p8##_type, \
+ p9##_type>::gmock_Impl<arg_type>::MatchAndExplain(\
+ arg_type arg,\
+ ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\
+ const
+
+#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements some actions that depend on gmock-generated-actions.h.
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_MORE_ACTIONS_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_MORE_ACTIONS_H_
+
+#include <algorithm>
+
+
+namespace testing {
+namespace internal {
+
+// Implements the Invoke(f) action. The template argument
+// FunctionImpl is the implementation type of f, which can be either a
+// function pointer or a functor. Invoke(f) can be used as an
+// Action<F> as long as f's type is compatible with F (i.e. f can be
+// assigned to a tr1::function<F>).
+template <typename FunctionImpl>
+class InvokeAction {
+ public:
+ // The c'tor makes a copy of function_impl (either a function
+ // pointer or a functor).
+ explicit InvokeAction(FunctionImpl function_impl)
+ : function_impl_(function_impl) {}
+
+ template <typename Result, typename ArgumentTuple>
+ Result Perform(const ArgumentTuple& args) {
+ return InvokeHelper<Result, ArgumentTuple>::Invoke(function_impl_, args);
+ }
+
+ private:
+ FunctionImpl function_impl_;
+
+ GTEST_DISALLOW_ASSIGN_(InvokeAction);
+};
+
+// Implements the Invoke(object_ptr, &Class::Method) action.
+template <class Class, typename MethodPtr>
+class InvokeMethodAction {
+ public:
+ InvokeMethodAction(Class* obj_ptr, MethodPtr method_ptr)
+ : obj_ptr_(obj_ptr), method_ptr_(method_ptr) {}
+
+ template <typename Result, typename ArgumentTuple>
+ Result Perform(const ArgumentTuple& args) const {
+ return InvokeHelper<Result, ArgumentTuple>::InvokeMethod(
+ obj_ptr_, method_ptr_, args);
+ }
+
+ private:
+ Class* const obj_ptr_;
+ const MethodPtr method_ptr_;
+
+ GTEST_DISALLOW_ASSIGN_(InvokeMethodAction);
+};
+
+} // namespace internal
+
+// Various overloads for Invoke().
+
+// Creates an action that invokes 'function_impl' with the mock
+// function's arguments.
+template <typename FunctionImpl>
+PolymorphicAction<internal::InvokeAction<FunctionImpl> > Invoke(
+ FunctionImpl function_impl) {
+ return MakePolymorphicAction(
+ internal::InvokeAction<FunctionImpl>(function_impl));
+}
+
+// Creates an action that invokes the given method on the given object
+// with the mock function's arguments.
+template <class Class, typename MethodPtr>
+PolymorphicAction<internal::InvokeMethodAction<Class, MethodPtr> > Invoke(
+ Class* obj_ptr, MethodPtr method_ptr) {
+ return MakePolymorphicAction(
+ internal::InvokeMethodAction<Class, MethodPtr>(obj_ptr, method_ptr));
+}
+
+// WithoutArgs(inner_action) can be used in a mock function with a
+// non-empty argument list to perform inner_action, which takes no
+// argument. In other words, it adapts an action accepting no
+// argument to one that accepts (and ignores) arguments.
+template <typename InnerAction>
+inline internal::WithArgsAction<InnerAction>
+WithoutArgs(const InnerAction& action) {
+ return internal::WithArgsAction<InnerAction>(action);
+}
+
+// WithArg<k>(an_action) creates an action that passes the k-th
+// (0-based) argument of the mock function to an_action and performs
+// it. It adapts an action accepting one argument to one that accepts
+// multiple arguments. For convenience, we also provide
+// WithArgs<k>(an_action) (defined below) as a synonym.
+template <int k, typename InnerAction>
+inline internal::WithArgsAction<InnerAction, k>
+WithArg(const InnerAction& action) {
+ return internal::WithArgsAction<InnerAction, k>(action);
+}
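+
+// A minimal usage sketch, assuming a hypothetical gmock class MockRobot with
+// MOCK_METHOD1(Ping, bool(int)) and MOCK_METHOD2(Move, bool(int, int)), and
+// free functions bool DoPing(int) and bool DefaultResult():
+//
+//   MockRobot robot;
+//   EXPECT_CALL(robot, Ping(_))
+//       .WillOnce(Invoke(DoPing));                     // all args forwarded
+//   EXPECT_CALL(robot, Move(_, _))
+//       .WillOnce(WithArg<1>(Invoke(DoPing)))          // only arg #1 passed
+//       .WillOnce(WithoutArgs(Invoke(DefaultResult))); // no args passed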
+
+// The ACTION*() macros trigger warning C4100 (unreferenced formal
+// parameter) in MSVC with -W4. Unfortunately they cannot be fixed in
+// the macro definition, as the warnings are generated when the macro
+// is expanded and macro expansion cannot contain #pragma. Therefore
+// we suppress them here.
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable:4100)
+#endif
+
+// Action ReturnArg<k>() returns the k-th argument of the mock function.
+ACTION_TEMPLATE(ReturnArg,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_0_VALUE_PARAMS()) {
+ return std::tr1::get<k>(args);
+}
+
+// Action SaveArg<k>(pointer) saves the k-th (0-based) argument of the
+// mock function to *pointer.
+ACTION_TEMPLATE(SaveArg,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_1_VALUE_PARAMS(pointer)) {
+ *pointer = ::std::tr1::get<k>(args);
+}
+
+// Action SaveArgPointee<k>(pointer) saves the value pointed to
+// by the k-th (0-based) argument of the mock function to *pointer.
+ACTION_TEMPLATE(SaveArgPointee,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_1_VALUE_PARAMS(pointer)) {
+ *pointer = *::std::tr1::get<k>(args);
+}
+
+// Action SetArgReferee<k>(value) assigns 'value' to the variable
+// referenced by the k-th (0-based) argument of the mock function.
+ACTION_TEMPLATE(SetArgReferee,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_1_VALUE_PARAMS(value)) {
+ typedef typename ::std::tr1::tuple_element<k, args_type>::type argk_type;
+ // Ensures that argument #k is a reference. If you get a compiler
+ // error on the next line, you are using SetArgReferee<k>(value) in
+ // a mock function whose k-th (0-based) argument is not a reference.
+ GTEST_COMPILE_ASSERT_(internal::is_reference<argk_type>::value,
+ SetArgReferee_must_be_used_with_a_reference_argument);
+ ::std::tr1::get<k>(args) = value;
+}
+
+// Action SetArrayArgument<k>(first, last) copies the elements in
+// source range [first, last) to the array pointed to by the k-th
+// (0-based) argument, which can be either a pointer or an
+// iterator. The action does not take ownership of the elements in the
+// source range.
+ACTION_TEMPLATE(SetArrayArgument,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_2_VALUE_PARAMS(first, last)) {
+ // Microsoft compiler deprecates ::std::copy, so we want to suppress warning
+ // 4996 (Function call with parameters that may be unsafe) there.
+#ifdef _MSC_VER
+# pragma warning(push) // Saves the current warning state.
+# pragma warning(disable:4996) // Temporarily disables warning 4996.
+#endif
+ ::std::copy(first, last, ::std::tr1::get<k>(args));
+#ifdef _MSC_VER
+# pragma warning(pop) // Restores the warning state.
+#endif
+}
+
+// Action DeleteArg<k>() deletes the k-th (0-based) argument of the mock
+// function.
+ACTION_TEMPLATE(DeleteArg,
+ HAS_1_TEMPLATE_PARAMS(int, k),
+ AND_0_VALUE_PARAMS()) {
+ delete ::std::tr1::get<k>(args);
+}
+
+// This action returns the value pointed to by 'pointer'.
+ACTION_P(ReturnPointee, pointer) { return *pointer; }
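+
+// A brief sketch of the argument actions above, reusing the hypothetical
+// MockRobot from the earlier Invoke() example:
+//
+//   int seen_id = 0;
+//   EXPECT_CALL(robot, Ping(_))
+//       .WillOnce(DoAll(SaveArg<0>(&seen_id),  // remember argument #0
+//                       Return(true)));
+//   EXPECT_CALL(robot, Move(_, _))
+//       .WillOnce(ReturnArg<0>());             // return argument #0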
+
+// Action Throw(exception) can be used in a mock function of any type
+// to throw the given exception. Any copyable value can be thrown.
+#if GTEST_HAS_EXCEPTIONS
+
+// Suppresses the 'unreachable code' warning that VC generates in opt modes.
+# ifdef _MSC_VER
+# pragma warning(push) // Saves the current warning state.
+# pragma warning(disable:4702) // Temporarily disables warning 4702.
+# endif
+ACTION_P(Throw, exception) { throw exception; }
+# ifdef _MSC_VER
+# pragma warning(pop) // Restores the warning state.
+# endif
+
+#endif // GTEST_HAS_EXCEPTIONS
+
+#ifdef _MSC_VER
+# pragma warning(pop)
+#endif
+
+} // namespace testing
+
+#endif // GMOCK_INCLUDE_GMOCK_GMOCK_MORE_ACTIONS_H_
+// This file was GENERATED by a script. DO NOT EDIT BY HAND!!!
+
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Implements class templates NiceMock and StrictMock.
+//
+// Given a mock class MockFoo that is created using Google Mock,
+// NiceMock<MockFoo> is a subclass of MockFoo that allows
+// uninteresting calls (i.e. calls to mock methods that have no
+// EXPECT_CALL specs), and StrictMock<MockFoo> is a subclass of
+// MockFoo that treats all uninteresting calls as errors.
+//
+// NiceMock and StrictMock "inherit" the constructors of their
+// respective base class, with up to 10 arguments. Therefore you can
+// write NiceMock<MockFoo>(5, "a") to construct a nice mock where
+// MockFoo has a constructor that accepts (int, const char*), for
+// example.
+//
+// A known limitation is that NiceMock<MockFoo> and
+// StrictMock<MockFoo> only work for mock methods defined using the
+// MOCK_METHOD* family of macros DIRECTLY in the MockFoo class. If a
+// mock method is defined in a base class of MockFoo, the "nice" or
+// "strict" modifier may not affect it, depending on the compiler. In
+// particular, nesting NiceMock and StrictMock is NOT supported.
+//
+// Another known limitation is that the constructors of the base mock
+// cannot have arguments passed by non-const reference, which are
+// banned by the Google C++ style guide anyway.
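+//
+// For example, given a hypothetical gmock class MockFoo:
+//
+//   NiceMock<MockFoo> nice_foo;     // uninteresting calls produce no failure
+//   StrictMock<MockFoo> strict_foo; // uninteresting calls fail the test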
+
+#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_NICE_STRICT_H_
+#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_NICE_STRICT_H_
+
+
+namespace testing {
+
+template <class MockClass>
+class NiceMock : public MockClass {
+ public:
+ // We don't factor out the constructor body to a common method, as
+ // we have to avoid a possible clash with members of MockClass.
+ NiceMock() {
+ ::testing::Mock::AllowUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ // C++ doesn't (yet) allow inheritance of constructors, so we have
+ // to define it for each arity.
+ template <typename A1>
+ explicit NiceMock(const A1& a1) : MockClass(a1) {
+ ::testing::Mock::AllowUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+ template <typename A1, typename A2>
+ NiceMock(const A1& a1, const A2& a2) : MockClass(a1, a2) {
+ ::testing::Mock::AllowUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ template <typename A1, typename A2, typename A3>
+ NiceMock(const A1& a1, const A2& a2, const A3& a3) : MockClass(a1, a2, a3) {
+ ::testing::Mock::AllowUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ template <typename A1, typename A2, typename A3, typename A4>
+ NiceMock(const A1& a1, const A2& a2, const A3& a3,
+ const A4& a4) : MockClass(a1, a2, a3, a4) {
+ ::testing::Mock::AllowUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ template <typename A1, typename A2, typename A3, typename A4, typename A5>
+ NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+ const A5& a5) : MockClass(a1, a2, a3, a4, a5) {
+ ::testing::Mock::AllowUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6>
+ NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+ const A5& a5, const A6& a6) : MockClass(a1, a2, a3, a4, a5, a6) {
+ ::testing::Mock::AllowUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6, typename A7>
+ NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+ const A5& a5, const A6& a6, const A7& a7) : MockClass(a1, a2, a3, a4, a5,
+ a6, a7) {
+ ::testing::Mock::AllowUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6, typename A7, typename A8>
+ NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+ const A5& a5, const A6& a6, const A7& a7, const A8& a8) : MockClass(a1,
+ a2, a3, a4, a5, a6, a7, a8) {
+ ::testing::Mock::AllowUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6, typename A7, typename A8, typename A9>
+ NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+ const A5& a5, const A6& a6, const A7& a7, const A8& a8,
+ const A9& a9) : MockClass(a1, a2, a3, a4, a5, a6, a7, a8, a9) {
+ ::testing::Mock::AllowUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6, typename A7, typename A8, typename A9, typename A10>
+ NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+ const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9,
+ const A10& a10) : MockClass(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) {
+ ::testing::Mock::AllowUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ virtual ~NiceMock() {
+ ::testing::Mock::UnregisterCallReaction(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(NiceMock);
+};
+
+template <class MockClass>
+class StrictMock : public MockClass {
+ public:
+ // We don't factor out the constructor body to a common method, as
+ // we have to avoid a possible clash with members of MockClass.
+ StrictMock() {
+ ::testing::Mock::FailUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ template <typename A1>
+ explicit StrictMock(const A1& a1) : MockClass(a1) {
+ ::testing::Mock::FailUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+ template <typename A1, typename A2>
+ StrictMock(const A1& a1, const A2& a2) : MockClass(a1, a2) {
+ ::testing::Mock::FailUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ template <typename A1, typename A2, typename A3>
+ StrictMock(const A1& a1, const A2& a2, const A3& a3) : MockClass(a1, a2, a3) {
+ ::testing::Mock::FailUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ template <typename A1, typename A2, typename A3, typename A4>
+ StrictMock(const A1& a1, const A2& a2, const A3& a3,
+ const A4& a4) : MockClass(a1, a2, a3, a4) {
+ ::testing::Mock::FailUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ template <typename A1, typename A2, typename A3, typename A4, typename A5>
+ StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+ const A5& a5) : MockClass(a1, a2, a3, a4, a5) {
+ ::testing::Mock::FailUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6>
+ StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+ const A5& a5, const A6& a6) : MockClass(a1, a2, a3, a4, a5, a6) {
+ ::testing::Mock::FailUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6, typename A7>
+ StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+ const A5& a5, const A6& a6, const A7& a7) : MockClass(a1, a2, a3, a4, a5,
+ a6, a7) {
+ ::testing::Mock::FailUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6, typename A7, typename A8>
+ StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+ const A5& a5, const A6& a6, const A7& a7, const A8& a8) : MockClass(a1,
+ a2, a3, a4, a5, a6, a7, a8) {
+ ::testing::Mock::FailUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6, typename A7, typename A8, typename A9>
+ StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+ const A5& a5, const A6& a6, const A7& a7, const A8& a8,
+ const A9& a9) : MockClass(a1, a2, a3, a4, a5, a6, a7, a8, a9) {
+ ::testing::Mock::FailUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ template <typename A1, typename A2, typename A3, typename A4, typename A5,
+ typename A6, typename A7, typename A8, typename A9, typename A10>
+ StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
+ const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9,
+ const A10& a10) : MockClass(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) {
+ ::testing::Mock::FailUninterestingCalls(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ virtual ~StrictMock() {
+ ::testing::Mock::UnregisterCallReaction(
+ internal::ImplicitCast_<MockClass*>(this));
+ }
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(StrictMock);
+};
+
+// The following specializations catch some (relatively more common)
+// user errors of nesting nice and strict mocks. They do NOT catch
+// all possible errors.
+
+// These specializations are declared but not defined, as NiceMock and
+// StrictMock cannot be nested.
+template <typename MockClass>
+class NiceMock<NiceMock<MockClass> >;
+template <typename MockClass>
+class NiceMock<StrictMock<MockClass> >;
+template <typename MockClass>
+class StrictMock<NiceMock<MockClass> >;
+template <typename MockClass>
+class StrictMock<StrictMock<MockClass> >;
+
+} // namespace testing
+
+#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_NICE_STRICT_H_
+
+namespace testing {
+
+// Declares Google Mock flags that we want a user to use programmatically.
+GMOCK_DECLARE_bool_(catch_leaked_mocks);
+GMOCK_DECLARE_string_(verbose);
+
+// Initializes Google Mock. This must be called before running the
+// tests. In particular, it parses the command line for the flags
+// that Google Mock recognizes. Whenever a Google Mock flag is seen,
+// it is removed from argv, and *argc is decremented.
+//
+// No value is returned. Instead, the Google Mock flag variables are
+// updated.
+//
+// Since Google Test is needed for Google Mock to work, this function
+// also initializes Google Test and parses its flags, if that hasn't
+// been done.
+void InitGoogleMock(int* argc, char** argv);
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+void InitGoogleMock(int* argc, wchar_t** argv);
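+
+// Typical use from a test program's main(); RUN_ALL_TESTS() comes from
+// Google Test, which InitGoogleMock() also initializes:
+//
+//   int main(int argc, char** argv) {
+//     ::testing::InitGoogleMock(&argc, argv);
+//     return RUN_ALL_TESTS();
+//   }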
+
+} // namespace testing
+
+#endif // GMOCK_INCLUDE_GMOCK_GMOCK_H_
diff --git a/internal/ceres/gmock/mock-log.h b/internal/ceres/gmock/mock-log.h
new file mode 100644
index 0000000..bce54ab
--- /dev/null
+++ b/internal/ceres/gmock/mock-log.h
@@ -0,0 +1,160 @@
+// Copyright (c) 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Zhanyong Wan
+//
+// Defines the ScopedMockLog class (using Google C++ Mocking
+// Framework), which is convenient for testing code that uses LOG().
+//
+// NOTE(keir): This is a fork until Google Log exports the scoped mock log
+// class; see: http://code.google.com/p/google-glog/issues/detail?id=88
+
+#ifndef GOOGLE_CERES_INTERNAL_MOCK_LOG_H_
+#define GOOGLE_CERES_INTERNAL_MOCK_LOG_H_
+
+#include <string>
+
+#include <gmock/gmock.h>
+
+#include "glog/logging.h"
+
+// Needed to make the scoped mock log tests work without modification.
+namespace ceres {
+namespace internal {
+using google::WARNING;
+} // namespace internal
+} // namespace ceres
+
+namespace testing {
+
+// A ScopedMockLog object intercepts LOG() messages issued during its
+// lifespan. Using this together with Google C++ Mocking Framework,
+// it's very easy to test how a piece of code calls LOG(). The
+// typical usage:
+//
+// TEST(FooTest, LogsCorrectly) {
+// ScopedMockLog log;
+//
+// // We expect the WARNING "Something bad!" exactly twice.
+// EXPECT_CALL(log, Log(WARNING, _, "Something bad!"))
+// .Times(2);
+//
+// // We allow foo.cc to call LOG(INFO) any number of times.
+// EXPECT_CALL(log, Log(INFO, HasSubstr("/foo.cc"), _))
+// .Times(AnyNumber());
+//
+// Foo(); // Exercises the code under test.
+// }
+class ScopedMockLog : public google::LogSink {
+ public:
+ // When a ScopedMockLog object is constructed, it starts to
+ // intercept logs.
+ ScopedMockLog() { AddLogSink(this); }
+
+ // When the object is destructed, it stops intercepting logs.
+ virtual ~ScopedMockLog() { RemoveLogSink(this); }
+
+ // Implements the mock method:
+ //
+ // void Log(LogSeverity severity, const string& file_path,
+ // const string& message);
+ //
+ // The second argument to Send() is the full path of the source file
+ // in which the LOG() was issued.
+ //
+  // Note that in a multi-threaded environment, all LOG() messages from a
+ // single thread will be handled in sequence, but that cannot be guaranteed
+ // for messages from different threads. In fact, if the same or multiple
+ // expectations are matched on two threads concurrently, their actions will
+ // be executed concurrently as well and may interleave.
+ MOCK_METHOD3(Log, void(google::LogSeverity severity,
+ const std::string& file_path,
+ const std::string& message));
+
+ private:
+ // Implements the send() virtual function in class LogSink.
+ // Whenever a LOG() statement is executed, this function will be
+ // invoked with information presented in the LOG().
+ //
+ // The method argument list is long and carries much information a
+ // test usually doesn't care about, so we trim the list before
+ // forwarding the call to Log(), which is much easier to use in
+ // tests.
+ //
+ // We still cannot call Log() directly, as it may invoke other LOG()
+ // messages, either due to Invoke, or due to an error logged in
+ // Google C++ Mocking Framework code, which would trigger a deadlock
+ // since a lock is held during send().
+ //
+ // Hence, we save the message for WaitTillSent() which will be called after
+ // the lock on send() is released, and we'll call Log() inside
+  // WaitTillSent(). Although only a single send() call may be running at a
+  // time, multiple WaitTillSent() calls (along with the one send() call) may
+  // be running simultaneously, so we ensure thread-safety of the exchange
+  // between send() and WaitTillSent(), and that for each message, LOG(),
+  // send(), WaitTillSent() and Log() are executed in the same thread.
+ virtual void send(google::LogSeverity severity,
+ const char* full_filename,
+ const char* base_filename, int line, const tm* tm_time,
+ const char* message, size_t message_len) {
+ // We are only interested in the log severity, full file name, and
+ // log message.
+ message_info_.severity = severity;
+ message_info_.file_path = full_filename;
+ message_info_.message = std::string(message, message_len);
+ }
+
+ // Implements the WaitTillSent() virtual function in class LogSink.
+ // It will be executed after send() and after the global logging lock is
+ // released, so calls within it (or rather within the Log() method called
+ // within) may also issue LOG() statements.
+ //
+ // LOG(), send(), WaitTillSent() and Log() will occur in the same thread for
+ // a given log message.
+ virtual void WaitTillSent() {
+ // First, and very importantly, we save a copy of the message being
+ // processed before calling Log(), since Log() may indirectly call send()
+ // and WaitTillSent() in the same thread again.
+ MessageInfo message_info = message_info_;
+ Log(message_info.severity, message_info.file_path, message_info.message);
+ }
+
+ // All relevant information about a logged message that needs to be passed
+ // from send() to WaitTillSent().
+ struct MessageInfo {
+ google::LogSeverity severity;
+ std::string file_path;
+ std::string message;
+ };
+ MessageInfo message_info_;
+};
+
+} // namespace testing
+
+#endif // GOOGLE_CERES_INTERNAL_MOCK_LOG_H_
diff --git a/internal/ceres/gmock_gtest_all.cc b/internal/ceres/gmock_gtest_all.cc
new file mode 100644
index 0000000..f7ead68
--- /dev/null
+++ b/internal/ceres/gmock_gtest_all.cc
@@ -0,0 +1,10554 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+// Google C++ Testing Framework (Google Test)
+//
+// Sometimes it's desirable to build Google Test by compiling a single file.
+// This file serves this purpose.
+
+// This line ensures that gtest.h can be compiled on its own, even
+// when it's fused.
+#include "gtest/gtest.h"
+
+// The following lines pull in the real gtest *.cc files.
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// Utilities for testing Google Test itself and code that uses Google Test
+// (e.g. frameworks built on top of Google Test).
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+#define GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+
+
+namespace testing {
+
+// This helper class can be used to mock out Google Test failure reporting
+// so that we can test Google Test or code that builds on Google Test.
+//
+// An object of this class appends a TestPartResult object to the
+// TestPartResultArray object given in the constructor whenever a Google Test
+// failure is reported. It can either intercept only failures that are
+// generated in the same thread that created this object or it can intercept
+// all generated failures. The scope of this mock object can be controlled with
+// the second argument to the two-argument constructor.
+class GTEST_API_ ScopedFakeTestPartResultReporter
+ : public TestPartResultReporterInterface {
+ public:
+ // The two possible mocking modes of this object.
+ enum InterceptMode {
+ INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures.
+ INTERCEPT_ALL_THREADS // Intercepts all failures.
+ };
+
+ // The c'tor sets this object as the test part result reporter used
+ // by Google Test. The 'result' parameter specifies where to report the
+ // results. This reporter will only catch failures generated in the current
+ // thread. DEPRECATED
+ explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result);
+
+ // Same as above, but you can choose the interception scope of this object.
+ ScopedFakeTestPartResultReporter(InterceptMode intercept_mode,
+ TestPartResultArray* result);
+
+ // The d'tor restores the previous test part result reporter.
+ virtual ~ScopedFakeTestPartResultReporter();
+
+ // Appends the TestPartResult object to the TestPartResultArray
+ // received in the constructor.
+ //
+ // This method is from the TestPartResultReporterInterface
+ // interface.
+ virtual void ReportTestPartResult(const TestPartResult& result);
+ private:
+ void Init();
+
+ const InterceptMode intercept_mode_;
+ TestPartResultReporterInterface* old_reporter_;
+ TestPartResultArray* const result_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter);
+};
+
+namespace internal {
+
+// A helper class for implementing EXPECT_FATAL_FAILURE() and
+// EXPECT_NONFATAL_FAILURE(). Its destructor verifies that the given
+// TestPartResultArray contains exactly one failure that has the given
+// type and contains the given substring. If that's not the case, a
+// non-fatal failure will be generated.
+class GTEST_API_ SingleFailureChecker {
+ public:
+ // The constructor remembers the arguments.
+ SingleFailureChecker(const TestPartResultArray* results,
+ TestPartResult::Type type,
+ const string& substr);
+ ~SingleFailureChecker();
+ private:
+ const TestPartResultArray* const results_;
+ const TestPartResult::Type type_;
+ const string substr_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker);
+};
+
+} // namespace internal
+
+} // namespace testing
+
+// A set of macros for testing Google Test assertions or code that's expected
+// to generate Google Test fatal failures. It verifies that the given
+// statement will cause exactly one fatal Google Test failure with 'substr'
+// being part of the failure message.
+//
+// There are two different versions of this macro. EXPECT_FATAL_FAILURE only
+// affects and considers failures generated in the current thread and
+// EXPECT_FATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
+//
+// The verification of the assertion is done correctly even when the statement
+// throws an exception or aborts the current function.
+//
+// Known restrictions:
+// - 'statement' cannot reference local non-static variables or
+// non-static members of the current object.
+// - 'statement' cannot return a value.
+// - You cannot stream a failure message to this macro.
+//
+// Note that even though the implementations of the following two
+// macros are much alike, we cannot refactor them to use a common
+// helper macro, due to some peculiarity in how the preprocessor
+// works. The AcceptsMacroThatExpandsToUnprotectedComma test in
+// gtest_unittest.cc will fail to compile if we do that.
+#define EXPECT_FATAL_FAILURE(statement, substr) \
+ do { \
+ class GTestExpectFatalFailureHelper {\
+ public:\
+ static void Execute() { statement; }\
+ };\
+ ::testing::TestPartResultArray gtest_failures;\
+ ::testing::internal::SingleFailureChecker gtest_checker(\
+ &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
+ {\
+ ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+ ::testing::ScopedFakeTestPartResultReporter:: \
+ INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
+ GTestExpectFatalFailureHelper::Execute();\
+ }\
+ } while (::testing::internal::AlwaysFalse())
+
+#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
+ do { \
+ class GTestExpectFatalFailureHelper {\
+ public:\
+ static void Execute() { statement; }\
+ };\
+ ::testing::TestPartResultArray gtest_failures;\
+ ::testing::internal::SingleFailureChecker gtest_checker(\
+ &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
+ {\
+ ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+ ::testing::ScopedFakeTestPartResultReporter:: \
+ INTERCEPT_ALL_THREADS, &gtest_failures);\
+ GTestExpectFatalFailureHelper::Execute();\
+ }\
+ } while (::testing::internal::AlwaysFalse())
+
+// A macro for testing Google Test assertions or code that's expected to
+// generate Google Test non-fatal failures. It asserts that the given
+// statement will cause exactly one non-fatal Google Test failure with 'substr'
+// being part of the failure message.
+//
+// There are two different versions of this macro. EXPECT_NONFATAL_FAILURE only
+// affects and considers failures generated in the current thread and
+// EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
+//
+// 'statement' is allowed to reference local variables and members of
+// the current object.
+//
+// The verification of the assertion is done correctly even when the statement
+// throws an exception or aborts the current function.
+//
+// Known restrictions:
+// - You cannot stream a failure message to this macro.
+//
+// Note that even though the implementations of the following two
+// macros are much alike, we cannot refactor them to use a common
+// helper macro, due to some peculiarity in how the preprocessor
+// works. If we do that, the code won't compile when the user gives
+// EXPECT_NONFATAL_FAILURE() a statement that contains a macro that
+// expands to code containing an unprotected comma. The
+// AcceptsMacroThatExpandsToUnprotectedComma test in gtest_unittest.cc
+// catches that.
+//
+// For the same reason, we have to write
+// if (::testing::internal::AlwaysTrue()) { statement; }
+// instead of
+// GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
+// to avoid an MSVC warning on unreachable code.
+#define EXPECT_NONFATAL_FAILURE(statement, substr) \
+ do {\
+ ::testing::TestPartResultArray gtest_failures;\
+ ::testing::internal::SingleFailureChecker gtest_checker(\
+ &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+ (substr));\
+ {\
+ ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+ ::testing::ScopedFakeTestPartResultReporter:: \
+ INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
+ if (::testing::internal::AlwaysTrue()) { statement; }\
+ }\
+ } while (::testing::internal::AlwaysFalse())
+
+#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
+ do {\
+ ::testing::TestPartResultArray gtest_failures;\
+ ::testing::internal::SingleFailureChecker gtest_checker(\
+ &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+ (substr));\
+ {\
+ ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+ ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS,\
+ &gtest_failures);\
+ if (::testing::internal::AlwaysTrue()) { statement; }\
+ }\
+ } while (::testing::internal::AlwaysFalse())
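+
+// For instance (a sketch; the substring only needs to occur somewhere in the
+// single expected failure message):
+//
+//   EXPECT_NONFATAL_FAILURE(ADD_FAILURE() << "oops", "oops");
+//   EXPECT_FATAL_FAILURE(FAIL() << "boom", "boom");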
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+
+#include <ctype.h>
+#include <math.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <wchar.h>
+#include <wctype.h>
+
+#include <algorithm>
+#include <ostream> // NOLINT
+#include <sstream>
+#include <vector>
+
+#if GTEST_OS_LINUX
+
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+
+# include <fcntl.h> // NOLINT
+# include <limits.h> // NOLINT
+# include <sched.h> // NOLINT
+// Declares vsnprintf(). This header is not available on Windows.
+# include <strings.h> // NOLINT
+# include <sys/mman.h> // NOLINT
+# include <sys/time.h> // NOLINT
+# include <unistd.h> // NOLINT
+# include <string>
+
+#elif GTEST_OS_SYMBIAN
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include <sys/time.h> // NOLINT
+
+#elif GTEST_OS_ZOS
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include <sys/time.h> // NOLINT
+
+// On z/OS we additionally need strings.h for strcasecmp.
+# include <strings.h> // NOLINT
+
+#elif GTEST_OS_WINDOWS_MOBILE // We are on Windows CE.
+
+# include <windows.h> // NOLINT
+
+#elif GTEST_OS_WINDOWS // We are on Windows proper.
+
+# include <io.h> // NOLINT
+# include <sys/timeb.h> // NOLINT
+# include <sys/types.h> // NOLINT
+# include <sys/stat.h> // NOLINT
+
+# if GTEST_OS_WINDOWS_MINGW
+// MinGW has gettimeofday() but not _ftime64().
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+// TODO(kenton@google.com): There are other ways to get the time on
+// Windows, like GetTickCount() or GetSystemTimeAsFileTime(). MinGW
+// supports these. Consider using them instead.
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include <sys/time.h> // NOLINT
+# endif // GTEST_OS_WINDOWS_MINGW
+
+// cpplint thinks that the header is already included, so we want to
+// silence it.
+# include <windows.h> // NOLINT
+
+#else
+
+// Assume other platforms have gettimeofday().
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+
+// cpplint thinks that the header is already included, so we want to
+// silence it.
+# include <sys/time.h> // NOLINT
+# include <unistd.h> // NOLINT
+
+#endif // GTEST_OS_LINUX
+
+#if GTEST_HAS_EXCEPTIONS
+# include <stdexcept>
+#endif
+
+#if GTEST_CAN_STREAM_RESULTS_
+# include <arpa/inet.h> // NOLINT
+# include <netdb.h> // NOLINT
+#endif
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Utility functions and classes used by the Google C++ testing framework.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// This file contains purely Google Test's internal implementation. Please
+// DO NOT #INCLUDE IT IN A USER PROGRAM.
+
+#ifndef GTEST_SRC_GTEST_INTERNAL_INL_H_
+#define GTEST_SRC_GTEST_INTERNAL_INL_H_
+
+// GTEST_IMPLEMENTATION_ is defined to 1 iff the current translation unit is
+// part of Google Test's implementation; otherwise it's undefined.
+#if !GTEST_IMPLEMENTATION_
+// A user is trying to include this from his code - just say no.
+# error "gtest-internal-inl.h is part of Google Test's internal implementation."
+# error "It must not be included except by Google Test itself."
+#endif // GTEST_IMPLEMENTATION_
+
+#ifndef _WIN32_WCE
+# include <errno.h>
+#endif // !_WIN32_WCE
+#include <stddef.h>
+#include <stdlib.h> // For strtoll/_strtoul64/malloc/free.
+#include <string.h> // For memmove.
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+
+#if GTEST_OS_WINDOWS
+# include <windows.h> // NOLINT
+#endif // GTEST_OS_WINDOWS
+
+
+namespace testing {
+
+// Declares the flags.
+//
+// We don't want the users to modify this flag in the code, but want
+// Google Test's own unit tests to be able to access it. Therefore we
+// declare it here as opposed to in gtest.h.
+GTEST_DECLARE_bool_(death_test_use_fork);
+
+namespace internal {
+
+// The value of GetTestTypeId() as seen from within the Google Test
+// library. This is solely for testing GetTestTypeId().
+GTEST_API_ extern const TypeId kTestTypeIdInGoogleTest;
+
+// Names of the flags (needed for parsing Google Test flags).
+const char kAlsoRunDisabledTestsFlag[] = "also_run_disabled_tests";
+const char kBreakOnFailureFlag[] = "break_on_failure";
+const char kCatchExceptionsFlag[] = "catch_exceptions";
+const char kColorFlag[] = "color";
+const char kFilterFlag[] = "filter";
+const char kListTestsFlag[] = "list_tests";
+const char kOutputFlag[] = "output";
+const char kPrintTimeFlag[] = "print_time";
+const char kRandomSeedFlag[] = "random_seed";
+const char kRepeatFlag[] = "repeat";
+const char kShuffleFlag[] = "shuffle";
+const char kStackTraceDepthFlag[] = "stack_trace_depth";
+const char kStreamResultToFlag[] = "stream_result_to";
+const char kThrowOnFailureFlag[] = "throw_on_failure";
+
+// A valid random seed must be in [1, kMaxRandomSeed].
+const int kMaxRandomSeed = 99999;
+
+// g_help_flag is true iff the --help flag or an equivalent form is
+// specified on the command line.
+GTEST_API_ extern bool g_help_flag;
+
+// Returns the current time in milliseconds.
+GTEST_API_ TimeInMillis GetTimeInMillis();
+
+// Returns true iff Google Test should use colors in the output.
+GTEST_API_ bool ShouldUseColor(bool stdout_is_tty);
+
+// Formats the given time in milliseconds as seconds.
+GTEST_API_ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms);
+
+// Parses a string for an Int32 flag, in the form of "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+GTEST_API_ bool ParseInt32Flag(
+ const char* str, const char* flag, Int32* value);
+
+// Returns a random seed in range [1, kMaxRandomSeed] based on the
+// given --gtest_random_seed flag value.
+inline int GetRandomSeedFromFlag(Int32 random_seed_flag) {
+ const unsigned int raw_seed = (random_seed_flag == 0) ?
+ static_cast<unsigned int>(GetTimeInMillis()) :
+ static_cast<unsigned int>(random_seed_flag);
+
+ // Normalizes the actual seed to range [1, kMaxRandomSeed] such that
+ // it's easy to type.
+ const int normalized_seed =
+ static_cast<int>((raw_seed - 1U) %
+ static_cast<unsigned int>(kMaxRandomSeed)) + 1;
+ return normalized_seed;
+}
+
+// Returns the first valid random seed after 'seed'. The behavior is
+// undefined if 'seed' is invalid. The seed after kMaxRandomSeed is
+// considered to be 1.
+inline int GetNextRandomSeed(int seed) {
+ GTEST_CHECK_(1 <= seed && seed <= kMaxRandomSeed)
+ << "Invalid random seed " << seed << " - must be in [1, "
+ << kMaxRandomSeed << "].";
+ const int next_seed = seed + 1;
+ return (next_seed > kMaxRandomSeed) ? 1 : next_seed;
+}
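+
+// Illustrative note: both helpers above keep seeds inside
+// [1, kMaxRandomSeed].  For example,
+//
+//   GetRandomSeedFromFlag(0);          // derives a seed from GetTimeInMillis()
+//   GetRandomSeedFromFlag(42);         // == 42
+//   GetNextRandomSeed(kMaxRandomSeed); // wraps around to 1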
+
+// This class saves the values of all Google Test flags in its c'tor, and
+// restores them in its d'tor.
+class GTestFlagSaver {
+ public:
+ // The c'tor.
+ GTestFlagSaver() {
+ also_run_disabled_tests_ = GTEST_FLAG(also_run_disabled_tests);
+ break_on_failure_ = GTEST_FLAG(break_on_failure);
+ catch_exceptions_ = GTEST_FLAG(catch_exceptions);
+ color_ = GTEST_FLAG(color);
+ death_test_style_ = GTEST_FLAG(death_test_style);
+ death_test_use_fork_ = GTEST_FLAG(death_test_use_fork);
+ filter_ = GTEST_FLAG(filter);
+ internal_run_death_test_ = GTEST_FLAG(internal_run_death_test);
+ list_tests_ = GTEST_FLAG(list_tests);
+ output_ = GTEST_FLAG(output);
+ print_time_ = GTEST_FLAG(print_time);
+ random_seed_ = GTEST_FLAG(random_seed);
+ repeat_ = GTEST_FLAG(repeat);
+ shuffle_ = GTEST_FLAG(shuffle);
+ stack_trace_depth_ = GTEST_FLAG(stack_trace_depth);
+ stream_result_to_ = GTEST_FLAG(stream_result_to);
+ throw_on_failure_ = GTEST_FLAG(throw_on_failure);
+ }
+
+ // The d'tor is not virtual. DO NOT INHERIT FROM THIS CLASS.
+ ~GTestFlagSaver() {
+ GTEST_FLAG(also_run_disabled_tests) = also_run_disabled_tests_;
+ GTEST_FLAG(break_on_failure) = break_on_failure_;
+ GTEST_FLAG(catch_exceptions) = catch_exceptions_;
+ GTEST_FLAG(color) = color_;
+ GTEST_FLAG(death_test_style) = death_test_style_;
+ GTEST_FLAG(death_test_use_fork) = death_test_use_fork_;
+ GTEST_FLAG(filter) = filter_;
+ GTEST_FLAG(internal_run_death_test) = internal_run_death_test_;
+ GTEST_FLAG(list_tests) = list_tests_;
+ GTEST_FLAG(output) = output_;
+ GTEST_FLAG(print_time) = print_time_;
+ GTEST_FLAG(random_seed) = random_seed_;
+ GTEST_FLAG(repeat) = repeat_;
+ GTEST_FLAG(shuffle) = shuffle_;
+ GTEST_FLAG(stack_trace_depth) = stack_trace_depth_;
+ GTEST_FLAG(stream_result_to) = stream_result_to_;
+ GTEST_FLAG(throw_on_failure) = throw_on_failure_;
+ }
+ private:
+ // Fields for saving the original values of flags.
+ bool also_run_disabled_tests_;
+ bool break_on_failure_;
+ bool catch_exceptions_;
+ String color_;
+ String death_test_style_;
+ bool death_test_use_fork_;
+ String filter_;
+ String internal_run_death_test_;
+ bool list_tests_;
+ String output_;
+ bool print_time_;
+ bool pretty_;
+ internal::Int32 random_seed_;
+ internal::Int32 repeat_;
+ bool shuffle_;
+ internal::Int32 stack_trace_depth_;
+ String stream_result_to_;
+ bool throw_on_failure_;
+} GTEST_ATTRIBUTE_UNUSED_;
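+
+// Illustrative usage sketch (hypothetical, not taken from gtest's own
+// tests): placing a GTestFlagSaver on the stack scopes any flag changes
+// made inside the block:
+//
+//   {
+//     GTestFlagSaver saver;          // c'tor snapshots all GTEST_FLAG values
+//     GTEST_FLAG(filter) = "Foo.*";  // temporary override
+//     // ... run something that honors the override ...
+//   }                                // d'tor restores the saved values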
+
+// Converts a Unicode code point to a narrow string in UTF-8 encoding.
+// code_point parameter is of type UInt32 because wchar_t may not be
+// wide enough to contain a code point.
+// The output buffer str must contain at least 32 characters.
+// The function returns the address of the output buffer.
+// If the code_point is not a valid Unicode code point
+// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'.
+GTEST_API_ char* CodePointToUtf8(UInt32 code_point, char* str);
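+
+// Illustrative behaviour (sketch):
+//
+//   char buf[32];
+//   CodePointToUtf8(0x41, buf);      // "A" (one byte)
+//   CodePointToUtf8(0x4E2D, buf);    // a three-byte UTF-8 sequence
+//   CodePointToUtf8(0x110000, buf);  // "(Invalid Unicode 0x110000)", as above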
+
+// Converts a wide string to a narrow string in UTF-8 encoding.
+// The wide string is assumed to have the following encoding:
+// UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
+// UTF-32 if sizeof(wchar_t) == 4 (on Linux)
+// Parameter str points to a null-terminated wide string.
+// Parameter num_chars may additionally limit the number
+// of wchar_t characters processed. -1 is used when the entire string
+// should be processed.
+// If the string contains code points that are not valid Unicode code points
+// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF-16 encoding
+// and contains invalid UTF-16 surrogate pairs, values in those pairs
+// will be encoded as individual Unicode characters from the Basic
+// Multilingual Plane.
+GTEST_API_ String WideStringToUtf8(const wchar_t* str, int num_chars);
+
+// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file
+// if the variable is present. If a file already exists at this location, this
+// function will write over it. If the variable is present, but the file cannot
+// be created, prints an error and exits.
+void WriteToShardStatusFileIfNeeded();
+
+// Checks whether sharding is enabled by examining the relevant
+// environment variable values. If the variables are present,
+// but inconsistent (e.g., shard_index >= total_shards), prints
+// an error and exits. If in_subprocess_for_death_test, sharding is
+// disabled because it must only be applied to the original test
+// process. Otherwise, we could filter out death tests we intended to execute.
+GTEST_API_ bool ShouldShard(const char* total_shards_str,
+ const char* shard_index_str,
+ bool in_subprocess_for_death_test);
+
+// Parses the environment variable var as an Int32. If it is unset,
+// returns default_val. If it is not an Int32, prints an error and
+// aborts.
+GTEST_API_ Int32 Int32FromEnvOrDie(const char* env_var, Int32 default_val);
+
+// Given the total number of shards, the shard index, and the test id,
+// returns true iff the test should be run on this shard. The test id is
+// some arbitrary but unique non-negative integer assigned to each test
+// method. Assumes that 0 <= shard_index < total_shards.
+GTEST_API_ bool ShouldRunTestOnShard(
+ int total_shards, int shard_index, int test_id);
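+
+// Illustrative note: sharding is driven by the GTEST_TOTAL_SHARDS and
+// GTEST_SHARD_INDEX environment variables, e.g.
+//
+//   GTEST_TOTAL_SHARDS=3 GTEST_SHARD_INDEX=1 ./my_test
+//
+// makes this invocation run only the tests for which
+// ShouldRunTestOnShard(3, 1, test_id) returns true.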
+
+// STL container utilities.
+
+// Returns the number of elements in the given container that satisfy
+// the given predicate.
+template <class Container, typename Predicate>
+inline int CountIf(const Container& c, Predicate predicate) {
+ // Implemented as an explicit loop since std::count_if() in libCstd on
+ // Solaris has a non-standard signature.
+ int count = 0;
+ for (typename Container::const_iterator it = c.begin(); it != c.end(); ++it) {
+ if (predicate(*it))
+ ++count;
+ }
+ return count;
+}
+
+// Applies a function/functor to each element in the container.
+template <class Container, typename Functor>
+void ForEach(const Container& c, Functor functor) {
+ std::for_each(c.begin(), c.end(), functor);
+}
+
+// Returns the i-th element of the vector, or default_value if i is not
+// in range [0, v.size()).
+template <typename E>
+inline E GetElementOr(const std::vector<E>& v, int i, E default_value) {
+ return (i < 0 || i >= static_cast<int>(v.size())) ? default_value : v[i];
+}
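+
+// Illustrative sketch of the helpers above (IsNegative is a hypothetical
+// predicate, not part of gtest):
+//
+//   std::vector<int> v;
+//   v.push_back(3);
+//   v.push_back(-1);
+//   CountIf(v, IsNegative);  // == 1
+//   GetElementOr(v, 5, 0);   // index out of range, so returns 0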
+
+// Performs an in-place shuffle of a range of the vector's elements.
+// 'begin' and 'end' are element indices as an STL-style range;
+// i.e. [begin, end) are shuffled, where 'end' == size() means to
+// shuffle to the end of the vector.
+template <typename E>
+void ShuffleRange(internal::Random* random, int begin, int end,
+ std::vector<E>* v) {
+ const int size = static_cast<int>(v->size());
+ GTEST_CHECK_(0 <= begin && begin <= size)
+ << "Invalid shuffle range start " << begin << ": must be in range [0, "
+ << size << "].";
+ GTEST_CHECK_(begin <= end && end <= size)
+ << "Invalid shuffle range finish " << end << ": must be in range ["
+ << begin << ", " << size << "].";
+
+ // Fisher-Yates shuffle, from
+ // http://en.wikipedia.org/wiki/Fisher-Yates_shuffle
+ for (int range_width = end - begin; range_width >= 2; range_width--) {
+ const int last_in_range = begin + range_width - 1;
+ const int selected = begin + random->Generate(range_width);
+ std::swap((*v)[selected], (*v)[last_in_range]);
+ }
+}
+
+// Performs an in-place shuffle of the vector's elements.
+template <typename E>
+inline void Shuffle(internal::Random* random, std::vector<E>* v) {
+ ShuffleRange(random, 0, static_cast<int>(v->size()), v);
+}
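+
+// Illustrative note: ShuffleRange() only permutes the half-open range
+// [begin, end), so e.g. ShuffleRange(&rng, 1, 3, &v) on a four-element
+// vector may swap v[1] and v[2] but leaves v[0] and v[3] where they are;
+// Shuffle() is simply ShuffleRange() over the whole vector.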
+
+// A function for deleting an object. Handy for being used as a
+// functor.
+template <typename T>
+static void Delete(T* x) {
+ delete x;
+}
+
+// A predicate that checks the key of a TestProperty against a known key.
+//
+// TestPropertyKeyIs is copyable.
+class TestPropertyKeyIs {
+ public:
+ // Constructor.
+ //
+ // TestPropertyKeyIs has NO default constructor.
+ explicit TestPropertyKeyIs(const char* key)
+ : key_(key) {}
+
+ // Returns true iff the key of the test property matches key_.
+ bool operator()(const TestProperty& test_property) const {
+ return String(test_property.key()).Compare(key_) == 0;
+ }
+
+ private:
+ String key_;
+};
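+
+// Illustrative sketch ('properties' is a hypothetical
+// std::vector<TestProperty>): the functor is meant to be combined with the
+// CountIf() helper above, e.g.
+//
+//   const bool has_date = CountIf(properties, TestPropertyKeyIs("date")) > 0;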
+
+// Class UnitTestOptions.
+//
+// This class contains functions for processing options the user
+// specifies when running the tests. It has only static members.
+//
+// In most cases, the user can specify an option using either an
+// environment variable or a command line flag. E.g. you can set the
+// test filter using either GTEST_FILTER or --gtest_filter. If both
+// the variable and the flag are present, the latter overrides the
+// former.
+class GTEST_API_ UnitTestOptions {
+ public:
+ // Functions for processing the gtest_output flag.
+
+ // Returns the output format, or "" for normal printed output.
+ static String GetOutputFormat();
+
+ // Returns the absolute path of the requested output file, or the
+ // default (test_detail.xml in the original working directory) if
+ // none was explicitly specified.
+ static String GetAbsolutePathToOutputFile();
+
+ // Functions for processing the gtest_filter flag.
+
+ // Returns true iff the wildcard pattern matches the string. The
+ // first ':' or '\0' character in pattern marks the end of it.
+ //
+ // This recursive algorithm isn't very efficient, but is clear and
+ // works well enough for matching test names, which are short.
+ static bool PatternMatchesString(const char *pattern, const char *str);
+
+ // Returns true iff the user-specified filter matches the test case
+ // name and the test name.
+ static bool FilterMatchesTest(const String &test_case_name,
+ const String &test_name);
+
+#if GTEST_OS_WINDOWS
+ // Function for supporting the gtest_catch_exception flag.
+
+ // Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
+ // given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
+ // This function is useful as an __except condition.
+ static int GTestShouldProcessSEH(DWORD exception_code);
+#endif // GTEST_OS_WINDOWS
+
+ // Returns true if "name" matches the ':' separated list of glob-style
+ // filters in "filter".
+ static bool MatchesFilter(const String& name, const char* filter);
+};
+
+// Returns the current application's name, removing directory path if that
+// is present. Used by UnitTestOptions::GetAbsolutePathToOutputFile.
+GTEST_API_ FilePath GetCurrentExecutableName();
+
+// The role interface for getting the OS stack trace as a string.
+class OsStackTraceGetterInterface {
+ public:
+ OsStackTraceGetterInterface() {}
+ virtual ~OsStackTraceGetterInterface() {}
+
+ // Returns the current OS stack trace as a String. Parameters:
+ //
+ // max_depth - the maximum number of stack frames to be included
+ // in the trace.
+ // skip_count - the number of top frames to be skipped; doesn't count
+ // against max_depth.
+ virtual String CurrentStackTrace(int max_depth, int skip_count) = 0;
+
+ // UponLeavingGTest() should be called immediately before Google Test calls
+ // user code. It saves some information about the current stack that
+ // CurrentStackTrace() will use to find and hide Google Test stack frames.
+ virtual void UponLeavingGTest() = 0;
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetterInterface);
+};
+
+// A working implementation of the OsStackTraceGetterInterface interface.
+class OsStackTraceGetter : public OsStackTraceGetterInterface {
+ public:
+ OsStackTraceGetter() : caller_frame_(NULL) {}
+ virtual String CurrentStackTrace(int max_depth, int skip_count);
+ virtual void UponLeavingGTest();
+
+ // This string is inserted in place of stack frames that are part of
+ // Google Test's implementation.
+ static const char* const kElidedFramesMarker;
+
+ private:
+ Mutex mutex_; // protects all internal state
+
+ // We save the stack frame below the frame that calls user code.
+ // We do this because the address of the frame immediately below
+ // the user code changes between the call to UponLeavingGTest()
+ // and any calls to CurrentStackTrace() from within the user code.
+ void* caller_frame_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetter);
+};
+
+// Information about a Google Test trace point.
+struct TraceInfo {
+ const char* file;
+ int line;
+ String message;
+};
+
+// This is the default global test part result reporter used in UnitTestImpl.
+// This class should only be used by UnitTestImpl.
+class DefaultGlobalTestPartResultReporter
+ : public TestPartResultReporterInterface {
+ public:
+ explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test);
+ // Implements the TestPartResultReporterInterface. Reports the test part
+ // result in the current test.
+ virtual void ReportTestPartResult(const TestPartResult& result);
+
+ private:
+ UnitTestImpl* const unit_test_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultGlobalTestPartResultReporter);
+};
+
+// This is the default per thread test part result reporter used in
+// UnitTestImpl. This class should only be used by UnitTestImpl.
+class DefaultPerThreadTestPartResultReporter
+ : public TestPartResultReporterInterface {
+ public:
+ explicit DefaultPerThreadTestPartResultReporter(UnitTestImpl* unit_test);
+ // Implements the TestPartResultReporterInterface. The implementation just
+ // delegates to the current global test part result reporter of *unit_test_.
+ virtual void ReportTestPartResult(const TestPartResult& result);
+
+ private:
+ UnitTestImpl* const unit_test_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultPerThreadTestPartResultReporter);
+};
+
+// The private implementation of the UnitTest class. We don't protect
+// the methods under a mutex, as this class is not accessible by a
+// user and the UnitTest class that delegates work to this class does
+// proper locking.
+class GTEST_API_ UnitTestImpl {
+ public:
+ explicit UnitTestImpl(UnitTest* parent);
+ virtual ~UnitTestImpl();
+
+ // There are two different ways to register your own TestPartResultReporter.
+ // You can register your own reporter to listen either only for test results
+ // from the current thread or for results from all threads.
+ // By default, each per-thread test result reporter just passes a new
+ // TestPartResult to the global test result reporter, which registers the
+ // test part result for the currently running test.
+
+ // Returns the global test part result reporter.
+ TestPartResultReporterInterface* GetGlobalTestPartResultReporter();
+
+ // Sets the global test part result reporter.
+ void SetGlobalTestPartResultReporter(
+ TestPartResultReporterInterface* reporter);
+
+ // Returns the test part result reporter for the current thread.
+ TestPartResultReporterInterface* GetTestPartResultReporterForCurrentThread();
+
+ // Sets the test part result reporter for the current thread.
+ void SetTestPartResultReporterForCurrentThread(
+ TestPartResultReporterInterface* reporter);
+
+ // Gets the number of successful test cases.
+ int successful_test_case_count() const;
+
+ // Gets the number of failed test cases.
+ int failed_test_case_count() const;
+
+ // Gets the number of all test cases.
+ int total_test_case_count() const;
+
+ // Gets the number of all test cases that contain at least one test
+ // that should run.
+ int test_case_to_run_count() const;
+
+ // Gets the number of successful tests.
+ int successful_test_count() const;
+
+ // Gets the number of failed tests.
+ int failed_test_count() const;
+
+ // Gets the number of disabled tests.
+ int disabled_test_count() const;
+
+ // Gets the number of all tests.
+ int total_test_count() const;
+
+ // Gets the number of tests that should run.
+ int test_to_run_count() const;
+
+ // Gets the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+ // Returns true iff the unit test passed (i.e. all test cases passed).
+ bool Passed() const { return !Failed(); }
+
+ // Returns true iff the unit test failed (i.e. some test case failed
+ // or something outside of all tests failed).
+ bool Failed() const {
+ return failed_test_case_count() > 0 || ad_hoc_test_result()->Failed();
+ }
+
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ const TestCase* GetTestCase(int i) const {
+ const int index = GetElementOr(test_case_indices_, i, -1);
+ return index < 0 ? NULL : test_cases_[index];
+ }
+
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ TestCase* GetMutableTestCase(int i) {
+ const int index = GetElementOr(test_case_indices_, i, -1);
+ return index < 0 ? NULL : test_cases_[index];
+ }
+
+ // Provides access to the event listener list.
+ TestEventListeners* listeners() { return &listeners_; }
+
+ // Returns the TestResult for the test that's currently running, or
+ // the TestResult for the ad hoc test if no test is running.
+ TestResult* current_test_result();
+
+ // Returns the TestResult for the ad hoc test.
+ const TestResult* ad_hoc_test_result() const { return &ad_hoc_test_result_; }
+
+ // Sets the OS stack trace getter.
+ //
+ // Does nothing if the input and the current OS stack trace getter
+ // are the same; otherwise, deletes the old getter and makes the
+ // input the current getter.
+ void set_os_stack_trace_getter(OsStackTraceGetterInterface* getter);
+
+ // Returns the current OS stack trace getter if it is not NULL;
+ // otherwise, creates an OsStackTraceGetter, makes it the current
+ // getter, and returns it.
+ OsStackTraceGetterInterface* os_stack_trace_getter();
+
+ // Returns the current OS stack trace as a String.
+ //
+ // The maximum number of stack frames to be included is specified by
+ // the gtest_stack_trace_depth flag. The skip_count parameter
+ // specifies the number of top frames to be skipped, which doesn't
+ // count against the number of frames to be included.
+ //
+ // For example, if Foo() calls Bar(), which in turn calls
+ // CurrentOsStackTraceExceptTop(1), Foo() will be included in the
+ // trace but Bar() and CurrentOsStackTraceExceptTop() won't.
+ String CurrentOsStackTraceExceptTop(int skip_count);
+
+ // Finds and returns a TestCase with the given name. If one doesn't
+ // exist, creates one and returns it.
+ //
+ // Arguments:
+ //
+ // test_case_name: name of the test case
+ // type_param: the name of the test's type parameter, or NULL if
+ // this is not a typed or a type-parameterized test.
+ // set_up_tc: pointer to the function that sets up the test case
+ // tear_down_tc: pointer to the function that tears down the test case
+ TestCase* GetTestCase(const char* test_case_name,
+ const char* type_param,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc);
+
+ // Adds a TestInfo to the unit test.
+ //
+ // Arguments:
+ //
+ // set_up_tc: pointer to the function that sets up the test case
+ // tear_down_tc: pointer to the function that tears down the test case
+ // test_info: the TestInfo object
+ void AddTestInfo(Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc,
+ TestInfo* test_info) {
+ // In order to support thread-safe death tests, we need to
+ // remember the original working directory when the test program
+ // was first invoked. We cannot do this in RUN_ALL_TESTS(), as
+ // the user may have changed the current directory before calling
+ // RUN_ALL_TESTS(). Therefore we capture the current directory in
+ // AddTestInfo(), which is called to register a TEST or TEST_F
+ // before main() is reached.
+ if (original_working_dir_.IsEmpty()) {
+ original_working_dir_.Set(FilePath::GetCurrentDir());
+ GTEST_CHECK_(!original_working_dir_.IsEmpty())
+ << "Failed to get the current working directory.";
+ }
+
+ GetTestCase(test_info->test_case_name(),
+ test_info->type_param(),
+ set_up_tc,
+ tear_down_tc)->AddTestInfo(test_info);
+ }
+
+#if GTEST_HAS_PARAM_TEST
+ // Returns ParameterizedTestCaseRegistry object used to keep track of
+ // value-parameterized tests and instantiate and register them.
+ internal::ParameterizedTestCaseRegistry& parameterized_test_registry() {
+ return parameterized_test_registry_;
+ }
+#endif // GTEST_HAS_PARAM_TEST
+
+ // Sets the TestCase object for the test that's currently running.
+ void set_current_test_case(TestCase* a_current_test_case) {
+ current_test_case_ = a_current_test_case;
+ }
+
+ // Sets the TestInfo object for the test that's currently running. If
+ // current_test_info is NULL, the assertion results will be stored in
+ // ad_hoc_test_result_.
+ void set_current_test_info(TestInfo* a_current_test_info) {
+ current_test_info_ = a_current_test_info;
+ }
+
+ // Registers all parameterized tests defined using TEST_P and
+ // INSTANTIATE_TEST_CASE_P, creating regular tests for each test/parameter
+ // combination. This method can be called more than once; it has guards
+ // protecting from registering the tests more than once. If
+ // value-parameterized tests are disabled, RegisterParameterizedTests is
+ // present but does nothing.
+ void RegisterParameterizedTests();
+
+ // Runs all tests in this UnitTest object, prints the result, and
+ // returns true if all tests are successful. If any exception is
+ // thrown during a test, this test is considered to be failed, but
+ // the rest of the tests will still be run.
+ bool RunAllTests();
+
+ // Clears the results of all tests, except the ad hoc tests.
+ void ClearNonAdHocTestResult() {
+ ForEach(test_cases_, TestCase::ClearTestCaseResult);
+ }
+
+ // Clears the results of ad-hoc test assertions.
+ void ClearAdHocTestResult() {
+ ad_hoc_test_result_.Clear();
+ }
+
+ enum ReactionToSharding {
+ HONOR_SHARDING_PROTOCOL,
+ IGNORE_SHARDING_PROTOCOL
+ };
+
+ // Matches the full name of each test against the user-specified
+ // filter to decide whether the test should run, then records the
+ // result in each TestCase and TestInfo object.
+ // If shard_tests == HONOR_SHARDING_PROTOCOL, further filters tests
+ // based on sharding variables in the environment.
+ // Returns the number of tests that should run.
+ int FilterTests(ReactionToSharding shard_tests);
+
+ // Prints the names of the tests matching the user-specified filter flag.
+ void ListTestsMatchingFilter();
+
+ const TestCase* current_test_case() const { return current_test_case_; }
+ TestInfo* current_test_info() { return current_test_info_; }
+ const TestInfo* current_test_info() const { return current_test_info_; }
+
+ // Returns the vector of environments that need to be set-up/torn-down
+ // before/after the tests are run.
+ std::vector<Environment*>& environments() { return environments_; }
+
+ // Getters for the per-thread Google Test trace stack.
+ std::vector<TraceInfo>& gtest_trace_stack() {
+ return *(gtest_trace_stack_.pointer());
+ }
+ const std::vector<TraceInfo>& gtest_trace_stack() const {
+ return gtest_trace_stack_.get();
+ }
+
+#if GTEST_HAS_DEATH_TEST
+ void InitDeathTestSubprocessControlInfo() {
+ internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag());
+ }
+ // Returns a pointer to the parsed --gtest_internal_run_death_test
+ // flag, or NULL if that flag was not specified.
+ // This information is useful only in a death test child process.
+ // Must not be called before a call to InitGoogleTest.
+ const InternalRunDeathTestFlag* internal_run_death_test_flag() const {
+ return internal_run_death_test_flag_.get();
+ }
+
+ // Returns a pointer to the current death test factory.
+ internal::DeathTestFactory* death_test_factory() {
+ return death_test_factory_.get();
+ }
+
+ void SuppressTestEventsIfInSubprocess();
+
+ friend class ReplaceDeathTestFactory;
+#endif // GTEST_HAS_DEATH_TEST
+
+ // Initializes the event listener performing XML output as specified by
+ // UnitTestOptions. Must not be called before InitGoogleTest.
+ void ConfigureXmlOutput();
+
+#if GTEST_CAN_STREAM_RESULTS_
+ // Initializes the event listener for streaming test results to a socket.
+ // Must not be called before InitGoogleTest.
+ void ConfigureStreamingOutput();
+#endif
+
+ // Performs initialization dependent upon flag values obtained in
+ // ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to
+ // ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest
+ // this function is also called from RunAllTests. Since this function can be
+ // called more than once, it has to be idempotent.
+ void PostFlagParsingInit();
+
+ // Gets the random seed used at the start of the current test iteration.
+ int random_seed() const { return random_seed_; }
+
+ // Gets the random number generator.
+ internal::Random* random() { return &random_; }
+
+ // Shuffles all test cases, and the tests within each test case,
+ // making sure that death tests are still run first.
+ void ShuffleTests();
+
+ // Restores the test cases and tests to their order before the first shuffle.
+ void UnshuffleTests();
+
+ // Returns the value of GTEST_FLAG(catch_exceptions) at the moment
+ // UnitTest::Run() starts.
+ bool catch_exceptions() const { return catch_exceptions_; }
+
+ private:
+ friend class ::testing::UnitTest;
+
+ // Used by UnitTest::Run() to capture the state of
+ // GTEST_FLAG(catch_exceptions) at the moment it starts.
+ void set_catch_exceptions(bool value) { catch_exceptions_ = value; }
+
+ // The UnitTest object that owns this implementation object.
+ UnitTest* const parent_;
+
+ // The working directory when the first TEST() or TEST_F() was
+ // executed.
+ internal::FilePath original_working_dir_;
+
+ // The default test part result reporters.
+ DefaultGlobalTestPartResultReporter default_global_test_part_result_reporter_;
+ DefaultPerThreadTestPartResultReporter
+ default_per_thread_test_part_result_reporter_;
+
+ // Points to (but doesn't own) the global test part result reporter.
+ TestPartResultReporterInterface* global_test_part_result_repoter_;
+
+ // Protects read and write access to global_test_part_result_reporter_.
+ internal::Mutex global_test_part_result_reporter_mutex_;
+
+ // Points to (but doesn't own) the per-thread test part result reporter.
+ internal::ThreadLocal<TestPartResultReporterInterface*>
+ per_thread_test_part_result_reporter_;
+
+ // The vector of environments that need to be set-up/torn-down
+ // before/after the tests are run.
+ std::vector<Environment*> environments_;
+
+ // The vector of TestCases in their original order. It owns the
+ // elements in the vector.
+ std::vector<TestCase*> test_cases_;
+
+ // Provides a level of indirection for the test case list to allow
+ // easy shuffling and restoring the test case order. The i-th
+ // element of this vector is the index of the i-th test case in the
+ // shuffled order.
+ std::vector<int> test_case_indices_;
+
+#if GTEST_HAS_PARAM_TEST
+ // ParameterizedTestRegistry object used to register value-parameterized
+ // tests.
+ internal::ParameterizedTestCaseRegistry parameterized_test_registry_;
+
+ // Indicates whether RegisterParameterizedTests() has been called already.
+ bool parameterized_tests_registered_;
+#endif // GTEST_HAS_PARAM_TEST
+
+ // Index of the last death test case registered. Initially -1.
+ int last_death_test_case_;
+
+ // This points to the TestCase for the currently running test. It
+ // changes as Google Test goes through one test case after another.
+ // When no test is running, this is set to NULL and Google Test
+ // stores assertion results in ad_hoc_test_result_. Initially NULL.
+ TestCase* current_test_case_;
+
+ // This points to the TestInfo for the currently running test. It
+ // changes as Google Test goes through one test after another. When
+ // no test is running, this is set to NULL and Google Test stores
+ // assertion results in ad_hoc_test_result_. Initially NULL.
+ TestInfo* current_test_info_;
+
+ // Normally, a user only writes assertions inside a TEST or TEST_F,
+ // or inside a function called by a TEST or TEST_F. Since Google
+ // Test keeps track of which test is currently running, it can
+ // associate such an assertion with the test it belongs to.
+ //
+ // If an assertion is encountered when no TEST or TEST_F is running,
+ // Google Test attributes the assertion result to an imaginary "ad hoc"
+ // test, and records the result in ad_hoc_test_result_.
+ TestResult ad_hoc_test_result_;
+
+ // The list of event listeners that can be used to track events inside
+ // Google Test.
+ TestEventListeners listeners_;
+
+ // The OS stack trace getter. Will be deleted when the UnitTest
+ // object is destructed. By default, an OsStackTraceGetter is used,
+ // but the user can set this field to use a custom getter if that is
+ // desired.
+ OsStackTraceGetterInterface* os_stack_trace_getter_;
+
+ // True iff PostFlagParsingInit() has been called.
+ bool post_flag_parse_init_performed_;
+
+ // The random number seed used at the beginning of the test run.
+ int random_seed_;
+
+ // Our random number generator.
+ internal::Random random_;
+
+ // How long the test took to run, in milliseconds.
+ TimeInMillis elapsed_time_;
+
+#if GTEST_HAS_DEATH_TEST
+ // The decomposed components of the gtest_internal_run_death_test flag,
+ // parsed when RUN_ALL_TESTS is called.
+ internal::scoped_ptr<InternalRunDeathTestFlag> internal_run_death_test_flag_;
+ internal::scoped_ptr<internal::DeathTestFactory> death_test_factory_;
+#endif // GTEST_HAS_DEATH_TEST
+
+ // A per-thread stack of traces created by the SCOPED_TRACE() macro.
+ internal::ThreadLocal<std::vector<TraceInfo> > gtest_trace_stack_;
+
+ // The value of GTEST_FLAG(catch_exceptions) at the moment RunAllTests()
+ // starts.
+ bool catch_exceptions_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl);
+}; // class UnitTestImpl
+
+// Convenience function for accessing the global UnitTest
+// implementation object.
+inline UnitTestImpl* GetUnitTestImpl() {
+ return UnitTest::GetInstance()->impl();
+}
+
+#if GTEST_USES_SIMPLE_RE
+
+// Internal helper functions for implementing the simple regular
+// expression matcher.
+GTEST_API_ bool IsInSet(char ch, const char* str);
+GTEST_API_ bool IsAsciiDigit(char ch);
+GTEST_API_ bool IsAsciiPunct(char ch);
+GTEST_API_ bool IsRepeat(char ch);
+GTEST_API_ bool IsAsciiWhiteSpace(char ch);
+GTEST_API_ bool IsAsciiWordChar(char ch);
+GTEST_API_ bool IsValidEscape(char ch);
+GTEST_API_ bool AtomMatchesChar(bool escaped, char pattern, char ch);
+GTEST_API_ bool ValidateRegex(const char* regex);
+GTEST_API_ bool MatchRegexAtHead(const char* regex, const char* str);
+GTEST_API_ bool MatchRepetitionAndRegexAtHead(
+ bool escaped, char ch, char repeat, const char* regex, const char* str);
+GTEST_API_ bool MatchRegexAnywhere(const char* regex, const char* str);
+
+#endif // GTEST_USES_SIMPLE_RE
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, char** argv);
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv);
+
+#if GTEST_HAS_DEATH_TEST
+
+// Returns the message describing the last system error, regardless of the
+// platform.
+GTEST_API_ String GetLastErrnoDescription();
+
+# if GTEST_OS_WINDOWS
+// Provides leak-safe Windows kernel handle ownership.
+class AutoHandle {
+ public:
+ AutoHandle() : handle_(INVALID_HANDLE_VALUE) {}
+ explicit AutoHandle(HANDLE handle) : handle_(handle) {}
+
+ ~AutoHandle() { Reset(); }
+
+ HANDLE Get() const { return handle_; }
+ void Reset() { Reset(INVALID_HANDLE_VALUE); }
+ void Reset(HANDLE handle) {
+ if (handle != handle_) {
+ if (handle_ != INVALID_HANDLE_VALUE)
+ ::CloseHandle(handle_);
+ handle_ = handle;
+ }
+ }
+
+ private:
+ HANDLE handle_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(AutoHandle);
+};
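+
+// Illustrative sketch (Windows-only, hypothetical usage): AutoHandle closes
+// the wrapped kernel handle when it goes out of scope, e.g.
+//
+//   AutoHandle event(::CreateEvent(NULL, FALSE, FALSE, NULL));
+//   if (event.Get() != NULL) {
+//     // ... use event.Get() ...
+//   }  // the handle is released by ~AutoHandle()
+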
+# endif // GTEST_OS_WINDOWS
+
+// Attempts to parse a string into a positive integer pointed to by the
+// number parameter. Returns true if that is possible.
+// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can use
+// it here.
+template <typename Integer>
+bool ParseNaturalNumber(const ::std::string& str, Integer* number) {
+ // Fail fast if the given string does not begin with a digit;
+ // this bypasses strtoXXX's "optional leading whitespace and plus
+ // or minus sign" semantics, which are undesirable here.
+ if (str.empty() || !IsDigit(str[0])) {
+ return false;
+ }
+ errno = 0;
+
+ char* end;
+ // BiggestConvertible is the largest integer type that system-provided
+ // string-to-number conversion routines can return.
+
+# if GTEST_OS_WINDOWS && !defined(__GNUC__)
+
+ // MSVC and C++ Builder define __int64 instead of the standard long long.
+ typedef unsigned __int64 BiggestConvertible;
+ const BiggestConvertible parsed = _strtoui64(str.c_str(), &end, 10);
+
+# else
+
+ typedef unsigned long long BiggestConvertible; // NOLINT
+ const BiggestConvertible parsed = strtoull(str.c_str(), &end, 10);
+
+# endif // GTEST_OS_WINDOWS && !defined(__GNUC__)
+
+ const bool parse_success = *end == '\0' && errno == 0;
+
+ // TODO(vladl@google.com): Convert this to compile time assertion when it is
+ // available.
+ GTEST_CHECK_(sizeof(Integer) <= sizeof(parsed));
+
+ const Integer result = static_cast<Integer>(parsed);
+ if (parse_success && static_cast<BiggestConvertible>(result) == parsed) {
+ *number = result;
+ return true;
+ }
+ return false;
+}
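+
+// Illustrative behaviour of ParseNaturalNumber() (sketch):
+//
+//   int n = 0;
+//   ParseNaturalNumber(::std::string("123"), &n);  // true,  n == 123
+//   ParseNaturalNumber(::std::string("-5"), &n);   // false: leading '-'
+//   ParseNaturalNumber(::std::string("12x"), &n);  // false: trailing junk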
+#endif // GTEST_HAS_DEATH_TEST
+
+// TestResult contains some private methods that should be hidden from
+// Google Test user but are required for testing. This class allows our tests
+// to access them.
+//
+// This class is supplied only for the purpose of testing Google Test's own
+// constructs. Do not use it in user tests, either directly or indirectly.
+class TestResultAccessor {
+ public:
+ static void RecordProperty(TestResult* test_result,
+ const TestProperty& property) {
+ test_result->RecordProperty(property);
+ }
+
+ static void ClearTestPartResults(TestResult* test_result) {
+ test_result->ClearTestPartResults();
+ }
+
+ static const std::vector<testing::TestPartResult>& test_part_results(
+ const TestResult& test_result) {
+ return test_result.test_part_results();
+ }
+};
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_SRC_GTEST_INTERNAL_INL_H_
+#undef GTEST_IMPLEMENTATION_
+
+#if GTEST_OS_WINDOWS
+# define vsnprintf _vsnprintf
+#endif // GTEST_OS_WINDOWS
+
+namespace testing {
+
+using internal::CountIf;
+using internal::ForEach;
+using internal::GetElementOr;
+using internal::Shuffle;
+
+// Constants.
+
+// A test whose test case name or test name matches this filter is
+// disabled and not run.
+static const char kDisableTestFilter[] = "DISABLED_*:*/DISABLED_*";
+
+// A test case whose name matches this filter is considered a death
+// test case and will be run before test cases whose name doesn't
+// match this filter.
+static const char kDeathTestCaseFilter[] = "*DeathTest:*DeathTest/*";
+
+// A test filter that matches everything.
+static const char kUniversalFilter[] = "*";
+
+// The default output file for XML output.
+static const char kDefaultOutputFile[] = "test_detail.xml";
+
+// The environment variable name for the test shard index.
+static const char kTestShardIndex[] = "GTEST_SHARD_INDEX";
+// The environment variable name for the total number of test shards.
+static const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS";
+// The environment variable name for the test shard status file.
+static const char kTestShardStatusFile[] = "GTEST_SHARD_STATUS_FILE";
+
+namespace internal {
+
+// The text used in failure messages to indicate the start of the
+// stack trace.
+const char kStackTraceMarker[] = "\nStack trace:\n";
+
+// g_help_flag is true iff the --help flag or an equivalent form is
+// specified on the command line.
+bool g_help_flag = false;
+
+} // namespace internal
+
+GTEST_DEFINE_bool_(
+ also_run_disabled_tests,
+ internal::BoolFromGTestEnv("also_run_disabled_tests", false),
+ "Run disabled tests too, in addition to the tests normally being run.");
+
+GTEST_DEFINE_bool_(
+ break_on_failure,
+ internal::BoolFromGTestEnv("break_on_failure", false),
+ "True iff a failed assertion should be a debugger break-point.");
+
+GTEST_DEFINE_bool_(
+ catch_exceptions,
+ internal::BoolFromGTestEnv("catch_exceptions", true),
+ "True iff " GTEST_NAME_
+ " should catch exceptions and treat them as test failures.");
+
+GTEST_DEFINE_string_(
+ color,
+ internal::StringFromGTestEnv("color", "auto"),
+ "Whether to use colors in the output. Valid values: yes, no, "
+ "and auto. 'auto' means to use colors if the output is "
+ "being sent to a terminal and the TERM environment variable "
+ "is set to xterm, xterm-color, xterm-256color, linux or cygwin.");
+
+GTEST_DEFINE_string_(
+ filter,
+ internal::StringFromGTestEnv("filter", kUniversalFilter),
+ "A colon-separated list of glob (not regex) patterns "
+ "for filtering the tests to run, optionally followed by a "
+ "'-' and a : separated list of negative patterns (tests to "
+ "exclude). A test is run if it matches one of the positive "
+ "patterns and does not match any of the negative patterns.");
+
+GTEST_DEFINE_bool_(list_tests, false,
+ "List all tests without running them.");
+
+GTEST_DEFINE_string_(
+ output,
+ internal::StringFromGTestEnv("output", ""),
+ "A format (currently must be \"xml\"), optionally followed "
+ "by a colon and an output file name or directory. A directory "
+ "is indicated by a trailing pathname separator. "
+ "Examples: \"xml:filename.xml\", \"xml::directoryname/\". "
+ "If a directory is specified, output files will be created "
+ "within that directory, with file-names based on the test "
+ "executable's name and, if necessary, made unique by adding "
+ "digits.");
+
+GTEST_DEFINE_bool_(
+ print_time,
+ internal::BoolFromGTestEnv("print_time", true),
+ "True iff " GTEST_NAME_
+ " should display elapsed time in text output.");
+
+GTEST_DEFINE_int32_(
+ random_seed,
+ internal::Int32FromGTestEnv("random_seed", 0),
+ "Random number seed to use when shuffling test orders. Must be in range "
+ "[1, 99999], or 0 to use a seed based on the current time.");
+
+GTEST_DEFINE_int32_(
+ repeat,
+ internal::Int32FromGTestEnv("repeat", 1),
+ "How many times to repeat each test. Specify a negative number "
+ "for repeating forever. Useful for shaking out flaky tests.");
+
+GTEST_DEFINE_bool_(
+ show_internal_stack_frames, false,
+ "True iff " GTEST_NAME_ " should include internal stack frames when "
+ "printing test failure stack traces.");
+
+GTEST_DEFINE_bool_(
+ shuffle,
+ internal::BoolFromGTestEnv("shuffle", false),
+ "True iff " GTEST_NAME_
+ " should randomize tests' order on every run.");
+
+GTEST_DEFINE_int32_(
+ stack_trace_depth,
+ internal::Int32FromGTestEnv("stack_trace_depth", kMaxStackTraceDepth),
+ "The maximum number of stack frames to print when an "
+ "assertion fails. The valid range is 0 through 100, inclusive.");
+
+GTEST_DEFINE_string_(
+ stream_result_to,
+ internal::StringFromGTestEnv("stream_result_to", ""),
+ "This flag specifies the host name and the port number on which to stream "
+ "test results. Example: \"localhost:555\". The flag is effective only on "
+ "Linux.");
+
+GTEST_DEFINE_bool_(
+ throw_on_failure,
+ internal::BoolFromGTestEnv("throw_on_failure", false),
+ "When this flag is specified, a failed assertion will throw an exception "
+ "if exceptions are enabled or exit the program with a non-zero code "
+ "otherwise.");
+
+namespace internal {
+
+// Generates a random number from [0, range), using a Linear
+// Congruential Generator (LCG). Crashes if 'range' is 0 or greater
+// than kMaxRange.
+UInt32 Random::Generate(UInt32 range) {
+ // These constants are the same as are used in glibc's rand(3).
+ state_ = (1103515245U*state_ + 12345U) % kMaxRange;
+
+ GTEST_CHECK_(range > 0)
+ << "Cannot generate a number in the range [0, 0).";
+ GTEST_CHECK_(range <= kMaxRange)
+ << "Generation of a number in [0, " << range << ") was requested, "
+ << "but this can only generate numbers in [0, " << kMaxRange << ").";
+
+ // Converting via modulus introduces a bit of downward bias, but
+ // it's simple, and a linear congruential generator isn't too good
+ // to begin with.
+ return state_ % range;
+}
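+
+// Illustrative note: the sequence is fully determined by the seed, so two
+// Random objects constructed with the same seed generate identical values;
+// this is how --gtest_random_seed makes shuffled test orders reproducible.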
+
+// GTestIsInitialized() returns true iff the user has initialized
+// Google Test. Useful for catching the user mistake of not initializing
+// Google Test before calling RUN_ALL_TESTS().
+//
+// A user must call testing::InitGoogleTest() to initialize Google
+// Test. g_init_gtest_count is set to the number of times
+// InitGoogleTest() has been called. We don't protect this variable
+// under a mutex as it is only accessed in the main thread.
+int g_init_gtest_count = 0;
+static bool GTestIsInitialized() { return g_init_gtest_count != 0; }
+
+// Iterates over a vector of TestCases, keeping a running sum of the
+// results of calling a given int-returning method on each.
+// Returns the sum.
+static int SumOverTestCaseList(const std::vector<TestCase*>& case_list,
+ int (TestCase::*method)() const) {
+ int sum = 0;
+ for (size_t i = 0; i < case_list.size(); i++) {
+ sum += (case_list[i]->*method)();
+ }
+ return sum;
+}
+
+// Returns true iff the test case passed.
+static bool TestCasePassed(const TestCase* test_case) {
+ return test_case->should_run() && test_case->Passed();
+}
+
+// Returns true iff the test case failed.
+static bool TestCaseFailed(const TestCase* test_case) {
+ return test_case->should_run() && test_case->Failed();
+}
+
+// Returns true iff test_case contains at least one test that should
+// run.
+static bool ShouldRunTestCase(const TestCase* test_case) {
+ return test_case->should_run();
+}
+
+// AssertHelper constructor.
+AssertHelper::AssertHelper(TestPartResult::Type type,
+ const char* file,
+ int line,
+ const char* message)
+ : data_(new AssertHelperData(type, file, line, message)) {
+}
+
+AssertHelper::~AssertHelper() {
+ delete data_;
+}
+
+// Message assignment, for assertion streaming support.
+void AssertHelper::operator=(const Message& message) const {
+ UnitTest::GetInstance()->
+ AddTestPartResult(data_->type, data_->file, data_->line,
+ AppendUserMessage(data_->message, message),
+ UnitTest::GetInstance()->impl()
+ ->CurrentOsStackTraceExceptTop(1)
+ // Skips the stack frame for this function itself.
+ ); // NOLINT
+}
+
+// Mutex for linked pointers.
+GTEST_DEFINE_STATIC_MUTEX_(g_linked_ptr_mutex);
+
+// Application pathname gotten in InitGoogleTest.
+String g_executable_path;
+
+// Returns the current application's name, removing directory path if that
+// is present.
+FilePath GetCurrentExecutableName() {
+ FilePath result;
+
+#if GTEST_OS_WINDOWS
+ result.Set(FilePath(g_executable_path).RemoveExtension("exe"));
+#else
+ result.Set(FilePath(g_executable_path));
+#endif // GTEST_OS_WINDOWS
+
+ return result.RemoveDirectoryName();
+}
+
+// Functions for processing the gtest_output flag.
+
+// Returns the output format, or "" for normal printed output.
+String UnitTestOptions::GetOutputFormat() {
+ const char* const gtest_output_flag = GTEST_FLAG(output).c_str();
+ if (gtest_output_flag == NULL) return String("");
+
+ const char* const colon = strchr(gtest_output_flag, ':');
+ return (colon == NULL) ?
+ String(gtest_output_flag) :
+ String(gtest_output_flag, colon - gtest_output_flag);
+}
+
+// Returns the name of the requested output file, or the default if none
+// was explicitly specified.
+String UnitTestOptions::GetAbsolutePathToOutputFile() {
+ const char* const gtest_output_flag = GTEST_FLAG(output).c_str();
+ if (gtest_output_flag == NULL)
+ return String("");
+
+ const char* const colon = strchr(gtest_output_flag, ':');
+ if (colon == NULL)
+ return String(internal::FilePath::ConcatPaths(
+ internal::FilePath(
+ UnitTest::GetInstance()->original_working_dir()),
+ internal::FilePath(kDefaultOutputFile)).ToString() );
+
+ internal::FilePath output_name(colon + 1);
+ if (!output_name.IsAbsolutePath())
+ // TODO(wan@google.com): on Windows \some\path is not an absolute
+ // path (as its meaning depends on the current drive), yet the
+ // following logic for turning it into an absolute path is wrong.
+ // Fix it.
+ output_name = internal::FilePath::ConcatPaths(
+ internal::FilePath(UnitTest::GetInstance()->original_working_dir()),
+ internal::FilePath(colon + 1));
+
+ if (!output_name.IsDirectory())
+ return output_name.ToString();
+
+ internal::FilePath result(internal::FilePath::GenerateUniqueFileName(
+ output_name, internal::GetCurrentExecutableName(),
+ GetOutputFormat().c_str()));
+ return result.ToString();
+}
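+
+// Illustrative --gtest_output values and how the two functions above
+// interpret them (sketch):
+//
+//   --gtest_output=xml             format "xml", default file test_detail.xml
+//                                  in the original working directory
+//   --gtest_output=xml:report.xml  format "xml", file "report.xml"
+//   --gtest_output=xml:results/    format "xml", a uniquely named file
+//                                  inside the "results/" directory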
+
+// Returns true iff the wildcard pattern matches the string. The
+// first ':' or '\0' character in pattern marks the end of it.
+//
+// This recursive algorithm isn't very efficient, but is clear and
+// works well enough for matching test names, which are short.
+bool UnitTestOptions::PatternMatchesString(const char *pattern,
+ const char *str) {
+ switch (*pattern) {
+ case '\0':
+ case ':': // Either ':' or '\0' marks the end of the pattern.
+ return *str == '\0';
+ case '?': // Matches any single character.
+ return *str != '\0' && PatternMatchesString(pattern + 1, str + 1);
+ case '*': // Matches any string (possibly empty) of characters.
+ return (*str != '\0' && PatternMatchesString(pattern, str + 1)) ||
+ PatternMatchesString(pattern + 1, str);
+ default: // Non-special character. Matches itself.
+ return *pattern == *str &&
+ PatternMatchesString(pattern + 1, str + 1);
+ }
+}
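+
+// A few illustrative cases for the matcher above (sketch):
+//
+//   PatternMatchesString("Foo.*", "Foo.Bar");    // true:  '*' eats "Bar"
+//   PatternMatchesString("Foo.?", "Foo.Bar");    // false: '?' is one char
+//   PatternMatchesString("Foo.*:Baz", "Foo.X");  // true:  ':' ends the pattern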
+
+bool UnitTestOptions::MatchesFilter(const String& name, const char* filter) {
+ const char *cur_pattern = filter;
+ for (;;) {
+ if (PatternMatchesString(cur_pattern, name.c_str())) {
+ return true;
+ }
+
+ // Finds the next pattern in the filter.
+ cur_pattern = strchr(cur_pattern, ':');
+
+ // Returns if no more pattern can be found.
+ if (cur_pattern == NULL) {
+ return false;
+ }
+
+ // Skips the pattern separator (the ':' character).
+ cur_pattern++;
+ }
+}
+
+// TODO(keithray): move String function implementations to gtest-string.cc.
+
+// Returns true iff the user-specified filter matches the test case
+// name and the test name.
+bool UnitTestOptions::FilterMatchesTest(const String &test_case_name,
+ const String &test_name) {
+ const String& full_name = String::Format("%s.%s",
+ test_case_name.c_str(),
+ test_name.c_str());
+
+ // Split --gtest_filter at '-', if there is one, to separate into
+ // positive filter and negative filter portions.
+ const char* const p = GTEST_FLAG(filter).c_str();
+ const char* const dash = strchr(p, '-');
+ String positive;
+ String negative;
+ if (dash == NULL) {
+ positive = GTEST_FLAG(filter).c_str(); // Whole string is a positive filter
+ negative = String("");
+ } else {
+ positive = String(p, dash - p); // Everything up to the dash
+ negative = String(dash+1); // Everything after the dash
+ if (positive.empty()) {
+ // Treat '-test1' as the same as '*-test1'
+ positive = kUniversalFilter;
+ }
+ }
+
+ // A filter is a colon-separated list of patterns. It matches a
+ // test if any pattern in it matches the test.
+ return (MatchesFilter(full_name, positive.c_str()) &&
+ !MatchesFilter(full_name, negative.c_str()));
+}
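+
+// Illustrative --gtest_filter values, given the positive/negative split
+// above (sketch):
+//
+//   --gtest_filter=*                     everything (the default)
+//   --gtest_filter=FooTest.*             all tests of test case FooTest
+//   --gtest_filter=*Null*:*Constructor*  full names containing "Null" or
+//                                        "Constructor"
+//   --gtest_filter=-*DeathTest*          everything except death tests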
+
+#if GTEST_HAS_SEH
+// Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
+// given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
+// This function is useful as an __except condition.
+int UnitTestOptions::GTestShouldProcessSEH(DWORD exception_code) {
+ // Google Test should handle a SEH exception if:
+ // 1. the user wants it to, AND
+ // 2. this is not a breakpoint exception, AND
+ // 3. this is not a C++ exception (VC++ implements them via SEH,
+ // apparently).
+ //
+ // SEH exception code for C++ exceptions.
+ // (see http://support.microsoft.com/kb/185294 for more information).
+ const DWORD kCxxExceptionCode = 0xe06d7363;
+
+ bool should_handle = true;
+
+ if (!GTEST_FLAG(catch_exceptions))
+ should_handle = false;
+ else if (exception_code == EXCEPTION_BREAKPOINT)
+ should_handle = false;
+ else if (exception_code == kCxxExceptionCode)
+ should_handle = false;
+
+ return should_handle ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH;
+}
+#endif // GTEST_HAS_SEH
+
+} // namespace internal
+
+// The c'tor sets this object as the test part result reporter used by
+// Google Test. The 'result' parameter specifies where to report the
+// results. Intercepts only failures from the current thread.
+ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(
+ TestPartResultArray* result)
+ : intercept_mode_(INTERCEPT_ONLY_CURRENT_THREAD),
+ result_(result) {
+ Init();
+}
+
+// The c'tor sets this object as the test part result reporter used by
+// Google Test. The 'result' parameter specifies where to report the
+// results.
+ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(
+ InterceptMode intercept_mode, TestPartResultArray* result)
+ : intercept_mode_(intercept_mode),
+ result_(result) {
+ Init();
+}
+
+void ScopedFakeTestPartResultReporter::Init() {
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
+ old_reporter_ = impl->GetGlobalTestPartResultReporter();
+ impl->SetGlobalTestPartResultReporter(this);
+ } else {
+ old_reporter_ = impl->GetTestPartResultReporterForCurrentThread();
+ impl->SetTestPartResultReporterForCurrentThread(this);
+ }
+}
+
+// The d'tor restores the test part result reporter used by Google Test
+// before.
+ScopedFakeTestPartResultReporter::~ScopedFakeTestPartResultReporter() {
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
+ impl->SetGlobalTestPartResultReporter(old_reporter_);
+ } else {
+ impl->SetTestPartResultReporterForCurrentThread(old_reporter_);
+ }
+}
+
+// Increments the test part result count and remembers the result.
+// This method is from the TestPartResultReporterInterface interface.
+void ScopedFakeTestPartResultReporter::ReportTestPartResult(
+ const TestPartResult& result) {
+ result_->Append(result);
+}
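+
+// Illustrative sketch: this reporter is the mechanism behind the
+// EXPECT_FATAL_FAILURE()/EXPECT_NONFATAL_FAILURE() macros; used directly it
+// looks roughly like this:
+//
+//   TestPartResultArray results;
+//   {
+//     ScopedFakeTestPartResultReporter reporter(
+//         ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD,
+//         &results);
+//     ADD_FAILURE() << "intercepted";  // captured in 'results', not reported
+//   }
+//   // 'results' now holds exactly one non-fatal failure.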
+
+namespace internal {
+
+// Returns the type ID of ::testing::Test. We should always call this
+// instead of GetTypeId< ::testing::Test>() to get the type ID of
+// testing::Test. This is to work around a suspected linker bug when
+// using Google Test as a framework on Mac OS X. The bug causes
+// GetTypeId< ::testing::Test>() to return different values depending
+// on whether the call is from the Google Test framework itself or
+// from user test code. GetTestTypeId() is guaranteed to always
+// return the same value, as it always calls GetTypeId<>() from the
+// gtest.cc, which is within the Google Test framework.
+TypeId GetTestTypeId() {
+ return GetTypeId<Test>();
+}
+
+// The value of GetTestTypeId() as seen from within the Google Test
+// library. This is solely for testing GetTestTypeId().
+extern const TypeId kTestTypeIdInGoogleTest = GetTestTypeId();
+
+// This predicate-formatter checks that 'results' contains a test part
+// failure of the given type and that the failure message contains the
+// given substring.
+AssertionResult HasOneFailure(const char* /* results_expr */,
+ const char* /* type_expr */,
+ const char* /* substr_expr */,
+ const TestPartResultArray& results,
+ TestPartResult::Type type,
+ const string& substr) {
+ const String expected(type == TestPartResult::kFatalFailure ?
+ "1 fatal failure" :
+ "1 non-fatal failure");
+ Message msg;
+ if (results.size() != 1) {
+ msg << "Expected: " << expected << "\n"
+ << " Actual: " << results.size() << " failures";
+ for (int i = 0; i < results.size(); i++) {
+ msg << "\n" << results.GetTestPartResult(i);
+ }
+ return AssertionFailure() << msg;
+ }
+
+ const TestPartResult& r = results.GetTestPartResult(0);
+ if (r.type() != type) {
+ return AssertionFailure() << "Expected: " << expected << "\n"
+ << " Actual:\n"
+ << r;
+ }
+
+ if (strstr(r.message(), substr.c_str()) == NULL) {
+ return AssertionFailure() << "Expected: " << expected << " containing \""
+ << substr << "\"\n"
+ << " Actual:\n"
+ << r;
+ }
+
+ return AssertionSuccess();
+}
+
+// The constructor of SingleFailureChecker remembers where to look up
+// test part results, what type of failure we expect, and what
+// substring the failure message should contain.
+SingleFailureChecker::SingleFailureChecker(
+ const TestPartResultArray* results,
+ TestPartResult::Type type,
+ const string& substr)
+ : results_(results),
+ type_(type),
+ substr_(substr) {}
+
+// The destructor of SingleFailureChecker verifies that the given
+// TestPartResultArray contains exactly one failure that has the given
+// type and contains the given substring. If that's not the case, a
+// non-fatal failure will be generated.
+SingleFailureChecker::~SingleFailureChecker() {
+ EXPECT_PRED_FORMAT3(HasOneFailure, *results_, type_, substr_);
+}
+
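+// HasOneFailure() and SingleFailureChecker are the machinery behind the
+// EXPECT_FATAL_FAILURE / EXPECT_NONFATAL_FAILURE macros declared in
+// gtest-spi.h. A brief usage sketch:
+//
+//   EXPECT_NONFATAL_FAILURE(ADD_FAILURE() << "needle in message",
+//                           "needle");
+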
+DefaultGlobalTestPartResultReporter::DefaultGlobalTestPartResultReporter(
+ UnitTestImpl* unit_test) : unit_test_(unit_test) {}
+
+void DefaultGlobalTestPartResultReporter::ReportTestPartResult(
+ const TestPartResult& result) {
+ unit_test_->current_test_result()->AddTestPartResult(result);
+ unit_test_->listeners()->repeater()->OnTestPartResult(result);
+}
+
+DefaultPerThreadTestPartResultReporter::DefaultPerThreadTestPartResultReporter(
+ UnitTestImpl* unit_test) : unit_test_(unit_test) {}
+
+void DefaultPerThreadTestPartResultReporter::ReportTestPartResult(
+ const TestPartResult& result) {
+ unit_test_->GetGlobalTestPartResultReporter()->ReportTestPartResult(result);
+}
+
+// Returns the global test part result reporter.
+TestPartResultReporterInterface*
+UnitTestImpl::GetGlobalTestPartResultReporter() {
+ internal::MutexLock lock(&global_test_part_result_reporter_mutex_);
+ return global_test_part_result_repoter_;
+}
+
+// Sets the global test part result reporter.
+void UnitTestImpl::SetGlobalTestPartResultReporter(
+ TestPartResultReporterInterface* reporter) {
+ internal::MutexLock lock(&global_test_part_result_reporter_mutex_);
+ global_test_part_result_repoter_ = reporter;
+}
+
+// Returns the test part result reporter for the current thread.
+TestPartResultReporterInterface*
+UnitTestImpl::GetTestPartResultReporterForCurrentThread() {
+ return per_thread_test_part_result_reporter_.get();
+}
+
+// Sets the test part result reporter for the current thread.
+void UnitTestImpl::SetTestPartResultReporterForCurrentThread(
+ TestPartResultReporterInterface* reporter) {
+ per_thread_test_part_result_reporter_.set(reporter);
+}
+
+// Gets the number of successful test cases.
+int UnitTestImpl::successful_test_case_count() const {
+ return CountIf(test_cases_, TestCasePassed);
+}
+
+// Gets the number of failed test cases.
+int UnitTestImpl::failed_test_case_count() const {
+ return CountIf(test_cases_, TestCaseFailed);
+}
+
+// Gets the number of all test cases.
+int UnitTestImpl::total_test_case_count() const {
+ return static_cast<int>(test_cases_.size());
+}
+
+// Gets the number of all test cases that contain at least one test
+// that should run.
+int UnitTestImpl::test_case_to_run_count() const {
+ return CountIf(test_cases_, ShouldRunTestCase);
+}
+
+// Gets the number of successful tests.
+int UnitTestImpl::successful_test_count() const {
+ return SumOverTestCaseList(test_cases_, &TestCase::successful_test_count);
+}
+
+// Gets the number of failed tests.
+int UnitTestImpl::failed_test_count() const {
+ return SumOverTestCaseList(test_cases_, &TestCase::failed_test_count);
+}
+
+// Gets the number of disabled tests.
+int UnitTestImpl::disabled_test_count() const {
+ return SumOverTestCaseList(test_cases_, &TestCase::disabled_test_count);
+}
+
+// Gets the number of all tests.
+int UnitTestImpl::total_test_count() const {
+ return SumOverTestCaseList(test_cases_, &TestCase::total_test_count);
+}
+
+// Gets the number of tests that should run.
+int UnitTestImpl::test_to_run_count() const {
+ return SumOverTestCaseList(test_cases_, &TestCase::test_to_run_count);
+}
+
+// Returns the current OS stack trace as a String.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag. The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// CurrentOsStackTraceExceptTop(1), Foo() will be included in the
+// trace but Bar() and CurrentOsStackTraceExceptTop() won't.
+String UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) {
+ (void)skip_count;
+ return String("");
+}
+
+// Returns the current time in milliseconds.
+TimeInMillis GetTimeInMillis() {
+#if GTEST_OS_WINDOWS_MOBILE || defined(__BORLANDC__)
+ // Difference between 1970-01-01 and 1601-01-01 in milliseconds.
+ // http://analogous.blogspot.com/2005/04/epoch.html
+ const TimeInMillis kJavaEpochToWinFileTimeDelta =
+ static_cast<TimeInMillis>(116444736UL) * 100000UL;
+ const DWORD kTenthMicrosInMilliSecond = 10000;
+
+ SYSTEMTIME now_systime;
+ FILETIME now_filetime;
+ ULARGE_INTEGER now_int64;
+ // TODO(kenton@google.com): Shouldn't this just use
+ // GetSystemTimeAsFileTime()?
+ GetSystemTime(&now_systime);
+ if (SystemTimeToFileTime(&now_systime, &now_filetime)) {
+ now_int64.LowPart = now_filetime.dwLowDateTime;
+ now_int64.HighPart = now_filetime.dwHighDateTime;
+ now_int64.QuadPart = (now_int64.QuadPart / kTenthMicrosInMilliSecond) -
+ kJavaEpochToWinFileTimeDelta;
+ return now_int64.QuadPart;
+ }
+ return 0;
+#elif GTEST_OS_WINDOWS && !GTEST_HAS_GETTIMEOFDAY_
+ __timeb64 now;
+
+# ifdef _MSC_VER
+
+ // MSVC 8 deprecates _ftime64(), so we want to suppress warning 4996
+ // (deprecated function) there.
+ // TODO(kenton@google.com): Use GetTickCount()? Or use
+ // SystemTimeToFileTime()
+# pragma warning(push) // Saves the current warning state.
+# pragma warning(disable:4996) // Temporarily disables warning 4996.
+ _ftime64(&now);
+# pragma warning(pop) // Restores the warning state.
+# else
+
+ _ftime64(&now);
+
+# endif // _MSC_VER
+
+ return static_cast<TimeInMillis>(now.time) * 1000 + now.millitm;
+#elif GTEST_HAS_GETTIMEOFDAY_
+ struct timeval now;
+ gettimeofday(&now, NULL);
+ return static_cast<TimeInMillis>(now.tv_sec) * 1000 + now.tv_usec / 1000;
+#else
+# error "Don't know how to get the current time on your system."
+#endif
+}
+
+// Utilities
+
+// class String
+
+// Returns the input enclosed in double quotes if it's not NULL;
+// otherwise returns "(null)". For example, "\"Hello\"" is returned
+// for input "Hello".
+//
+// This is useful for printing a C string in the syntax of a literal.
+//
+// Known issue: escape sequences are not handled yet.
+String String::ShowCStringQuoted(const char* c_str) {
+ return c_str ? String::Format("\"%s\"", c_str) : String("(null)");
+}
+
+// Copies at most length characters from str into a newly-allocated
+// piece of memory of size length+1. The memory is allocated with new[].
+// A terminating null byte is written to the memory, and a pointer to the
+// new memory is returned. If str is NULL, NULL is returned.
+static char* CloneString(const char* str, size_t length) {
+ if (str == NULL) {
+ return NULL;
+ } else {
+ char* const clone = new char[length + 1];
+ posix::StrNCpy(clone, str, length);
+ clone[length] = '\0';
+ return clone;
+ }
+}
+
+// Clones a 0-terminated C string, allocating memory using new. The
+// caller is responsible for deleting the return value using delete[].
+// Returns the cloned string, or NULL if the input is NULL.
+const char * String::CloneCString(const char* c_str) {
+ return (c_str == NULL) ?
+ NULL : CloneString(c_str, strlen(c_str));
+}
+
+#if GTEST_OS_WINDOWS_MOBILE
+// Creates a UTF-16 wide string from the given ANSI string, allocating
+// memory using new. The caller is responsible for deleting the return
+// value using delete[]. Returns the wide string, or NULL if the
+// input is NULL.
+LPCWSTR String::AnsiToUtf16(const char* ansi) {
+ if (!ansi) return NULL;
+ const int length = strlen(ansi);
+ const int unicode_length =
+ MultiByteToWideChar(CP_ACP, 0, ansi, length,
+ NULL, 0);
+ WCHAR* unicode = new WCHAR[unicode_length + 1];
+ MultiByteToWideChar(CP_ACP, 0, ansi, length,
+ unicode, unicode_length);
+ unicode[unicode_length] = 0;
+ return unicode;
+}
+
+// Creates an ANSI string from the given wide string, allocating
+// memory using new. The caller is responsible for deleting the return
+// value using delete[]. Returns the ANSI string, or NULL if the
+// input is NULL.
+const char* String::Utf16ToAnsi(LPCWSTR utf16_str) {
+ if (!utf16_str) return NULL;
+ const int ansi_length =
+ WideCharToMultiByte(CP_ACP, 0, utf16_str, -1,
+ NULL, 0, NULL, NULL);
+ char* ansi = new char[ansi_length + 1];
+ WideCharToMultiByte(CP_ACP, 0, utf16_str, -1,
+ ansi, ansi_length, NULL, NULL);
+ ansi[ansi_length] = 0;
+ return ansi;
+}
+
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+// Compares two C strings. Returns true iff they have the same content.
+//
+// Unlike strcmp(), this function can handle NULL argument(s). A NULL
+// C string is considered different to any non-NULL C string,
+// including the empty string.
+bool String::CStringEquals(const char * lhs, const char * rhs) {
+ if ( lhs == NULL ) return rhs == NULL;
+
+ if ( rhs == NULL ) return false;
+
+ return strcmp(lhs, rhs) == 0;
+}
+
+#if GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING
+
+// Converts an array of wide chars to a narrow string using the UTF-8
+// encoding, and streams the result to the given Message object.
+static void StreamWideCharsToMessage(const wchar_t* wstr, size_t length,
+ Message* msg) {
+ // TODO(wan): consider allowing a testing::String object to
+ // contain '\0'. This will make it behave more like std::string,
+ // and will allow ToUtf8String() to return the correct encoding
+ // for '\0' s.t. we can get rid of the conditional here (and in
+ // several other places).
+ for (size_t i = 0; i != length; ) { // NOLINT
+ if (wstr[i] != L'\0') {
+ *msg << WideStringToUtf8(wstr + i, static_cast<int>(length - i));
+ while (i != length && wstr[i] != L'\0')
+ i++;
+ } else {
+ *msg << '\0';
+ i++;
+ }
+ }
+}
+
+#endif // GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING
+
+} // namespace internal
+
+#if GTEST_HAS_STD_WSTRING
+// Converts the given wide string to a narrow string using the UTF-8
+// encoding, and streams the result to this Message object.
+Message& Message::operator <<(const ::std::wstring& wstr) {
+ internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this);
+ return *this;
+}
+#endif // GTEST_HAS_STD_WSTRING
+
+#if GTEST_HAS_GLOBAL_WSTRING
+// Converts the given wide string to a narrow string using the UTF-8
+// encoding, and streams the result to this Message object.
+Message& Message::operator <<(const ::wstring& wstr) {
+ internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this);
+ return *this;
+}
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+// AssertionResult constructors.
+// Used in EXPECT_TRUE/FALSE(assertion_result).
+AssertionResult::AssertionResult(const AssertionResult& other)
+ : success_(other.success_),
+ message_(other.message_.get() != NULL ?
+ new ::std::string(*other.message_) :
+ static_cast< ::std::string*>(NULL)) {
+}
+
+// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
+AssertionResult AssertionResult::operator!() const {
+ AssertionResult negation(!success_);
+ if (message_.get() != NULL)
+ negation << *message_;
+ return negation;
+}
+
+// Makes a successful assertion result.
+AssertionResult AssertionSuccess() {
+ return AssertionResult(true);
+}
+
+// Makes a failed assertion result.
+AssertionResult AssertionFailure() {
+ return AssertionResult(false);
+}
+
+// Makes a failed assertion result with the given failure message.
+// Deprecated; use AssertionFailure() << message.
+AssertionResult AssertionFailure(const Message& message) {
+ return AssertionFailure() << message;
+}
+
+namespace internal {
+
+// Constructs and returns the message for an equality assertion
+// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.
+//
+// The first four parameters are the expressions used in the assertion
+// and their values, as strings. For example, for ASSERT_EQ(foo, bar)
+// where foo is 5 and bar is 6, we have:
+//
+// expected_expression: "foo"
+// actual_expression: "bar"
+// expected_value: "5"
+// actual_value: "6"
+//
+// The ignoring_case parameter is true iff the assertion is a
+// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will
+// be inserted into the message.
+AssertionResult EqFailure(const char* expected_expression,
+ const char* actual_expression,
+ const String& expected_value,
+ const String& actual_value,
+ bool ignoring_case) {
+ Message msg;
+ msg << "Value of: " << actual_expression;
+ if (actual_value != actual_expression) {
+ msg << "\n Actual: " << actual_value;
+ }
+
+ msg << "\nExpected: " << expected_expression;
+ if (ignoring_case) {
+ msg << " (ignoring case)";
+ }
+ if (expected_value != expected_expression) {
+ msg << "\nWhich is: " << expected_value;
+ }
+
+ return AssertionFailure() << msg;
+}
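+
+// For the ASSERT_EQ(foo, bar) example above (foo == 5, bar == 6), the
+// message built here would read:
+//
+//   Value of: bar
+//     Actual: 6
+//   Expected: foo
+//   Which is: 5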
+
+// Constructs a failure message for Boolean assertions such as EXPECT_TRUE.
+String GetBoolAssertionFailureMessage(const AssertionResult& assertion_result,
+ const char* expression_text,
+ const char* actual_predicate_value,
+ const char* expected_predicate_value) {
+ const char* actual_message = assertion_result.message();
+ Message msg;
+ msg << "Value of: " << expression_text
+ << "\n Actual: " << actual_predicate_value;
+ if (actual_message[0] != '\0')
+ msg << " (" << actual_message << ")";
+ msg << "\nExpected: " << expected_predicate_value;
+ return msg.GetString();
+}
+
+// Helper function for implementing ASSERT_NEAR.
+AssertionResult DoubleNearPredFormat(const char* expr1,
+ const char* expr2,
+ const char* abs_error_expr,
+ double val1,
+ double val2,
+ double abs_error) {
+ const double diff = fabs(val1 - val2);
+ if (diff <= abs_error) return AssertionSuccess();
+
+ // TODO(wan): do not print the value of an expression if it's
+ // already a literal.
+ return AssertionFailure()
+ << "The difference between " << expr1 << " and " << expr2
+ << " is " << diff << ", which exceeds " << abs_error_expr << ", where\n"
+ << expr1 << " evaluates to " << val1 << ",\n"
+ << expr2 << " evaluates to " << val2 << ", and\n"
+ << abs_error_expr << " evaluates to " << abs_error << ".";
+}
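+
+// DoubleNearPredFormat() is the predicate-formatter behind the
+// ASSERT_NEAR / EXPECT_NEAR macros. A brief usage sketch:
+//
+//   EXPECT_NEAR(2.0, 2.1, 0.25);  // passes: |2.0 - 2.1| <= 0.25
+//   EXPECT_NEAR(2.0, 3.0, 0.25);  // fails with the message built above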
+
+
+// Helper template for implementing FloatLE() and DoubleLE().
+template <typename RawType>
+AssertionResult FloatingPointLE(const char* expr1,
+ const char* expr2,
+ RawType val1,
+ RawType val2) {
+ // Returns success if val1 is less than val2,
+ if (val1 < val2) {
+ return AssertionSuccess();
+ }
+
+ // or if val1 is almost equal to val2.
+ const FloatingPoint<RawType> lhs(val1), rhs(val2);
+ if (lhs.AlmostEquals(rhs)) {
+ return AssertionSuccess();
+ }
+
+ // Note that the above two checks will both fail if either val1 or
+ // val2 is NaN, as the IEEE floating-point standard requires that
+ // any predicate involving a NaN must return false.
+
+ ::std::stringstream val1_ss;
+ val1_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << val1;
+
+ ::std::stringstream val2_ss;
+ val2_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << val2;
+
+ return AssertionFailure()
+ << "Expected: (" << expr1 << ") <= (" << expr2 << ")\n"
+ << " Actual: " << StringStreamToString(&val1_ss) << " vs "
+ << StringStreamToString(&val2_ss);
+}
+
+} // namespace internal
+
+// Asserts that val1 is less than, or almost equal to, val2. Fails
+// otherwise. In particular, it fails if either val1 or val2 is NaN.
+AssertionResult FloatLE(const char* expr1, const char* expr2,
+ float val1, float val2) {
+ return internal::FloatingPointLE<float>(expr1, expr2, val1, val2);
+}
+
+// Asserts that val1 is less than, or almost equal to, val2. Fails
+// otherwise. In particular, it fails if either val1 or val2 is NaN.
+AssertionResult DoubleLE(const char* expr1, const char* expr2,
+ double val1, double val2) {
+ return internal::FloatingPointLE<double>(expr1, expr2, val1, val2);
+}
+
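+// A brief usage sketch for the two predicate-formatters above (documented
+// usage is via EXPECT_PRED_FORMAT2; val1 and val2 stand for hypothetical
+// float/double variables in a test):
+//
+//   EXPECT_PRED_FORMAT2(::testing::FloatLE, val1, val2);
+//   EXPECT_PRED_FORMAT2(::testing::DoubleLE, val1, val2);
+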
+namespace internal {
+
+// The helper function for {ASSERT|EXPECT}_EQ with int or enum
+// arguments.
+AssertionResult CmpHelperEQ(const char* expected_expression,
+ const char* actual_expression,
+ BiggestInt expected,
+ BiggestInt actual) {
+ if (expected == actual) {
+ return AssertionSuccess();
+ }
+
+ return EqFailure(expected_expression,
+ actual_expression,
+ FormatForComparisonFailureMessage(expected, actual),
+ FormatForComparisonFailureMessage(actual, expected),
+ false);
+}
+
+// A macro for implementing the helper functions needed to implement
+// ASSERT_?? and EXPECT_?? with integer or enum arguments. It is here
+// just to avoid copy-and-paste of similar code.
+#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
+AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
+ BiggestInt val1, BiggestInt val2) {\
+ if (val1 op val2) {\
+ return AssertionSuccess();\
+ } else {\
+ return AssertionFailure() \
+ << "Expected: (" << expr1 << ") " #op " (" << expr2\
+ << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\
+ << " vs " << FormatForComparisonFailureMessage(val2, val1);\
+ }\
+}
+
+// Implements the helper function for {ASSERT|EXPECT}_NE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(NE, !=)
+// Implements the helper function for {ASSERT|EXPECT}_LE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(LE, <=)
+// Implements the helper function for {ASSERT|EXPECT}_LT with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(LT, < )
+// Implements the helper function for {ASSERT|EXPECT}_GE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(GE, >=)
+// Implements the helper function for {ASSERT|EXPECT}_GT with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(GT, > )
+
+#undef GTEST_IMPL_CMP_HELPER_
+
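+// For reference, GTEST_IMPL_CMP_HELPER_(NE, !=) above expands to a function
+// with the signature
+//   AssertionResult CmpHelperNE(const char* expr1, const char* expr2,
+//                               BiggestInt val1, BiggestInt val2);
+// that succeeds iff val1 != val2; LE, LT, GE and GT are analogous.
+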
+// The helper function for {ASSERT|EXPECT}_STREQ.
+AssertionResult CmpHelperSTREQ(const char* expected_expression,
+ const char* actual_expression,
+ const char* expected,
+ const char* actual) {
+ if (String::CStringEquals(expected, actual)) {
+ return AssertionSuccess();
+ }
+
+ return EqFailure(expected_expression,
+ actual_expression,
+ String::ShowCStringQuoted(expected),
+ String::ShowCStringQuoted(actual),
+ false);
+}
+
+// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
+AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression,
+ const char* actual_expression,
+ const char* expected,
+ const char* actual) {
+ if (String::CaseInsensitiveCStringEquals(expected, actual)) {
+ return AssertionSuccess();
+ }
+
+ return EqFailure(expected_expression,
+ actual_expression,
+ String::ShowCStringQuoted(expected),
+ String::ShowCStringQuoted(actual),
+ true);
+}
+
+// The helper function for {ASSERT|EXPECT}_STRNE.
+AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2) {
+ if (!String::CStringEquals(s1, s2)) {
+ return AssertionSuccess();
+ } else {
+ return AssertionFailure() << "Expected: (" << s1_expression << ") != ("
+ << s2_expression << "), actual: \""
+ << s1 << "\" vs \"" << s2 << "\"";
+ }
+}
+
+// The helper function for {ASSERT|EXPECT}_STRCASENE.
+AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2) {
+ if (!String::CaseInsensitiveCStringEquals(s1, s2)) {
+ return AssertionSuccess();
+ } else {
+ return AssertionFailure()
+ << "Expected: (" << s1_expression << ") != ("
+ << s2_expression << ") (ignoring case), actual: \""
+ << s1 << "\" vs \"" << s2 << "\"";
+ }
+}
+
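+// The four helpers above back the EXPECT_STREQ / EXPECT_STRNE /
+// EXPECT_STRCASEEQ / EXPECT_STRCASENE macros. A brief usage sketch
+// (c_string_under_test is a hypothetical const char*):
+//
+//   EXPECT_STREQ("hello", c_string_under_test);
+//   EXPECT_STRCASEEQ("Hello", "HELLO");  // passes: case is ignored
+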
+} // namespace internal
+
+namespace {
+
+// Helper functions for implementing IsSubString() and IsNotSubstring().
+
+// This group of overloaded functions returns true iff needle is a
+// substring of haystack. NULL is considered a substring of itself
+// only.
+
+bool IsSubstringPred(const char* needle, const char* haystack) {
+ if (needle == NULL || haystack == NULL)
+ return needle == haystack;
+
+ return strstr(haystack, needle) != NULL;
+}
+
+bool IsSubstringPred(const wchar_t* needle, const wchar_t* haystack) {
+ if (needle == NULL || haystack == NULL)
+ return needle == haystack;
+
+ return wcsstr(haystack, needle) != NULL;
+}
+
+// StringType here can be either ::std::string or ::std::wstring.
+template <typename StringType>
+bool IsSubstringPred(const StringType& needle,
+ const StringType& haystack) {
+ return haystack.find(needle) != StringType::npos;
+}
+
+// This function implements either IsSubstring() or IsNotSubstring(),
+// depending on the value of the expected_to_be_substring parameter.
+// StringType here can be const char*, const wchar_t*, ::std::string,
+// or ::std::wstring.
+template <typename StringType>
+AssertionResult IsSubstringImpl(
+ bool expected_to_be_substring,
+ const char* needle_expr, const char* haystack_expr,
+ const StringType& needle, const StringType& haystack) {
+ if (IsSubstringPred(needle, haystack) == expected_to_be_substring)
+ return AssertionSuccess();
+
+ const bool is_wide_string = sizeof(needle[0]) > 1;
+ const char* const begin_string_quote = is_wide_string ? "L\"" : "\"";
+ return AssertionFailure()
+ << "Value of: " << needle_expr << "\n"
+ << " Actual: " << begin_string_quote << needle << "\"\n"
+ << "Expected: " << (expected_to_be_substring ? "" : "not ")
+ << "a substring of " << haystack_expr << "\n"
+ << "Which is: " << begin_string_quote << haystack << "\"";
+}
+
+} // namespace
+
+// IsSubstring() and IsNotSubstring() check whether needle is a
+// substring of haystack (NULL is considered a substring of itself
+// only), and return an appropriate error message when they fail.
+
+AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack) {
+ return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack) {
+ return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack) {
+ return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack) {
+ return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack) {
+ return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack) {
+ return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+
+#if GTEST_HAS_STD_WSTRING
+AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack) {
+ return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack) {
+ return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+#endif // GTEST_HAS_STD_WSTRING
+
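+// A brief usage sketch for the predicate-formatters above (documented
+// usage is via EXPECT_PRED_FORMAT2):
+//
+//   EXPECT_PRED_FORMAT2(::testing::IsSubstring, "needle",
+//                       "two needles in a haystack");
+//   EXPECT_PRED_FORMAT2(::testing::IsNotSubstring, "pin",
+//                       "two needles in a haystack");
+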
+namespace internal {
+
+#if GTEST_OS_WINDOWS
+
+namespace {
+
+// Helper function for IsHRESULT{SuccessFailure} predicates
+AssertionResult HRESULTFailureHelper(const char* expr,
+ const char* expected,
+ long hr) { // NOLINT
+# if GTEST_OS_WINDOWS_MOBILE
+
+ // Windows CE doesn't support FormatMessage.
+ const char error_text[] = "";
+
+# else
+
+ // Looks up the human-readable system message for the HRESULT code.
+ // Since we're not passing any params to FormatMessage, we don't
+ // want inserts expanded.
+ const DWORD kFlags = FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS;
+ const DWORD kBufSize = 4096; // String::Format can't exceed this length.
+ // Gets the system's human readable message string for this HRESULT.
+ char error_text[kBufSize] = { '\0' };
+ DWORD message_length = ::FormatMessageA(kFlags,
+ 0, // no source, we're asking system
+ hr, // the error
+ 0, // no line width restrictions
+ error_text, // output buffer
+ kBufSize, // buf size
+ NULL); // no arguments for inserts
+ // Trims trailing white space (FormatMessage leaves a trailing CR-LF).
+ for (; message_length && IsSpace(error_text[message_length - 1]);
+ --message_length) {
+ error_text[message_length - 1] = '\0';
+ }
+
+# endif // GTEST_OS_WINDOWS_MOBILE
+
+ const String error_hex(String::Format("0x%08X ", hr));
+ return ::testing::AssertionFailure()
+ << "Expected: " << expr << " " << expected << ".\n"
+ << " Actual: " << error_hex << error_text << "\n";
+}
+
+} // namespace
+
+AssertionResult IsHRESULTSuccess(const char* expr, long hr) { // NOLINT
+ if (SUCCEEDED(hr)) {
+ return AssertionSuccess();
+ }
+ return HRESULTFailureHelper(expr, "succeeds", hr);
+}
+
+AssertionResult IsHRESULTFailure(const char* expr, long hr) { // NOLINT
+ if (FAILED(hr)) {
+ return AssertionSuccess();
+ }
+ return HRESULTFailureHelper(expr, "fails", hr);
+}
+
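+// IsHRESULTSuccess() / IsHRESULTFailure() back the EXPECT_HRESULT_SUCCEEDED
+// and EXPECT_HRESULT_FAILED macros. A brief usage sketch (some_com_call()
+// stands in for any hypothetical HRESULT-returning call):
+//
+//   EXPECT_HRESULT_SUCCEEDED(some_com_call());
+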
+#endif // GTEST_OS_WINDOWS
+
+// Utility functions for encoding Unicode text (wide strings) in
+// UTF-8.
+
+// A Unicode code-point can have up to 21 bits, and is encoded in UTF-8
+// like this:
+//
+// Code-point length Encoding
+// 0 - 7 bits 0xxxxxxx
+// 8 - 11 bits 110xxxxx 10xxxxxx
+// 12 - 16 bits 1110xxxx 10xxxxxx 10xxxxxx
+// 17 - 21 bits 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+
+// The maximum code-point a one-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint1 = (static_cast<UInt32>(1) << 7) - 1;
+
+// The maximum code-point a two-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint2 = (static_cast<UInt32>(1) << (5 + 6)) - 1;
+
+// The maximum code-point a three-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint3 = (static_cast<UInt32>(1) << (4 + 2*6)) - 1;
+
+// The maximum code-point a four-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint4 = (static_cast<UInt32>(1) << (3 + 3*6)) - 1;
+
+// Chops off the n lowest bits from a bit pattern. Returns the n
+// lowest bits. As a side effect, the original bit pattern will be
+// shifted to the right by n bits.
+inline UInt32 ChopLowBits(UInt32* bits, int n) {
+ const UInt32 low_bits = *bits & ((static_cast<UInt32>(1) << n) - 1);
+ *bits >>= n;
+ return low_bits;
+}
+
+// Converts a Unicode code point to a narrow string in UTF-8 encoding.
+// The code_point parameter is of type UInt32 because wchar_t may not be
+// wide enough to contain a code point.
+// The output buffer str must contain at least 32 characters.
+// The function returns the address of the output buffer.
+// If the code_point is not a valid Unicode code point
+// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'.
+char* CodePointToUtf8(UInt32 code_point, char* str) {
+ if (code_point <= kMaxCodePoint1) {
+ str[1] = '\0';
+ str[0] = static_cast<char>(code_point); // 0xxxxxxx
+ } else if (code_point <= kMaxCodePoint2) {
+ str[2] = '\0';
+ str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[0] = static_cast<char>(0xC0 | code_point); // 110xxxxx
+ } else if (code_point <= kMaxCodePoint3) {
+ str[3] = '\0';
+ str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[0] = static_cast<char>(0xE0 | code_point); // 1110xxxx
+ } else if (code_point <= kMaxCodePoint4) {
+ str[4] = '\0';
+ str[3] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[0] = static_cast<char>(0xF0 | code_point); // 11110xxx
+ } else {
+ // The longest string String::Format can produce when invoked
+ // with these parameters is 28 characters long (not including
+ // the terminating nul character). We ask for a 32-character
+ // buffer just in case. This is also enough for strncpy to
+ // null-terminate the destination string.
+ posix::StrNCpy(
+ str, String::Format("(Invalid Unicode 0x%X)", code_point).c_str(), 32);
+ str[31] = '\0'; // Makes sure no change in the format to strncpy leaves
+ // the result unterminated.
+ }
+ return str;
+}
+
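+// A worked example of the encoding above: for code_point U+00E9 (LATIN
+// SMALL LETTER E WITH ACUTE), kMaxCodePoint1 < 0xE9 <= kMaxCodePoint2, so
+// two bytes are produced: the low 6 bits (0x29) become 0x80 | 0x29 = 0xA9
+// and the remaining bits (0x03) become 0xC0 | 0x03 = 0xC3, giving "\xC3\xA9".
+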
+// The following two functions only make sense if the system
+// uses UTF-16 for wide string encoding. All supported systems
+// with 16-bit wchar_t (Windows, Cygwin, Symbian OS) do use UTF-16.
+
+// Determines if the arguments constitute UTF-16 surrogate pair
+// and thus should be combined into a single Unicode code point
+// using CreateCodePointFromUtf16SurrogatePair.
+inline bool IsUtf16SurrogatePair(wchar_t first, wchar_t second) {
+ return sizeof(wchar_t) == 2 &&
+ (first & 0xFC00) == 0xD800 && (second & 0xFC00) == 0xDC00;
+}
+
+// Creates a Unicode code point from UTF16 surrogate pair.
+inline UInt32 CreateCodePointFromUtf16SurrogatePair(wchar_t first,
+ wchar_t second) {
+ const UInt32 mask = (1 << 10) - 1;
+ return (sizeof(wchar_t) == 2) ?
+ (((first & mask) << 10) | (second & mask)) + 0x10000 :
+ // This function should not be called when the condition is
+ // false, but we provide a sensible default in case it is.
+ static_cast<UInt32>(first);
+}
+
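+// A worked example of the two helpers above: U+1F600 is stored in UTF-16 as
+// the surrogate pair (0xD83D, 0xDE00). IsUtf16SurrogatePair() accepts the
+// pair, and CreateCodePointFromUtf16SurrogatePair() reassembles
+// ((0xD83D & 0x3FF) << 10 | (0xDE00 & 0x3FF)) + 0x10000 == 0x1F600, which
+// CodePointToUtf8() then encodes as "\xF0\x9F\x98\x80".
+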
+// Converts a wide string to a narrow string in UTF-8 encoding.
+// The wide string is assumed to have the following encoding:
+// UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
+// UTF-32 if sizeof(wchar_t) == 4 (on Linux)
+// Parameter str points to a null-terminated wide string.
+// Parameter num_chars may additionally limit the number
+// of wchar_t characters processed. -1 is used when the entire string
+// should be processed.
+// If the string contains code points that are not valid Unicode code points
+// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF-16 encoding
+// and contains invalid UTF-16 surrogate pairs, values in those pairs
+// will be encoded as individual Unicode characters from the Basic
+// Multilingual Plane.
+String WideStringToUtf8(const wchar_t* str, int num_chars) {
+ if (num_chars == -1)
+ num_chars = static_cast<int>(wcslen(str));
+
+ ::std::stringstream stream;
+ for (int i = 0; i < num_chars; ++i) {
+ UInt32 unicode_code_point;
+
+ if (str[i] == L'\0') {
+ break;
+ } else if (i + 1 < num_chars && IsUtf16SurrogatePair(str[i], str[i + 1])) {
+ unicode_code_point = CreateCodePointFromUtf16SurrogatePair(str[i],
+ str[i + 1]);
+ i++;
+ } else {
+ unicode_code_point = static_cast<UInt32>(str[i]);
+ }
+
+ char buffer[32]; // CodePointToUtf8 requires a buffer this big.
+ stream << CodePointToUtf8(unicode_code_point, buffer);
+ }
+ return StringStreamToString(&stream);
+}
+
+// Converts a wide C string to a String using the UTF-8 encoding.
+// NULL will be converted to "(null)".
+String String::ShowWideCString(const wchar_t * wide_c_str) {
+ if (wide_c_str == NULL) return String("(null)");
+
+ return String(internal::WideStringToUtf8(wide_c_str, -1).c_str());
+}
+
+// Similar to ShowWideCString(), except that this function encloses
+// the converted string in double quotes.
+String String::ShowWideCStringQuoted(const wchar_t* wide_c_str) {
+ if (wide_c_str == NULL) return String("(null)");
+
+ return String::Format("L\"%s\"",
+ String::ShowWideCString(wide_c_str).c_str());
+}
+
+// Compares two wide C strings. Returns true iff they have the same
+// content.
+//
+// Unlike wcscmp(), this function can handle NULL argument(s). A NULL
+// C string is considered different to any non-NULL C string,
+// including the empty string.
+bool String::WideCStringEquals(const wchar_t * lhs, const wchar_t * rhs) {
+ if (lhs == NULL) return rhs == NULL;
+
+ if (rhs == NULL) return false;
+
+ return wcscmp(lhs, rhs) == 0;
+}
+
+// Helper function for *_STREQ on wide strings.
+AssertionResult CmpHelperSTREQ(const char* expected_expression,
+ const char* actual_expression,
+ const wchar_t* expected,
+ const wchar_t* actual) {
+ if (String::WideCStringEquals(expected, actual)) {
+ return AssertionSuccess();
+ }
+
+ return EqFailure(expected_expression,
+ actual_expression,
+ String::ShowWideCStringQuoted(expected),
+ String::ShowWideCStringQuoted(actual),
+ false);
+}
+
+// Helper function for *_STRNE on wide strings.
+AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const wchar_t* s1,
+ const wchar_t* s2) {
+ if (!String::WideCStringEquals(s1, s2)) {
+ return AssertionSuccess();
+ }
+
+ return AssertionFailure() << "Expected: (" << s1_expression << ") != ("
+ << s2_expression << "), actual: "
+ << String::ShowWideCStringQuoted(s1)
+ << " vs " << String::ShowWideCStringQuoted(s2);
+}
+
+// Compares two C strings, ignoring case. Returns true iff they have
+// the same content.
+//
+// Unlike strcasecmp(), this function can handle NULL argument(s). A
+// NULL C string is considered different to any non-NULL C string,
+// including the empty string.
+bool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) {
+ if (lhs == NULL)
+ return rhs == NULL;
+ if (rhs == NULL)
+ return false;
+ return posix::StrCaseCmp(lhs, rhs) == 0;
+}
+
+// Compares two wide C strings, ignoring case. Returns true iff they
+// have the same content.
+//
+// Unlike wcscasecmp(), this function can handle NULL argument(s).
+// A NULL C string is considered different to any non-NULL wide C string,
+// including the empty string.
+// NB: The implementations on different platforms slightly differ.
+// On Windows, this method uses _wcsicmp, which compares according to the
+// LC_CTYPE environment variable. On GNU platforms this method uses
+// wcscasecmp, which compares according to the LC_CTYPE category of the
+// current locale. On Mac OS X, it uses towlower, which also uses the
+// LC_CTYPE category of the current locale.
+bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
+ const wchar_t* rhs) {
+ if (lhs == NULL) return rhs == NULL;
+
+ if (rhs == NULL) return false;
+
+#if GTEST_OS_WINDOWS
+ return _wcsicmp(lhs, rhs) == 0;
+#elif GTEST_OS_LINUX && !GTEST_OS_LINUX_ANDROID
+ return wcscasecmp(lhs, rhs) == 0;
+#else
+ // Android, Mac OS X and Cygwin don't define wcscasecmp.
+ // Other unknown OSes may not define it either.
+ wint_t left, right;
+ do {
+ left = towlower(*lhs++);
+ right = towlower(*rhs++);
+ } while (left && left == right);
+ return left == right;
+#endif // OS selector
+}
+
+// Compares this with another String.
+// Returns < 0 if this is less than rhs, 0 if this is equal to rhs, or > 0
+// if this is greater than rhs.
+int String::Compare(const String & rhs) const {
+ const char* const lhs_c_str = c_str();
+ const char* const rhs_c_str = rhs.c_str();
+
+ if (lhs_c_str == NULL) {
+ return rhs_c_str == NULL ? 0 : -1; // NULL < anything except NULL
+ } else if (rhs_c_str == NULL) {
+ return 1;
+ }
+
+ const size_t shorter_str_len =
+ length() <= rhs.length() ? length() : rhs.length();
+ for (size_t i = 0; i != shorter_str_len; i++) {
+ if (lhs_c_str[i] < rhs_c_str[i]) {
+ return -1;
+ } else if (lhs_c_str[i] > rhs_c_str[i]) {
+ return 1;
+ }
+ }
+ return (length() < rhs.length()) ? -1 :
+ (length() > rhs.length()) ? 1 : 0;
+}
+
+// Returns true iff this String ends with the given suffix. *Any*
+// String is considered to end with a NULL or empty suffix.
+bool String::EndsWith(const char* suffix) const {
+ if (suffix == NULL || CStringEquals(suffix, "")) return true;
+
+ if (c_str() == NULL) return false;
+
+ const size_t this_len = strlen(c_str());
+ const size_t suffix_len = strlen(suffix);
+ return (this_len >= suffix_len) &&
+ CStringEquals(c_str() + this_len - suffix_len, suffix);
+}
+
+// Returns true iff this String ends with the given suffix, ignoring case.
+// Any String is considered to end with a NULL or empty suffix.
+bool String::EndsWithCaseInsensitive(const char* suffix) const {
+ if (suffix == NULL || CStringEquals(suffix, "")) return true;
+
+ if (c_str() == NULL) return false;
+
+ const size_t this_len = strlen(c_str());
+ const size_t suffix_len = strlen(suffix);
+ return (this_len >= suffix_len) &&
+ CaseInsensitiveCStringEquals(c_str() + this_len - suffix_len, suffix);
+}
+
+// Formats a list of arguments to a String, using the same format
+// spec string as for printf.
+//
+// We do not use the StringPrintf class as it is not universally
+// available.
+//
+// The result is limited to 4096 characters (including the trailing 0).
+// If 4096 characters are not enough to format the input, or if
+// there's an error, "<formatting error or buffer exceeded>" is
+// returned.
+String String::Format(const char * format, ...) {
+ va_list args;
+ va_start(args, format);
+
+ char buffer[4096];
+ const int kBufferSize = sizeof(buffer)/sizeof(buffer[0]);
+
+ // MSVC 8 deprecates vsnprintf(), so we want to suppress warning
+ // 4996 (deprecated function) there.
+#ifdef _MSC_VER // We are using MSVC.
+# pragma warning(push) // Saves the current warning state.
+# pragma warning(disable:4996) // Temporarily disables warning 4996.
+
+ const int size = vsnprintf(buffer, kBufferSize, format, args);
+
+# pragma warning(pop) // Restores the warning state.
+#else // We are not using MSVC.
+ const int size = vsnprintf(buffer, kBufferSize, format, args);
+#endif // _MSC_VER
+ va_end(args);
+
+ // vsnprintf()'s behavior is not portable. When the buffer is not
+ // big enough, it returns a negative value in MSVC, and returns the
+ // needed buffer size on Linux. When there is an output error, it
+ // always returns a negative value. For simplicity, we lump the two
+ // error cases together.
+ if (size < 0 || size >= kBufferSize) {
+ return String("<formatting error or buffer exceeded>");
+ } else {
+ return String(buffer, size);
+ }
+}
+
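+// For example, String::Format("%d-%s", 5, "abc") yields "5-abc", while a
+// format whose expansion does not fit in the 4096-byte buffer yields
+// "<formatting error or buffer exceeded>".
+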
+// Converts the buffer in a stringstream to a String, converting NUL
+// bytes to "\\0" along the way.
+String StringStreamToString(::std::stringstream* ss) {
+ const ::std::string& str = ss->str();
+ const char* const start = str.c_str();
+ const char* const end = start + str.length();
+
+ // We need to use a helper stringstream to do this transformation
+ // because String doesn't support push_back().
+ ::std::stringstream helper;
+ for (const char* ch = start; ch != end; ++ch) {
+ if (*ch == '\0') {
+ helper << "\\0"; // Replaces NUL with "\\0";
+ } else {
+ helper.put(*ch);
+ }
+ }
+
+ return String(helper.str().c_str());
+}
+
+// Appends the user-supplied message to the Google-Test-generated message.
+String AppendUserMessage(const String& gtest_msg,
+ const Message& user_msg) {
+ // Appends the user message if it's non-empty.
+ const String user_msg_string = user_msg.GetString();
+ if (user_msg_string.empty()) {
+ return gtest_msg;
+ }
+
+ Message msg;
+ msg << gtest_msg << "\n" << user_msg_string;
+
+ return msg.GetString();
+}
+
+} // namespace internal
+
+// class TestResult
+
+// Creates an empty TestResult.
+TestResult::TestResult()
+ : death_test_count_(0),
+ elapsed_time_(0) {
+}
+
+// D'tor.
+TestResult::~TestResult() {
+}
+
+// Returns the i-th test part result among all the results. i can
+// range from 0 to total_part_count() - 1. If i is not in that range,
+// aborts the program.
+const TestPartResult& TestResult::GetTestPartResult(int i) const {
+ if (i < 0 || i >= total_part_count())
+ internal::posix::Abort();
+ return test_part_results_.at(i);
+}
+
+// Returns the i-th test property. i can range from 0 to
+// test_property_count() - 1. If i is not in that range, aborts the
+// program.
+const TestProperty& TestResult::GetTestProperty(int i) const {
+ if (i < 0 || i >= test_property_count())
+ internal::posix::Abort();
+ return test_properties_.at(i);
+}
+
+// Clears the test part results.
+void TestResult::ClearTestPartResults() {
+ test_part_results_.clear();
+}
+
+// Adds a test part result to the list.
+void TestResult::AddTestPartResult(const TestPartResult& test_part_result) {
+ test_part_results_.push_back(test_part_result);
+}
+
+// Adds a test property to the list. If a property with the same key as the
+// supplied property is already represented, the value of this test_property
+// replaces the old value for that key.
+void TestResult::RecordProperty(const TestProperty& test_property) {
+ if (!ValidateTestProperty(test_property)) {
+ return;
+ }
+ internal::MutexLock lock(&test_properites_mutex_);
+ const std::vector<TestProperty>::iterator property_with_matching_key =
+ std::find_if(test_properties_.begin(), test_properties_.end(),
+ internal::TestPropertyKeyIs(test_property.key()));
+ if (property_with_matching_key == test_properties_.end()) {
+ test_properties_.push_back(test_property);
+ return;
+ }
+ property_with_matching_key->SetValue(test_property.value());
+}
+
+// Adds a failure if the key is a reserved attribute of the Google Test
+// <testcase> XML element. Returns true if the property is valid.
+bool TestResult::ValidateTestProperty(const TestProperty& test_property) {
+ internal::String key(test_property.key());
+ if (key == "name" || key == "status" || key == "time" || key == "classname") {
+ ADD_FAILURE()
+ << "Reserved key used in RecordProperty(): "
+ << key
+ << " ('name', 'status', 'time', and 'classname' are reserved by "
+ << GTEST_NAME_ << ")";
+ return false;
+ }
+ return true;
+}
+
+// Clears the object.
+void TestResult::Clear() {
+ test_part_results_.clear();
+ test_properties_.clear();
+ death_test_count_ = 0;
+ elapsed_time_ = 0;
+}
+
+// Returns true iff the test failed.
+bool TestResult::Failed() const {
+ for (int i = 0; i < total_part_count(); ++i) {
+ if (GetTestPartResult(i).failed())
+ return true;
+ }
+ return false;
+}
+
+// Returns true iff the test part fatally failed.
+static bool TestPartFatallyFailed(const TestPartResult& result) {
+ return result.fatally_failed();
+}
+
+// Returns true iff the test fatally failed.
+bool TestResult::HasFatalFailure() const {
+ return CountIf(test_part_results_, TestPartFatallyFailed) > 0;
+}
+
+// Returns true iff the test part non-fatally failed.
+static bool TestPartNonfatallyFailed(const TestPartResult& result) {
+ return result.nonfatally_failed();
+}
+
+// Returns true iff the test has a non-fatal failure.
+bool TestResult::HasNonfatalFailure() const {
+ return CountIf(test_part_results_, TestPartNonfatallyFailed) > 0;
+}
+
+// Gets the number of all test parts. This is the sum of the number
+// of successful test parts and the number of failed test parts.
+int TestResult::total_part_count() const {
+ return static_cast<int>(test_part_results_.size());
+}
+
+// Returns the number of the test properties.
+int TestResult::test_property_count() const {
+ return static_cast<int>(test_properties_.size());
+}
+
+// class Test
+
+// Creates a Test object.
+
+// The c'tor saves the values of all Google Test flags.
+Test::Test()
+ : gtest_flag_saver_(new internal::GTestFlagSaver) {
+}
+
+// The d'tor restores the values of all Google Test flags.
+Test::~Test() {
+ delete gtest_flag_saver_;
+}
+
+// Sets up the test fixture.
+//
+// A sub-class may override this.
+void Test::SetUp() {
+}
+
+// Tears down the test fixture.
+//
+// A sub-class may override this.
+void Test::TearDown() {
+}
+
+// Allows user supplied key value pairs to be recorded for later output.
+void Test::RecordProperty(const char* key, const char* value) {
+ UnitTest::GetInstance()->RecordPropertyForCurrentTest(key, value);
+}
+
+// Allows user supplied key value pairs to be recorded for later output.
+void Test::RecordProperty(const char* key, int value) {
+ Message value_message;
+ value_message << value;
+ RecordProperty(key, value_message.GetString().c_str());
+}
+
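+// A brief usage sketch: properties recorded from inside a test body end up
+// as attributes of the corresponding <testcase> element in the XML report
+// (WidgetTest, MaximumWidgets and ComputeMaxUsage() are made-up names):
+//
+//   TEST_F(WidgetTest, Capacity) {
+//     RecordProperty("MaximumWidgets", ComputeMaxUsage());
+//     ...
+//   }
+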
+namespace internal {
+
+void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
+ const String& message) {
+ // This function is a friend of UnitTest and as such has access to
+ // AddTestPartResult.
+ UnitTest::GetInstance()->AddTestPartResult(
+ result_type,
+ NULL, // No info about the source file where the exception occurred.
+ -1, // We have no info on which line caused the exception.
+ message,
+ String()); // No stack trace, either.
+}
+
+} // namespace internal
+
+// Google Test requires all tests in the same test case to use the same test
+// fixture class. This function checks if the current test has the
+// same fixture class as the first test in the current test case. If
+// yes, it returns true; otherwise it generates a Google Test failure and
+// returns false.
+bool Test::HasSameFixtureClass() {
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ const TestCase* const test_case = impl->current_test_case();
+
+ // Info about the first test in the current test case.
+ const TestInfo* const first_test_info = test_case->test_info_list()[0];
+ const internal::TypeId first_fixture_id = first_test_info->fixture_class_id_;
+ const char* const first_test_name = first_test_info->name();
+
+ // Info about the current test.
+ const TestInfo* const this_test_info = impl->current_test_info();
+ const internal::TypeId this_fixture_id = this_test_info->fixture_class_id_;
+ const char* const this_test_name = this_test_info->name();
+
+ if (this_fixture_id != first_fixture_id) {
+ // Is the first test defined using TEST?
+ const bool first_is_TEST = first_fixture_id == internal::GetTestTypeId();
+ // Is this test defined using TEST?
+ const bool this_is_TEST = this_fixture_id == internal::GetTestTypeId();
+
+ if (first_is_TEST || this_is_TEST) {
+ // The user mixed TEST and TEST_F in this test case - we'll tell
+ // him/her how to fix it.
+
+ // Gets the name of the TEST and the name of the TEST_F. Note
+ // that first_is_TEST and this_is_TEST cannot both be true, as
+ // the fixture IDs are different for the two tests.
+ const char* const TEST_name =
+ first_is_TEST ? first_test_name : this_test_name;
+ const char* const TEST_F_name =
+ first_is_TEST ? this_test_name : first_test_name;
+
+ ADD_FAILURE()
+ << "All tests in the same test case must use the same test fixture\n"
+ << "class, so mixing TEST_F and TEST in the same test case is\n"
+ << "illegal. In test case " << this_test_info->test_case_name()
+ << ",\n"
+ << "test " << TEST_F_name << " is defined using TEST_F but\n"
+ << "test " << TEST_name << " is defined using TEST. You probably\n"
+ << "want to change the TEST to TEST_F or move it to another test\n"
+ << "case.";
+ } else {
+ // The user defined two fixture classes with the same name in
+ // two namespaces - we'll tell him/her how to fix it.
+ ADD_FAILURE()
+ << "All tests in the same test case must use the same test fixture\n"
+ << "class. However, in test case "
+ << this_test_info->test_case_name() << ",\n"
+ << "you defined test " << first_test_name
+ << " and test " << this_test_name << "\n"
+ << "using two different test fixture classes. This can happen if\n"
+ << "the two classes are from different namespaces or translation\n"
+ << "units and have the same name. You should probably rename one\n"
+ << "of the classes to put the tests into different test cases.";
+ }
+ return false;
+ }
+
+ return true;
+}
+
+#if GTEST_HAS_SEH
+
+// Adds an "exception thrown" fatal failure to the current test. This
+// function returns its result via an output parameter pointer because VC++
+// prohibits creation of objects with destructors on stack in functions
+// using __try (see error C2712).
+static internal::String* FormatSehExceptionMessage(DWORD exception_code,
+ const char* location) {
+ Message message;
+ message << "SEH exception with code 0x" << std::setbase(16) <<
+ exception_code << std::setbase(10) << " thrown in " << location << ".";
+
+ return new internal::String(message.GetString());
+}
+
+#endif // GTEST_HAS_SEH
+
+#if GTEST_HAS_EXCEPTIONS
+
+// Adds an "exception thrown" fatal failure to the current test.
+static internal::String FormatCxxExceptionMessage(const char* description,
+ const char* location) {
+ Message message;
+ if (description != NULL) {
+ message << "C++ exception with description \"" << description << "\"";
+ } else {
+ message << "Unknown C++ exception";
+ }
+ message << " thrown in " << location << ".";
+
+ return message.GetString();
+}
+
+static internal::String PrintTestPartResultToString(
+ const TestPartResult& test_part_result);
+
+// A failed Google Test assertion will throw an exception of this type when
+// GTEST_FLAG(throw_on_failure) is true (if exceptions are enabled). We
+// derive it from std::runtime_error, which is for errors presumably
+// detectable only at run time. Since std::runtime_error inherits from
+// std::exception, many testing frameworks know how to extract and print the
+// message inside it.
+class GoogleTestFailureException : public ::std::runtime_error {
+ public:
+ explicit GoogleTestFailureException(const TestPartResult& failure)
+ : ::std::runtime_error(PrintTestPartResultToString(failure).c_str()) {}
+};
+#endif // GTEST_HAS_EXCEPTIONS
+
+namespace internal {
+// We put these helper functions in the internal namespace as IBM's xlC
+// compiler rejects the code if they are declared static.
+
+// Runs the given method and handles SEH exceptions it throws, when
+// SEH is supported; returns the 0-value for type Result in case of an
+// SEH exception. (Microsoft compilers cannot handle SEH and C++
+// exceptions in the same function. Therefore, we provide a separate
+// wrapper function for handling SEH exceptions.)
+template <class T, typename Result>
+Result HandleSehExceptionsInMethodIfSupported(
+ T* object, Result (T::*method)(), const char* location) {
+#if GTEST_HAS_SEH
+ __try {
+ return (object->*method)();
+ } __except (internal::UnitTestOptions::GTestShouldProcessSEH( // NOLINT
+ GetExceptionCode())) {
+ // We create the exception message on the heap because VC++ prohibits
+ // creation of objects with destructors on stack in functions using __try
+ // (see error C2712).
+ internal::String* exception_message = FormatSehExceptionMessage(
+ GetExceptionCode(), location);
+ internal::ReportFailureInUnknownLocation(TestPartResult::kFatalFailure,
+ *exception_message);
+ delete exception_message;
+ return static_cast<Result>(0);
+ }
+#else
+ (void)location;
+ return (object->*method)();
+#endif // GTEST_HAS_SEH
+}
+
+// Runs the given method and catches and reports C++ and/or SEH-style
+// exceptions, if they are supported; returns the 0-value for type
+// Result in case of an SEH exception.
+template <class T, typename Result>
+Result HandleExceptionsInMethodIfSupported(
+ T* object, Result (T::*method)(), const char* location) {
+ // NOTE: The user code can affect the way in which Google Test handles
+ // exceptions by setting GTEST_FLAG(catch_exceptions), but only before
+ // RUN_ALL_TESTS() starts. It is technically possible to check the flag
+ // after the exception is caught and either report or re-throw the
+ // exception based on the flag's value:
+ //
+ // try {
+ // // Perform the test method.
+ // } catch (...) {
+ // if (GTEST_FLAG(catch_exceptions))
+ // // Report the exception as failure.
+ // else
+ // throw; // Re-throws the original exception.
+ // }
+ //
+ // However, the purpose of this flag is to allow the program to drop into
+ // the debugger when the exception is thrown. On most platforms, once the
+ // control enters the catch block, the exception origin information is
+ // lost and the debugger will stop the program at the point of the
+ // re-throw in this function -- instead of at the point of the original
+ // throw statement in the code under test. For this reason, we perform
+ // the check early, sacrificing the ability to affect Google Test's
+ // exception handling in the method where the exception is thrown.
+ if (internal::GetUnitTestImpl()->catch_exceptions()) {
+#if GTEST_HAS_EXCEPTIONS
+ try {
+ return HandleSehExceptionsInMethodIfSupported(object, method, location);
+ } catch (const GoogleTestFailureException&) { // NOLINT
+ // This exception doesn't originate in code under test. It makes no
+ // sense to report it as a test failure.
+ throw;
+ } catch (const std::exception& e) { // NOLINT
+ internal::ReportFailureInUnknownLocation(
+ TestPartResult::kFatalFailure,
+ FormatCxxExceptionMessage(e.what(), location));
+ } catch (...) { // NOLINT
+ internal::ReportFailureInUnknownLocation(
+ TestPartResult::kFatalFailure,
+ FormatCxxExceptionMessage(NULL, location));
+ }
+ return static_cast<Result>(0);
+#else
+ return HandleSehExceptionsInMethodIfSupported(object, method, location);
+#endif // GTEST_HAS_EXCEPTIONS
+ } else {
+ return (object->*method)();
+ }
+}
+
+} // namespace internal
+
+// Runs the test and updates the test result.
+void Test::Run() {
+ if (!HasSameFixtureClass()) return;
+
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(this, &Test::SetUp, "SetUp()");
+ // We will run the test only if SetUp() was successful.
+ if (!HasFatalFailure()) {
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(
+ this, &Test::TestBody, "the test body");
+ }
+
+ // However, we want to clean up as much as possible. Hence we will
+ // always call TearDown(), even if SetUp() or the test body has
+ // failed.
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(
+ this, &Test::TearDown, "TearDown()");
+}
+
+// Returns true iff the current test has a fatal failure.
+bool Test::HasFatalFailure() {
+ return internal::GetUnitTestImpl()->current_test_result()->HasFatalFailure();
+}
+
+// Returns true iff the current test has a non-fatal failure.
+bool Test::HasNonfatalFailure() {
+ return internal::GetUnitTestImpl()->current_test_result()->
+ HasNonfatalFailure();
+}
+
+// class TestInfo
+
+// Constructs a TestInfo object. It assumes ownership of the test factory
+// object.
+// TODO(vladl@google.com): Make a_test_case_name and a_name const string&'s
+// to signify they cannot be NULLs.
+TestInfo::TestInfo(const char* a_test_case_name,
+ const char* a_name,
+ const char* a_type_param,
+ const char* a_value_param,
+ internal::TypeId fixture_class_id,
+ internal::TestFactoryBase* factory)
+ : test_case_name_(a_test_case_name),
+ name_(a_name),
+ type_param_(a_type_param ? new std::string(a_type_param) : NULL),
+ value_param_(a_value_param ? new std::string(a_value_param) : NULL),
+ fixture_class_id_(fixture_class_id),
+ should_run_(false),
+ is_disabled_(false),
+ matches_filter_(false),
+ factory_(factory),
+ result_() {}
+
+// Destructs a TestInfo object.
+TestInfo::~TestInfo() { delete factory_; }
+
+namespace internal {
+
+// Creates a new TestInfo object and registers it with Google Test;
+// returns the created object.
+//
+// Arguments:
+//
+// test_case_name: name of the test case
+// name: name of the test
+// type_param: the name of the test's type parameter, or NULL if
+// this is not a typed or a type-parameterized test.
+// value_param: text representation of the test's value parameter,
+// or NULL if this is not a value-parameterized test.
+// fixture_class_id: ID of the test fixture class
+// set_up_tc: pointer to the function that sets up the test case
+// tear_down_tc: pointer to the function that tears down the test case
+// factory: pointer to the factory that creates a test object.
+// The newly created TestInfo instance will assume
+// ownership of the factory object.
+TestInfo* MakeAndRegisterTestInfo(
+ const char* test_case_name, const char* name,
+ const char* type_param,
+ const char* value_param,
+ TypeId fixture_class_id,
+ SetUpTestCaseFunc set_up_tc,
+ TearDownTestCaseFunc tear_down_tc,
+ TestFactoryBase* factory) {
+ TestInfo* const test_info =
+ new TestInfo(test_case_name, name, type_param, value_param,
+ fixture_class_id, factory);
+ GetUnitTestImpl()->AddTestInfo(set_up_tc, tear_down_tc, test_info);
+ return test_info;
+}
+
+#if GTEST_HAS_PARAM_TEST
+void ReportInvalidTestCaseType(const char* test_case_name,
+ const char* file, int line) {
+ Message errors;
+ errors
+ << "Attempted redefinition of test case " << test_case_name << ".\n"
+ << "All tests in the same test case must use the same test fixture\n"
+ << "class. However, in test case " << test_case_name << ", you tried\n"
+ << "to define a test using a fixture class different from the one\n"
+ << "used earlier. This can happen if the two fixture classes are\n"
+ << "from different namespaces and have the same name. You should\n"
+ << "probably rename one of the classes to put the tests into different\n"
+ << "test cases.";
+
+ fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(),
+ errors.GetString().c_str());
+}
+#endif // GTEST_HAS_PARAM_TEST
+
+} // namespace internal
+
+namespace {
+
+// A predicate that checks the test name of a TestInfo against a known
+// value.
+//
+// This is used for implementation of the TestCase class only. We put
+// it in the anonymous namespace to prevent polluting the outer
+// namespace.
+//
+// TestNameIs is copyable.
+class TestNameIs {
+ public:
+ // Constructor.
+ //
+ // TestNameIs has NO default constructor.
+ explicit TestNameIs(const char* name)
+ : name_(name) {}
+
+ // Returns true iff the test name of test_info matches name_.
+ bool operator()(const TestInfo * test_info) const {
+ return test_info && internal::String(test_info->name()).Compare(name_) == 0;
+ }
+
+ private:
+ internal::String name_;
+};
+
+} // namespace
+
+namespace internal {
+
+// This method expands all parameterized tests registered with macros TEST_P
+// and INSTANTIATE_TEST_CASE_P into regular tests and registers those.
+// This will be done just once during the program runtime.
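+//
+// For illustration, a suite defined elsewhere with (hypothetical names)
+//
+//   TEST_P(MyParamTest, HandlesValue) { ... }
+//   INSTANTIATE_TEST_CASE_P(MyInstance, MyParamTest, ::testing::Values(1, 2, 3));
+//
+// is expanded here into three ordinary tests, one per parameter value.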
+void UnitTestImpl::RegisterParameterizedTests() {
+#if GTEST_HAS_PARAM_TEST
+ if (!parameterized_tests_registered_) {
+ parameterized_test_registry_.RegisterTests();
+ parameterized_tests_registered_ = true;
+ }
+#endif
+}
+
+} // namespace internal
+
+// Creates the test object, runs it, records its result, and then
+// deletes it.
+void TestInfo::Run() {
+ if (!should_run_) return;
+
+ // Tells UnitTest where to store test result.
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ impl->set_current_test_info(this);
+
+ TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
+
+ // Notifies the unit test event listeners that a test is about to start.
+ repeater->OnTestStart(*this);
+
+ const TimeInMillis start = internal::GetTimeInMillis();
+
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+
+ // Creates the test object.
+ Test* const test = internal::HandleExceptionsInMethodIfSupported(
+ factory_, &internal::TestFactoryBase::CreateTest,
+ "the test fixture's constructor");
+
+ // Runs the test only if the test object was created and its
+ // constructor didn't generate a fatal failure.
+ if ((test != NULL) && !Test::HasFatalFailure()) {
+ // This doesn't throw as all user code that can throw is wrapped in
+ // exception handling code.
+ test->Run();
+ }
+
+ // Deletes the test object.
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(
+ test, &Test::DeleteSelf_, "the test fixture's destructor");
+
+ result_.set_elapsed_time(internal::GetTimeInMillis() - start);
+
+ // Notifies the unit test event listener that a test has just finished.
+ repeater->OnTestEnd(*this);
+
+ // Tells UnitTest to stop associating assertion results to this
+ // test.
+ impl->set_current_test_info(NULL);
+}
+
+// class TestCase
+
+// Gets the number of successful tests in this test case.
+int TestCase::successful_test_count() const {
+ return CountIf(test_info_list_, TestPassed);
+}
+
+// Gets the number of failed tests in this test case.
+int TestCase::failed_test_count() const {
+ return CountIf(test_info_list_, TestFailed);
+}
+
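+// Gets the number of disabled tests in this test case.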
+int TestCase::disabled_test_count() const {
+ return CountIf(test_info_list_, TestDisabled);
+}
+
+// Gets the number of tests in this test case that should run.
+int TestCase::test_to_run_count() const {
+ return CountIf(test_info_list_, ShouldRunTest);
+}
+
+// Gets the number of all tests.
+int TestCase::total_test_count() const {
+ return static_cast<int>(test_info_list_.size());
+}
+
+// Creates a TestCase with the given name.
+//
+// Arguments:
+//
+// name: name of the test case
+// a_type_param: the name of the test case's type parameter, or NULL if
+// this is not a typed or a type-parameterized test case.
+// set_up_tc: pointer to the function that sets up the test case
+// tear_down_tc: pointer to the function that tears down the test case
+TestCase::TestCase(const char* a_name, const char* a_type_param,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc)
+ : name_(a_name),
+ type_param_(a_type_param ? new std::string(a_type_param) : NULL),
+ set_up_tc_(set_up_tc),
+ tear_down_tc_(tear_down_tc),
+ should_run_(false),
+ elapsed_time_(0) {
+}
+
+// Destructor of TestCase.
+TestCase::~TestCase() {
+ // Deletes every TestInfo in the collection.
+ ForEach(test_info_list_, internal::Delete<TestInfo>);
+}
+
+// Returns the i-th test among all the tests. i can range from 0 to
+// total_test_count() - 1. If i is not in that range, returns NULL.
+const TestInfo* TestCase::GetTestInfo(int i) const {
+ const int index = GetElementOr(test_indices_, i, -1);
+ return index < 0 ? NULL : test_info_list_[index];
+}
+
+// Returns the i-th test among all the tests. i can range from 0 to
+// total_test_count() - 1. If i is not in that range, returns NULL.
+TestInfo* TestCase::GetMutableTestInfo(int i) {
+ const int index = GetElementOr(test_indices_, i, -1);
+ return index < 0 ? NULL : test_info_list_[index];
+}
+
+// Adds a test to this test case. Will delete the test upon
+// destruction of the TestCase object.
+void TestCase::AddTestInfo(TestInfo * test_info) {
+ test_info_list_.push_back(test_info);
+ test_indices_.push_back(static_cast<int>(test_indices_.size()));
+}
+
+// Runs every test in this TestCase.
+void TestCase::Run() {
+ if (!should_run_) return;
+
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ impl->set_current_test_case(this);
+
+ TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
+
+ repeater->OnTestCaseStart(*this);
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(
+ this, &TestCase::RunSetUpTestCase, "SetUpTestCase()");
+
+ const internal::TimeInMillis start = internal::GetTimeInMillis();
+ for (int i = 0; i < total_test_count(); i++) {
+ GetMutableTestInfo(i)->Run();
+ }
+ elapsed_time_ = internal::GetTimeInMillis() - start;
+
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(
+ this, &TestCase::RunTearDownTestCase, "TearDownTestCase()");
+
+ repeater->OnTestCaseEnd(*this);
+ impl->set_current_test_case(NULL);
+}
+
+// Clears the results of all tests in this test case.
+void TestCase::ClearResult() {
+ ForEach(test_info_list_, TestInfo::ClearTestResult);
+}
+
+// Shuffles the tests in this test case.
+void TestCase::ShuffleTests(internal::Random* random) {
+ Shuffle(random, &test_indices_);
+}
+
+// Restores the test order to before the first shuffle.
+void TestCase::UnshuffleTests() {
+ for (size_t i = 0; i < test_indices_.size(); i++) {
+ test_indices_[i] = static_cast<int>(i);
+ }
+}
+
+// Formats a countable noun. Depending on its quantity, either the
+// singular form or the plural form is used, e.g.
+//
+// FormatCountableNoun(1, "formula", "formulae") returns "1 formula".
+// FormatCountableNoun(5, "book", "books") returns "5 books".
+static internal::String FormatCountableNoun(int count,
+ const char * singular_form,
+ const char * plural_form) {
+ return internal::String::Format("%d %s", count,
+ count == 1 ? singular_form : plural_form);
+}
+
+// Formats the count of tests.
+static internal::String FormatTestCount(int test_count) {
+ return FormatCountableNoun(test_count, "test", "tests");
+}
+
+// Formats the count of test cases.
+static internal::String FormatTestCaseCount(int test_case_count) {
+ return FormatCountableNoun(test_case_count, "test case", "test cases");
+}
+
+// Converts a TestPartResult::Type enum to human-friendly string
+// representation. Both kNonFatalFailure and kFatalFailure are translated
+// to "Failure", as the user usually doesn't care about the difference
+// between the two when viewing the test result.
+static const char * TestPartResultTypeToString(TestPartResult::Type type) {
+ switch (type) {
+ case TestPartResult::kSuccess:
+ return "Success";
+
+ case TestPartResult::kNonFatalFailure:
+ case TestPartResult::kFatalFailure:
+#ifdef _MSC_VER
+ return "error: ";
+#else
+ return "Failure\n";
+#endif
+ default:
+ return "Unknown result type";
+ }
+}
+
+// Prints a TestPartResult to a String.
+static internal::String PrintTestPartResultToString(
+ const TestPartResult& test_part_result) {
+ return (Message()
+ << internal::FormatFileLocation(test_part_result.file_name(),
+ test_part_result.line_number())
+ << " " << TestPartResultTypeToString(test_part_result.type())
+ << test_part_result.message()).GetString();
+}
+
+// Prints a TestPartResult.
+static void PrintTestPartResult(const TestPartResult& test_part_result) {
+ const internal::String& result =
+ PrintTestPartResultToString(test_part_result);
+ printf("%s\n", result.c_str());
+ fflush(stdout);
+ // If the test program runs in Visual Studio or a debugger, the
+ // following statements add the test part result message to the Output
+ // window such that the user can double-click on it to jump to the
+ // corresponding source code location; otherwise they do nothing.
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+ // We don't call OutputDebugString*() on Windows Mobile, as printing
+ // to stdout is done by OutputDebugString() there already - we don't
+ // want the same message printed twice.
+ ::OutputDebugStringA(result.c_str());
+ ::OutputDebugStringA("\n");
+#endif
+}
+
+// class PrettyUnitTestResultPrinter
+
+namespace internal {
+
+enum GTestColor {
+ COLOR_DEFAULT,
+ COLOR_RED,
+ COLOR_GREEN,
+ COLOR_YELLOW
+};
+
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+
+// Returns the character attribute for the given color.
+WORD GetColorAttribute(GTestColor color) {
+ switch (color) {
+ case COLOR_RED: return FOREGROUND_RED;
+ case COLOR_GREEN: return FOREGROUND_GREEN;
+ case COLOR_YELLOW: return FOREGROUND_RED | FOREGROUND_GREEN;
+ default: return 0;
+ }
+}
+
+#else
+
+// Returns the ANSI color code for the given color. COLOR_DEFAULT is
+// an invalid input.
+const char* GetAnsiColorCode(GTestColor color) {
+ switch (color) {
+ case COLOR_RED: return "1";
+ case COLOR_GREEN: return "2";
+ case COLOR_YELLOW: return "3";
+ default: return NULL;
+ }
+}
+
+#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+
+// Returns true iff Google Test should use colors in the output.
+bool ShouldUseColor(bool stdout_is_tty) {
+ const char* const gtest_color = GTEST_FLAG(color).c_str();
+
+ if (String::CaseInsensitiveCStringEquals(gtest_color, "auto")) {
+#if GTEST_OS_WINDOWS
+ // On Windows the TERM variable is usually not set, but the
+ // console there does support colors.
+ return stdout_is_tty;
+#else
+ // On non-Windows platforms, we rely on the TERM variable.
+ const char* const term = posix::GetEnv("TERM");
+ const bool term_supports_color =
+ String::CStringEquals(term, "xterm") ||
+ String::CStringEquals(term, "xterm-color") ||
+ String::CStringEquals(term, "xterm-256color") ||
+ String::CStringEquals(term, "screen") ||
+ String::CStringEquals(term, "linux") ||
+ String::CStringEquals(term, "cygwin");
+ return stdout_is_tty && term_supports_color;
+#endif // GTEST_OS_WINDOWS
+ }
+
+ return String::CaseInsensitiveCStringEquals(gtest_color, "yes") ||
+ String::CaseInsensitiveCStringEquals(gtest_color, "true") ||
+ String::CaseInsensitiveCStringEquals(gtest_color, "t") ||
+ String::CStringEquals(gtest_color, "1");
+ // We take "yes", "true", "t", and "1" as meaning "yes". If the
+ // value is neither one of these nor "auto", we treat it as "no" to
+ // be conservative.
+}
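+// In practice this means --gtest_color=yes forces colored output,
+// --gtest_color=no (like any value other than yes/true/t/1/auto) disables it,
+// and the default --gtest_color=auto enables color only when stdout is a TTY
+// (and, outside Windows, TERM names a color-capable terminal).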
+
+// Helpers for printing colored strings to stdout. Note that on Windows, we
+// cannot simply emit special characters and have the terminal change colors.
+// This routine must actually emit the characters rather than return a string
+// that would be colored when printed, as can be done on Linux.
+void ColoredPrintf(GTestColor color, const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+
+#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS
+ const bool use_color = false;
+#else
+ static const bool in_color_mode =
+ ShouldUseColor(posix::IsATTY(posix::FileNo(stdout)) != 0);
+ const bool use_color = in_color_mode && (color != COLOR_DEFAULT);
+#endif // GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS
+ // The '!= 0' comparison is necessary to satisfy MSVC 7.1.
+
+ if (!use_color) {
+ vprintf(fmt, args);
+ va_end(args);
+ return;
+ }
+
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+ const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
+
+ // Gets the current text color.
+ CONSOLE_SCREEN_BUFFER_INFO buffer_info;
+ GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
+ const WORD old_color_attrs = buffer_info.wAttributes;
+
+ // We need to flush the stream buffers into the console before each
+ // SetConsoleTextAttribute call lest it affect the text that is already
+ // printed but has not yet reached the console.
+ fflush(stdout);
+ SetConsoleTextAttribute(stdout_handle,
+ GetColorAttribute(color) | FOREGROUND_INTENSITY);
+ vprintf(fmt, args);
+
+ fflush(stdout);
+ // Restores the text color.
+ SetConsoleTextAttribute(stdout_handle, old_color_attrs);
+#else
+ printf("\033[0;3%sm", GetAnsiColorCode(color));
+ vprintf(fmt, args);
+ printf("\033[m"); // Resets the terminal to default.
+#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+ va_end(args);
+}
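+// For example, on a color-capable non-Windows terminal a call like
+//   ColoredPrintf(COLOR_GREEN, "[==========] ");
+// emits "\033[0;32m[==========] \033[m", since GetAnsiColorCode(COLOR_GREEN)
+// returns "2"; when color is unavailable the text is printed verbatim.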
+
+void PrintFullTestCommentIfPresent(const TestInfo& test_info) {
+ const char* const type_param = test_info.type_param();
+ const char* const value_param = test_info.value_param();
+
+ if (type_param != NULL || value_param != NULL) {
+ printf(", where ");
+ if (type_param != NULL) {
+ printf("TypeParam = %s", type_param);
+ if (value_param != NULL)
+ printf(" and ");
+ }
+ if (value_param != NULL) {
+ printf("GetParam() = %s", value_param);
+ }
+ }
+}
+
+// This class implements the TestEventListener interface.
+//
+// Class PrettyUnitTestResultPrinter is copyable.
+class PrettyUnitTestResultPrinter : public TestEventListener {
+ public:
+ PrettyUnitTestResultPrinter() {}
+ static void PrintTestName(const char * test_case, const char * test) {
+ printf("%s.%s", test_case, test);
+ }
+
+ // The following methods override what's in the TestEventListener class.
+ virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration);
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test);
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestCaseStart(const TestCase& test_case);
+ virtual void OnTestStart(const TestInfo& test_info);
+ virtual void OnTestPartResult(const TestPartResult& result);
+ virtual void OnTestEnd(const TestInfo& test_info);
+ virtual void OnTestCaseEnd(const TestCase& test_case);
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test);
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+ virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {}
+
+ private:
+ static void PrintFailedTests(const UnitTest& unit_test);
+
+ internal::String test_case_name_;
+};
+
+ // Fired before each iteration of tests starts.
+void PrettyUnitTestResultPrinter::OnTestIterationStart(
+ const UnitTest& unit_test, int iteration) {
+ if (GTEST_FLAG(repeat) != 1)
+ printf("\nRepeating all tests (iteration %d) . . .\n\n", iteration + 1);
+
+ const char* const filter = GTEST_FLAG(filter).c_str();
+
+ // Prints the filter if it's not *. This reminds the user that some
+ // tests may be skipped.
+ if (!internal::String::CStringEquals(filter, kUniversalFilter)) {
+ ColoredPrintf(COLOR_YELLOW,
+ "Note: %s filter = %s\n", GTEST_NAME_, filter);
+ }
+
+ if (internal::ShouldShard(kTestTotalShards, kTestShardIndex, false)) {
+ const Int32 shard_index = Int32FromEnvOrDie(kTestShardIndex, -1);
+ ColoredPrintf(COLOR_YELLOW,
+ "Note: This is test shard %d of %s.\n",
+ static_cast<int>(shard_index) + 1,
+ internal::posix::GetEnv(kTestTotalShards));
+ }
+
+ if (GTEST_FLAG(shuffle)) {
+ ColoredPrintf(COLOR_YELLOW,
+ "Note: Randomizing tests' orders with a seed of %d .\n",
+ unit_test.random_seed());
+ }
+
+ ColoredPrintf(COLOR_GREEN, "[==========] ");
+ printf("Running %s from %s.\n",
+ FormatTestCount(unit_test.test_to_run_count()).c_str(),
+ FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str());
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnEnvironmentsSetUpStart(
+ const UnitTest& /*unit_test*/) {
+ ColoredPrintf(COLOR_GREEN, "[----------] ");
+ printf("Global test environment set-up.\n");
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestCase& test_case) {
+ test_case_name_ = test_case.name();
+ const internal::String counts =
+ FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
+ ColoredPrintf(COLOR_GREEN, "[----------] ");
+ printf("%s from %s", counts.c_str(), test_case_name_.c_str());
+ if (test_case.type_param() == NULL) {
+ printf("\n");
+ } else {
+ printf(", where TypeParam = %s\n", test_case.type_param());
+ }
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo& test_info) {
+ ColoredPrintf(COLOR_GREEN, "[ RUN ] ");
+ PrintTestName(test_case_name_.c_str(), test_info.name());
+ printf("\n");
+ fflush(stdout);
+}
+
+// Called after an assertion failure.
+void PrettyUnitTestResultPrinter::OnTestPartResult(
+ const TestPartResult& result) {
+ // If the test part succeeded, we don't need to do anything.
+ if (result.type() == TestPartResult::kSuccess)
+ return;
+
+ // Print failure message from the assertion (e.g. expected this and got that).
+ PrintTestPartResult(result);
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) {
+ if (test_info.result()->Passed()) {
+ ColoredPrintf(COLOR_GREEN, "[ OK ] ");
+ } else {
+ ColoredPrintf(COLOR_RED, "[ FAILED ] ");
+ }
+ PrintTestName(test_case_name_.c_str(), test_info.name());
+ if (test_info.result()->Failed())
+ PrintFullTestCommentIfPresent(test_info);
+
+ if (GTEST_FLAG(print_time)) {
+ printf(" (%s ms)\n", internal::StreamableToString(
+ test_info.result()->elapsed_time()).c_str());
+ } else {
+ printf("\n");
+ }
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestCase& test_case) {
+ if (!GTEST_FLAG(print_time)) return;
+
+ test_case_name_ = test_case.name();
+ const internal::String counts =
+ FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
+ ColoredPrintf(COLOR_GREEN, "[----------] ");
+ printf("%s from %s (%s ms total)\n\n",
+ counts.c_str(), test_case_name_.c_str(),
+ internal::StreamableToString(test_case.elapsed_time()).c_str());
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnEnvironmentsTearDownStart(
+ const UnitTest& /*unit_test*/) {
+ ColoredPrintf(COLOR_GREEN, "[----------] ");
+ printf("Global test environment tear-down\n");
+ fflush(stdout);
+}
+
+// Internal helper for printing the list of failed tests.
+void PrettyUnitTestResultPrinter::PrintFailedTests(const UnitTest& unit_test) {
+ const int failed_test_count = unit_test.failed_test_count();
+ if (failed_test_count == 0) {
+ return;
+ }
+
+ for (int i = 0; i < unit_test.total_test_case_count(); ++i) {
+ const TestCase& test_case = *unit_test.GetTestCase(i);
+ if (!test_case.should_run() || (test_case.failed_test_count() == 0)) {
+ continue;
+ }
+ for (int j = 0; j < test_case.total_test_count(); ++j) {
+ const TestInfo& test_info = *test_case.GetTestInfo(j);
+ if (!test_info.should_run() || test_info.result()->Passed()) {
+ continue;
+ }
+ ColoredPrintf(COLOR_RED, "[ FAILED ] ");
+ printf("%s.%s", test_case.name(), test_info.name());
+ PrintFullTestCommentIfPresent(test_info);
+ printf("\n");
+ }
+ }
+}
+
+void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
+ int /*iteration*/) {
+ ColoredPrintf(COLOR_GREEN, "[==========] ");
+ printf("%s from %s ran.",
+ FormatTestCount(unit_test.test_to_run_count()).c_str(),
+ FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str());
+ if (GTEST_FLAG(print_time)) {
+ printf(" (%s ms total)",
+ internal::StreamableToString(unit_test.elapsed_time()).c_str());
+ }
+ printf("\n");
+ ColoredPrintf(COLOR_GREEN, "[ PASSED ] ");
+ printf("%s.\n", FormatTestCount(unit_test.successful_test_count()).c_str());
+
+ int num_failures = unit_test.failed_test_count();
+ if (!unit_test.Passed()) {
+ const int failed_test_count = unit_test.failed_test_count();
+ ColoredPrintf(COLOR_RED, "[ FAILED ] ");
+ printf("%s, listed below:\n", FormatTestCount(failed_test_count).c_str());
+ PrintFailedTests(unit_test);
+ printf("\n%2d FAILED %s\n", num_failures,
+ num_failures == 1 ? "TEST" : "TESTS");
+ }
+
+ int num_disabled = unit_test.disabled_test_count();
+ if (num_disabled && !GTEST_FLAG(also_run_disabled_tests)) {
+ if (!num_failures) {
+ printf("\n"); // Add a spacer if no FAILURE banner is displayed.
+ }
+ ColoredPrintf(COLOR_YELLOW,
+ " YOU HAVE %d DISABLED %s\n\n",
+ num_disabled,
+ num_disabled == 1 ? "TEST" : "TESTS");
+ }
+ // Ensure that Google Test output is printed before, e.g., heapchecker output.
+ fflush(stdout);
+}
+
+// End PrettyUnitTestResultPrinter
+
+// class TestEventRepeater
+//
+// This class forwards events to other event listeners.
+class TestEventRepeater : public TestEventListener {
+ public:
+ TestEventRepeater() : forwarding_enabled_(true) {}
+ virtual ~TestEventRepeater();
+ void Append(TestEventListener *listener);
+ TestEventListener* Release(TestEventListener* listener);
+
+ // Controls whether events will be forwarded to listeners_. Set to false
+ // in death test child processes.
+ bool forwarding_enabled() const { return forwarding_enabled_; }
+ void set_forwarding_enabled(bool enable) { forwarding_enabled_ = enable; }
+
+ virtual void OnTestProgramStart(const UnitTest& unit_test);
+ virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration);
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test);
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test);
+ virtual void OnTestCaseStart(const TestCase& test_case);
+ virtual void OnTestStart(const TestInfo& test_info);
+ virtual void OnTestPartResult(const TestPartResult& result);
+ virtual void OnTestEnd(const TestInfo& test_info);
+ virtual void OnTestCaseEnd(const TestCase& test_case);
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test);
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test);
+ virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+ virtual void OnTestProgramEnd(const UnitTest& unit_test);
+
+ private:
+ // Controls whether events will be forwarded to listeners_. Set to false
+ // in death test child processes.
+ bool forwarding_enabled_;
+ // The list of listeners that receive events.
+ std::vector<TestEventListener*> listeners_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventRepeater);
+};
+
+TestEventRepeater::~TestEventRepeater() {
+ ForEach(listeners_, Delete<TestEventListener>);
+}
+
+void TestEventRepeater::Append(TestEventListener *listener) {
+ listeners_.push_back(listener);
+}
+
+// TODO(vladl@google.com): Factor the search functionality into Vector::Find.
+TestEventListener* TestEventRepeater::Release(TestEventListener *listener) {
+ for (size_t i = 0; i < listeners_.size(); ++i) {
+ if (listeners_[i] == listener) {
+ listeners_.erase(listeners_.begin() + i);
+ return listener;
+ }
+ }
+
+ return NULL;
+}
+
+// Since most methods are very similar, use macros to reduce boilerplate.
+// This defines a member that forwards the call to all listeners.
+#define GTEST_REPEATER_METHOD_(Name, Type) \
+void TestEventRepeater::Name(const Type& parameter) { \
+ if (forwarding_enabled_) { \
+ for (size_t i = 0; i < listeners_.size(); i++) { \
+ listeners_[i]->Name(parameter); \
+ } \
+ } \
+}
+// This defines a member that forwards the call to all listeners in reverse
+// order.
+#define GTEST_REVERSE_REPEATER_METHOD_(Name, Type) \
+void TestEventRepeater::Name(const Type& parameter) { \
+ if (forwarding_enabled_) { \
+ for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) { \
+ listeners_[i]->Name(parameter); \
+ } \
+ } \
+}
+
+GTEST_REPEATER_METHOD_(OnTestProgramStart, UnitTest)
+GTEST_REPEATER_METHOD_(OnEnvironmentsSetUpStart, UnitTest)
+GTEST_REPEATER_METHOD_(OnTestCaseStart, TestCase)
+GTEST_REPEATER_METHOD_(OnTestStart, TestInfo)
+GTEST_REPEATER_METHOD_(OnTestPartResult, TestPartResult)
+GTEST_REPEATER_METHOD_(OnEnvironmentsTearDownStart, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsSetUpEnd, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsTearDownEnd, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestEnd, TestInfo)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestCaseEnd, TestCase)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestProgramEnd, UnitTest)
+
+#undef GTEST_REPEATER_METHOD_
+#undef GTEST_REVERSE_REPEATER_METHOD_
+
+void TestEventRepeater::OnTestIterationStart(const UnitTest& unit_test,
+ int iteration) {
+ if (forwarding_enabled_) {
+ for (size_t i = 0; i < listeners_.size(); i++) {
+ listeners_[i]->OnTestIterationStart(unit_test, iteration);
+ }
+ }
+}
+
+void TestEventRepeater::OnTestIterationEnd(const UnitTest& unit_test,
+ int iteration) {
+ if (forwarding_enabled_) {
+ for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) {
+ listeners_[i]->OnTestIterationEnd(unit_test, iteration);
+ }
+ }
+}
+
+// End TestEventRepeater
+
+// This class generates an XML output file.
+class XmlUnitTestResultPrinter : public EmptyTestEventListener {
+ public:
+ explicit XmlUnitTestResultPrinter(const char* output_file);
+
+ virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+
+ private:
+ // Is c a whitespace character that is normalized to a space character
+ // when it appears in an XML attribute value?
+ static bool IsNormalizableWhitespace(char c) {
+ return c == 0x9 || c == 0xA || c == 0xD;
+ }
+
+ // May c appear in a well-formed XML document?
+ static bool IsValidXmlCharacter(char c) {
+ return IsNormalizableWhitespace(c) || c >= 0x20;
+ }
+
+ // Returns an XML-escaped copy of the input string str. If
+ // is_attribute is true, the text is meant to appear as an attribute
+ // value, and normalizable whitespace is preserved by replacing it
+ // with character references.
+ static String EscapeXml(const char* str, bool is_attribute);
+
+ // Returns the given string with all characters invalid in XML removed.
+ static string RemoveInvalidXmlCharacters(const string& str);
+
+ // Convenience wrapper around EscapeXml when str is an attribute value.
+ static String EscapeXmlAttribute(const char* str) {
+ return EscapeXml(str, true);
+ }
+
+ // Convenience wrapper around EscapeXml when str is not an attribute value.
+ static String EscapeXmlText(const char* str) { return EscapeXml(str, false); }
+
+ // Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
+ static void OutputXmlCDataSection(::std::ostream* stream, const char* data);
+
+ // Streams an XML representation of a TestInfo object.
+ static void OutputXmlTestInfo(::std::ostream* stream,
+ const char* test_case_name,
+ const TestInfo& test_info);
+
+ // Prints an XML representation of a TestCase object
+ static void PrintXmlTestCase(FILE* out, const TestCase& test_case);
+
+ // Prints an XML summary of unit_test to output stream out.
+ static void PrintXmlUnitTest(FILE* out, const UnitTest& unit_test);
+
+ // Produces a string representing the test properties in a result as space
+ // delimited XML attributes based on the property key="value" pairs.
+ // When the String is not empty, it includes a space at the beginning,
+ // to delimit this attribute from prior attributes.
+ static String TestPropertiesAsXmlAttributes(const TestResult& result);
+
+ // The output file.
+ const String output_file_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(XmlUnitTestResultPrinter);
+};
+
+// Creates a new XmlUnitTestResultPrinter.
+XmlUnitTestResultPrinter::XmlUnitTestResultPrinter(const char* output_file)
+ : output_file_(output_file) {
+ if (output_file_.c_str() == NULL || output_file_.empty()) {
+ fprintf(stderr, "XML output file may not be null\n");
+ fflush(stderr);
+ exit(EXIT_FAILURE);
+ }
+}
+
+// Called after the unit test ends.
+void XmlUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
+ int /*iteration*/) {
+ FILE* xmlout = NULL;
+ FilePath output_file(output_file_);
+ FilePath output_dir(output_file.RemoveFileName());
+
+ if (output_dir.CreateDirectoriesRecursively()) {
+ xmlout = posix::FOpen(output_file_.c_str(), "w");
+ }
+ if (xmlout == NULL) {
+ // TODO(wan): report the reason of the failure.
+ //
+ // We don't do it for now as:
+ //
+ // 1. There is no urgent need for it.
+ // 2. It's a bit involved to make the errno variable thread-safe on
+ // all three operating systems (Linux, Windows, and Mac OS).
+ // 3. To interpret the meaning of errno in a thread-safe way,
+ // we need the strerror_r() function, which is not available on
+ // Windows.
+ fprintf(stderr,
+ "Unable to open file \"%s\"\n",
+ output_file_.c_str());
+ fflush(stderr);
+ exit(EXIT_FAILURE);
+ }
+ PrintXmlUnitTest(xmlout, unit_test);
+ fclose(xmlout);
+}
+
+// Returns an XML-escaped copy of the input string str. If is_attribute
+// is true, the text is meant to appear as an attribute value, and
+// normalizable whitespace is preserved by replacing it with character
+// references.
+//
+// Invalid XML characters in str, if any, are stripped from the output.
+// It is expected that most, if not all, of the text processed by this
+// module will consist of ordinary English text.
+// If this module is ever modified to produce version 1.1 XML output,
+// most invalid characters can be retained using character references.
+// TODO(wan): It might be nice to have a minimally invasive, human-readable
+// escaping scheme for invalid characters, rather than dropping them.
+String XmlUnitTestResultPrinter::EscapeXml(const char* str, bool is_attribute) {
+ Message m;
+
+ if (str != NULL) {
+ for (const char* src = str; *src; ++src) {
+ switch (*src) {
+ case '<':
+ m << "&lt;";
+ break;
+ case '>':
+ m << "&gt;";
+ break;
+ case '&':
+ m << "&amp;";
+ break;
+ case '\'':
+ if (is_attribute)
+ m << "&apos;";
+ else
+ m << '\'';
+ break;
+ case '"':
+ if (is_attribute)
+ m << "&quot;";
+ else
+ m << '"';
+ break;
+ default:
+ if (IsValidXmlCharacter(*src)) {
+ if (is_attribute && IsNormalizableWhitespace(*src))
+ m << String::Format("&#x%02X;", unsigned(*src));
+ else
+ m << *src;
+ }
+ break;
+ }
+ }
+ }
+
+ return m.GetString();
+}
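+// For example (hypothetical input), EscapeXml("<a> & 'b'", true) yields
+// "&lt;a&gt; &amp; &apos;b&apos;"; with is_attribute == false the single
+// quotes would instead be passed through unchanged.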
+
+// Returns the given string with all characters invalid in XML removed.
+// Currently invalid characters are dropped from the string. An
+// alternative is to replace them with certain characters such as . or ?.
+string XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters(const string& str) {
+ string output;
+ output.reserve(str.size());
+ for (string::const_iterator it = str.begin(); it != str.end(); ++it)
+ if (IsValidXmlCharacter(*it))
+ output.push_back(*it);
+
+ return output;
+}
+
+// The following routines generate an XML representation of a UnitTest
+// object.
+//
+// This is how Google Test concepts map to the DTD:
+//
+// <testsuites name="AllTests"> <-- corresponds to a UnitTest object
+// <testsuite name="testcase-name"> <-- corresponds to a TestCase object
+// <testcase name="test-name"> <-- corresponds to a TestInfo object
+// <failure message="...">...</failure>
+// <failure message="...">...</failure>
+// <failure message="...">...</failure>
+// <-- individual assertion failures
+// </testcase>
+// </testsuite>
+// </testsuites>
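+//
+// For a (hypothetical) run with one passing and one failing test in a test
+// case named FooTest, the emitted XML would look roughly like:
+//
+//   <?xml version="1.0" encoding="UTF-8"?>
+//   <testsuites tests="2" failures="1" disabled="0" errors="0" time="0.01"
+//               name="AllTests">
+//     <testsuite name="FooTest" tests="2" failures="1" disabled="0"
+//                errors="0" time="0.01">
+//       <testcase name="Passes" status="run" time="0.004" classname="FooTest" />
+//       <testcase name="Fails" status="run" time="0.006" classname="FooTest">
+//         <failure message="..." type=""><![CDATA[...]]></failure>
+//       </testcase>
+//     </testsuite>
+//   </testsuites>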
+
+// Formats the given time in milliseconds as seconds.
+std::string FormatTimeInMillisAsSeconds(TimeInMillis ms) {
+ ::std::stringstream ss;
+ ss << ms/1000.0;
+ return ss.str();
+}
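+// For example, FormatTimeInMillisAsSeconds(2500) returns "2.5" and
+// FormatTimeInMillisAsSeconds(3) returns "0.003".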
+
+// Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
+void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream,
+ const char* data) {
+ const char* segment = data;
+ *stream << "<![CDATA[";
+ for (;;) {
+ const char* const next_segment = strstr(segment, "]]>");
+ if (next_segment != NULL) {
+ stream->write(
+ segment, static_cast<std::streamsize>(next_segment - segment));
+ *stream << "]]>]]&gt;<![CDATA[";
+ segment = next_segment + strlen("]]>");
+ } else {
+ *stream << segment;
+ break;
+ }
+ }
+ *stream << "]]>";
+}
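+// For example (hypothetical data), the input "x]]>y" is emitted as
+// "<![CDATA[x]]>]]&gt;<![CDATA[y]]>", so that a literal "]]>" inside the
+// data cannot prematurely terminate the CDATA section.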
+
+// Prints an XML representation of a TestInfo object.
+// TODO(wan): There is also value in printing properties with the plain printer.
+void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream,
+ const char* test_case_name,
+ const TestInfo& test_info) {
+ const TestResult& result = *test_info.result();
+ *stream << " <testcase name=\""
+ << EscapeXmlAttribute(test_info.name()).c_str() << "\"";
+
+ if (test_info.value_param() != NULL) {
+ *stream << " value_param=\"" << EscapeXmlAttribute(test_info.value_param())
+ << "\"";
+ }
+ if (test_info.type_param() != NULL) {
+ *stream << " type_param=\"" << EscapeXmlAttribute(test_info.type_param())
+ << "\"";
+ }
+
+ *stream << " status=\""
+ << (test_info.should_run() ? "run" : "notrun")
+ << "\" time=\""
+ << FormatTimeInMillisAsSeconds(result.elapsed_time())
+ << "\" classname=\"" << EscapeXmlAttribute(test_case_name).c_str()
+ << "\"" << TestPropertiesAsXmlAttributes(result).c_str();
+
+ int failures = 0;
+ for (int i = 0; i < result.total_part_count(); ++i) {
+ const TestPartResult& part = result.GetTestPartResult(i);
+ if (part.failed()) {
+ if (++failures == 1)
+ *stream << ">\n";
+ *stream << " <failure message=\""
+ << EscapeXmlAttribute(part.summary()).c_str()
+ << "\" type=\"\">";
+ const string location = internal::FormatCompilerIndependentFileLocation(
+ part.file_name(), part.line_number());
+ const string message = location + "\n" + part.message();
+ OutputXmlCDataSection(stream,
+ RemoveInvalidXmlCharacters(message).c_str());
+ *stream << "</failure>\n";
+ }
+ }
+
+ if (failures == 0)
+ *stream << " />\n";
+ else
+ *stream << " </testcase>\n";
+}
+
+// Prints an XML representation of a TestCase object
+void XmlUnitTestResultPrinter::PrintXmlTestCase(FILE* out,
+ const TestCase& test_case) {
+ fprintf(out,
+ " <testsuite name=\"%s\" tests=\"%d\" failures=\"%d\" "
+ "disabled=\"%d\" ",
+ EscapeXmlAttribute(test_case.name()).c_str(),
+ test_case.total_test_count(),
+ test_case.failed_test_count(),
+ test_case.disabled_test_count());
+ fprintf(out,
+ "errors=\"0\" time=\"%s\">\n",
+ FormatTimeInMillisAsSeconds(test_case.elapsed_time()).c_str());
+ for (int i = 0; i < test_case.total_test_count(); ++i) {
+ ::std::stringstream stream;
+ OutputXmlTestInfo(&stream, test_case.name(), *test_case.GetTestInfo(i));
+ fprintf(out, "%s", StringStreamToString(&stream).c_str());
+ }
+ fprintf(out, " </testsuite>\n");
+}
+
+// Prints an XML summary of unit_test to output stream out.
+void XmlUnitTestResultPrinter::PrintXmlUnitTest(FILE* out,
+ const UnitTest& unit_test) {
+ fprintf(out, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
+ fprintf(out,
+ "<testsuites tests=\"%d\" failures=\"%d\" disabled=\"%d\" "
+ "errors=\"0\" time=\"%s\" ",
+ unit_test.total_test_count(),
+ unit_test.failed_test_count(),
+ unit_test.disabled_test_count(),
+ FormatTimeInMillisAsSeconds(unit_test.elapsed_time()).c_str());
+ if (GTEST_FLAG(shuffle)) {
+ fprintf(out, "random_seed=\"%d\" ", unit_test.random_seed());
+ }
+ fprintf(out, "name=\"AllTests\">\n");
+ for (int i = 0; i < unit_test.total_test_case_count(); ++i)
+ PrintXmlTestCase(out, *unit_test.GetTestCase(i));
+ fprintf(out, "</testsuites>\n");
+}
+
+// Produces a string representing the test properties in a result as space
+// delimited XML attributes based on the property key="value" pairs.
+String XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes(
+ const TestResult& result) {
+ Message attributes;
+ for (int i = 0; i < result.test_property_count(); ++i) {
+ const TestProperty& property = result.GetTestProperty(i);
+ attributes << " " << property.key() << "="
+ << "\"" << EscapeXmlAttribute(property.value()) << "\"";
+ }
+ return attributes.GetString();
+}
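+// For example, a result carrying the (hypothetical) property
+// ("MaxResidual", "0.01") contributes the string " MaxResidual=\"0.01\"",
+// including the leading space.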
+
+// End XmlUnitTestResultPrinter
+
+#if GTEST_CAN_STREAM_RESULTS_
+
+// Streams test results to the given port on the given host machine.
+class StreamingListener : public EmptyTestEventListener {
+ public:
+ // Escapes '=', '&', '%', and '\n' characters in str as "%xx".
+ static string UrlEncode(const char* str);
+
+ StreamingListener(const string& host, const string& port)
+ : sockfd_(-1), host_name_(host), port_num_(port) {
+ MakeConnection();
+ Send("gtest_streaming_protocol_version=1.0\n");
+ }
+
+ virtual ~StreamingListener() {
+ if (sockfd_ != -1)
+ CloseConnection();
+ }
+
+ void OnTestProgramStart(const UnitTest& /* unit_test */) {
+ Send("event=TestProgramStart\n");
+ }
+
+ void OnTestProgramEnd(const UnitTest& unit_test) {
+ // Note that Google Test currently only reports elapsed time for each
+ // test iteration, not for the entire test program.
+ Send(String::Format("event=TestProgramEnd&passed=%d\n",
+ unit_test.Passed()));
+
+ // Notify the streaming server to stop.
+ CloseConnection();
+ }
+
+ void OnTestIterationStart(const UnitTest& /* unit_test */, int iteration) {
+ Send(String::Format("event=TestIterationStart&iteration=%d\n",
+ iteration));
+ }
+
+ void OnTestIterationEnd(const UnitTest& unit_test, int /* iteration */) {
+ Send(String::Format("event=TestIterationEnd&passed=%d&elapsed_time=%sms\n",
+ unit_test.Passed(),
+ StreamableToString(unit_test.elapsed_time()).c_str()));
+ }
+
+ void OnTestCaseStart(const TestCase& test_case) {
+ Send(String::Format("event=TestCaseStart&name=%s\n", test_case.name()));
+ }
+
+ void OnTestCaseEnd(const TestCase& test_case) {
+ Send(String::Format("event=TestCaseEnd&passed=%d&elapsed_time=%sms\n",
+ test_case.Passed(),
+ StreamableToString(test_case.elapsed_time()).c_str()));
+ }
+
+ void OnTestStart(const TestInfo& test_info) {
+ Send(String::Format("event=TestStart&name=%s\n", test_info.name()));
+ }
+
+ void OnTestEnd(const TestInfo& test_info) {
+ Send(String::Format(
+ "event=TestEnd&passed=%d&elapsed_time=%sms\n",
+ (test_info.result())->Passed(),
+ StreamableToString((test_info.result())->elapsed_time()).c_str()));
+ }
+
+ void OnTestPartResult(const TestPartResult& test_part_result) {
+ const char* file_name = test_part_result.file_name();
+ if (file_name == NULL)
+ file_name = "";
+ Send(String::Format("event=TestPartResult&file=%s&line=%d&message=",
+ UrlEncode(file_name).c_str(),
+ test_part_result.line_number()));
+ Send(UrlEncode(test_part_result.message()) + "\n");
+ }
+
+ private:
+ // Creates a client socket and connects to the server.
+ void MakeConnection();
+
+ // Closes the socket.
+ void CloseConnection() {
+ GTEST_CHECK_(sockfd_ != -1)
+ << "CloseConnection() can be called only when there is a connection.";
+
+ close(sockfd_);
+ sockfd_ = -1;
+ }
+
+ // Sends a string to the socket.
+ void Send(const string& message) {
+ GTEST_CHECK_(sockfd_ != -1)
+ << "Send() can be called only when there is a connection.";
+
+ const int len = static_cast<int>(message.length());
+ if (write(sockfd_, message.c_str(), len) != len) {
+ GTEST_LOG_(WARNING)
+ << "stream_result_to: failed to stream to "
+ << host_name_ << ":" << port_num_;
+ }
+ }
+
+ int sockfd_; // socket file descriptor
+ const string host_name_;
+ const string port_num_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener);
+}; // class StreamingListener
+
+// Checks if str contains '=', '&', '%' or '\n' characters. If yes,
+// replaces them by "%xx" where xx is their hexadecimal value. For
+// example, replaces "=" with "%3d". This algorithm is O(strlen(str))
+// in both time and space -- important as the input str may contain an
+// arbitrarily long test failure message and stack trace.
+string StreamingListener::UrlEncode(const char* str) {
+ string result;
+ result.reserve(strlen(str) + 1);
+ for (char ch = *str; ch != '\0'; ch = *++str) {
+ switch (ch) {
+ case '%':
+ case '=':
+ case '&':
+ case '\n':
+ result.append(String::Format("%%%02x", static_cast<unsigned char>(ch)));
+ break;
+ default:
+ result.push_back(ch);
+ break;
+ }
+ }
+ return result;
+}
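+// For example (hypothetical input), UrlEncode("a=b&c\n") returns
+// "a%3db%26c%0a"; all other characters are copied through unchanged.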
+
+void StreamingListener::MakeConnection() {
+ GTEST_CHECK_(sockfd_ == -1)
+ << "MakeConnection() can't be called when there is already a connection.";
+
+ addrinfo hints;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC; // To allow both IPv4 and IPv6 addresses.
+ hints.ai_socktype = SOCK_STREAM;
+ addrinfo* servinfo = NULL;
+
+ // Use getaddrinfo() to get a linked list of IP addresses for
+ // the given host name.
+ const int error_num = getaddrinfo(
+ host_name_.c_str(), port_num_.c_str(), &hints, &servinfo);
+ if (error_num != 0) {
+ GTEST_LOG_(WARNING) << "stream_result_to: getaddrinfo() failed: "
+ << gai_strerror(error_num);
+ }
+
+ // Loop through all the results and connect to the first we can.
+ for (addrinfo* cur_addr = servinfo; sockfd_ == -1 && cur_addr != NULL;
+ cur_addr = cur_addr->ai_next) {
+ sockfd_ = socket(
+ cur_addr->ai_family, cur_addr->ai_socktype, cur_addr->ai_protocol);
+ if (sockfd_ != -1) {
+ // Connect the client socket to the server socket.
+ if (connect(sockfd_, cur_addr->ai_addr, cur_addr->ai_addrlen) == -1) {
+ close(sockfd_);
+ sockfd_ = -1;
+ }
+ }
+ }
+
+ freeaddrinfo(servinfo); // all done with this structure
+
+ if (sockfd_ == -1) {
+ GTEST_LOG_(WARNING) << "stream_result_to: failed to connect to "
+ << host_name_ << ":" << port_num_;
+ }
+}
+
+// End of class StreamingListener
+#endif // GTEST_CAN_STREAM_RESULTS_
+
+// Class ScopedTrace
+
+// Pushes the given source file location and message onto a per-thread
+// trace stack maintained by Google Test.
+// L < UnitTest::mutex_
+ScopedTrace::ScopedTrace(const char* file, int line, const Message& message) {
+ TraceInfo trace;
+ trace.file = file;
+ trace.line = line;
+ trace.message = message.GetString();
+
+ UnitTest::GetInstance()->PushGTestTrace(trace);
+}
+
+// Pops the info pushed by the c'tor.
+// L < UnitTest::mutex_
+ScopedTrace::~ScopedTrace() {
+ UnitTest::GetInstance()->PopGTestTrace();
+}
+
+
+// class OsStackTraceGetter
+
+// Returns the current OS stack trace as a String. Parameters:
+//
+// max_depth - the maximum number of stack frames to be included
+// in the trace.
+// skip_count - the number of top frames to be skipped; doesn't count
+// against max_depth.
+//
+// L < mutex_
+// We use "L < mutex_" to denote that the function may acquire mutex_.
+String OsStackTraceGetter::CurrentStackTrace(int, int) {
+ return String("");
+}
+
+// L < mutex_
+void OsStackTraceGetter::UponLeavingGTest() {
+}
+
+const char* const
+OsStackTraceGetter::kElidedFramesMarker =
+ "... " GTEST_NAME_ " internal frames ...";
+
+} // namespace internal
+
+// class TestEventListeners
+
+TestEventListeners::TestEventListeners()
+ : repeater_(new internal::TestEventRepeater()),
+ default_result_printer_(NULL),
+ default_xml_generator_(NULL) {
+}
+
+TestEventListeners::~TestEventListeners() { delete repeater_; }
+
+// Appends an event listener to the end of the list. Google Test assumes
+// ownership of the listener (i.e. it will delete the listener when the
+// test program finishes).
+void TestEventListeners::Append(TestEventListener* listener) {
+ repeater_->Append(listener);
+}
+
+// Removes the given event listener from the list and returns it. It then
+// becomes the caller's responsibility to delete the listener. Returns
+// NULL if the listener is not found in the list.
+TestEventListener* TestEventListeners::Release(TestEventListener* listener) {
+ if (listener == default_result_printer_)
+ default_result_printer_ = NULL;
+ else if (listener == default_xml_generator_)
+ default_xml_generator_ = NULL;
+ return repeater_->Release(listener);
+}
+
+// Returns repeater that broadcasts the TestEventListener events to all
+// subscribers.
+TestEventListener* TestEventListeners::repeater() { return repeater_; }
+
+// Sets the default_result_printer attribute to the provided listener.
+// The listener is also added to the listener list and previous
+// default_result_printer is removed from it and deleted. The listener can
+// also be NULL in which case it will not be added to the list. Does
+// nothing if the previous and the current listener objects are the same.
+void TestEventListeners::SetDefaultResultPrinter(TestEventListener* listener) {
+ if (default_result_printer_ != listener) {
+ // It is an error to pass this method a listener that is already in the
+ // list.
+ delete Release(default_result_printer_);
+ default_result_printer_ = listener;
+ if (listener != NULL)
+ Append(listener);
+ }
+}
+
+// Sets the default_xml_generator attribute to the provided listener. The
+// listener is also added to the listener list and previous
+// default_xml_generator is removed from it and deleted. The listener can
+// also be NULL in which case it will not be added to the list. Does
+// nothing if the previous and the current listener objects are the same.
+void TestEventListeners::SetDefaultXmlGenerator(TestEventListener* listener) {
+ if (default_xml_generator_ != listener) {
+ // It is an error to pass this method a listener that is already in the
+ // list.
+ delete Release(default_xml_generator_);
+ default_xml_generator_ = listener;
+ if (listener != NULL)
+ Append(listener);
+ }
+}
+
+// Controls whether events will be forwarded by the repeater to the
+// listeners in the list.
+bool TestEventListeners::EventForwardingEnabled() const {
+ return repeater_->forwarding_enabled();
+}
+
+void TestEventListeners::SuppressEventForwarding() {
+ repeater_->set_forwarding_enabled(false);
+}
+
+// class UnitTest
+
+// Gets the singleton UnitTest object. The first time this method is
+// called, a UnitTest object is constructed and returned. Consecutive
+// calls will return the same object.
+//
+// We don't protect this under mutex_ as a user is not supposed to
+// call this before main() starts, from which point on the return
+// value will never change.
+UnitTest * UnitTest::GetInstance() {
+ // When compiled with MSVC 7.1 in optimized mode, destroying the
+ // UnitTest object upon exiting the program messes up the exit code,
+ // causing successful tests to appear failed. We have to use a
+ // different implementation in this case to bypass the compiler bug.
+ // This implementation makes the compiler happy, at the cost of
+ // leaking the UnitTest object.
+
+ // CodeGear C++Builder insists on a public destructor for the
+ // default implementation. Use this implementation to keep good OO
+ // design with private destructor.
+
+#if (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
+ static UnitTest* const instance = new UnitTest;
+ return instance;
+#else
+ static UnitTest instance;
+ return &instance;
+#endif // (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
+}
+
+// Gets the number of successful test cases.
+int UnitTest::successful_test_case_count() const {
+ return impl()->successful_test_case_count();
+}
+
+// Gets the number of failed test cases.
+int UnitTest::failed_test_case_count() const {
+ return impl()->failed_test_case_count();
+}
+
+// Gets the number of all test cases.
+int UnitTest::total_test_case_count() const {
+ return impl()->total_test_case_count();
+}
+
+// Gets the number of all test cases that contain at least one test
+// that should run.
+int UnitTest::test_case_to_run_count() const {
+ return impl()->test_case_to_run_count();
+}
+
+// Gets the number of successful tests.
+int UnitTest::successful_test_count() const {
+ return impl()->successful_test_count();
+}
+
+// Gets the number of failed tests.
+int UnitTest::failed_test_count() const { return impl()->failed_test_count(); }
+
+// Gets the number of disabled tests.
+int UnitTest::disabled_test_count() const {
+ return impl()->disabled_test_count();
+}
+
+// Gets the number of all tests.
+int UnitTest::total_test_count() const { return impl()->total_test_count(); }
+
+// Gets the number of tests that should run.
+int UnitTest::test_to_run_count() const { return impl()->test_to_run_count(); }
+
+// Gets the elapsed time, in milliseconds.
+internal::TimeInMillis UnitTest::elapsed_time() const {
+ return impl()->elapsed_time();
+}
+
+// Returns true iff the unit test passed (i.e. all test cases passed).
+bool UnitTest::Passed() const { return impl()->Passed(); }
+
+// Returns true iff the unit test failed (i.e. some test case failed
+// or something outside of all tests failed).
+bool UnitTest::Failed() const { return impl()->Failed(); }
+
+// Gets the i-th test case among all the test cases. i can range from 0 to
+// total_test_case_count() - 1. If i is not in that range, returns NULL.
+const TestCase* UnitTest::GetTestCase(int i) const {
+ return impl()->GetTestCase(i);
+}
+
+// Gets the i-th test case among all the test cases. i can range from 0 to
+// total_test_case_count() - 1. If i is not in that range, returns NULL.
+TestCase* UnitTest::GetMutableTestCase(int i) {
+ return impl()->GetMutableTestCase(i);
+}
+
+// Returns the list of event listeners that can be used to track events
+// inside Google Test.
+TestEventListeners& UnitTest::listeners() {
+ return *impl()->listeners();
+}
+
+// Registers and returns a global test environment. When a test
+// program is run, all global test environments will be set-up in the
+// order they were registered. After all tests in the program have
+// finished, all global test environments will be torn-down in the
+// *reverse* order they were registered.
+//
+// The UnitTest object takes ownership of the given environment.
+//
+// We don't protect this under mutex_, as we only support calling it
+// from the main thread.
+Environment* UnitTest::AddEnvironment(Environment* env) {
+ if (env == NULL) {
+ return NULL;
+ }
+
+ impl_->environments().push_back(env);
+ return env;
+}
+
+// Adds a TestPartResult to the current TestResult object. All Google Test
+// assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) eventually call
+// this to report their results. The user code should use the
+// assertion macros instead of calling this directly.
+// L < mutex_
+void UnitTest::AddTestPartResult(TestPartResult::Type result_type,
+ const char* file_name,
+ int line_number,
+ const internal::String& message,
+ const internal::String& os_stack_trace) {
+ Message msg;
+ msg << message;
+
+ internal::MutexLock lock(&mutex_);
+ if (impl_->gtest_trace_stack().size() > 0) {
+ msg << "\n" << GTEST_NAME_ << " trace:";
+
+ for (int i = static_cast<int>(impl_->gtest_trace_stack().size());
+ i > 0; --i) {
+ const internal::TraceInfo& trace = impl_->gtest_trace_stack()[i - 1];
+ msg << "\n" << internal::FormatFileLocation(trace.file, trace.line)
+ << " " << trace.message;
+ }
+ }
+
+ if (os_stack_trace.c_str() != NULL && !os_stack_trace.empty()) {
+ msg << internal::kStackTraceMarker << os_stack_trace;
+ }
+
+ const TestPartResult result =
+ TestPartResult(result_type, file_name, line_number,
+ msg.GetString().c_str());
+ impl_->GetTestPartResultReporterForCurrentThread()->
+ ReportTestPartResult(result);
+
+ if (result_type != TestPartResult::kSuccess) {
+ // gtest_break_on_failure takes precedence over
+ // gtest_throw_on_failure. This allows a user to set the latter
+ // in the code (perhaps in order to use Google Test assertions
+ // with another testing framework) and specify the former on the
+ // command line for debugging.
+ if (GTEST_FLAG(break_on_failure)) {
+#if GTEST_OS_WINDOWS
+ // Using DebugBreak on Windows allows gtest to still break into a debugger
+ // when a failure happens and both the --gtest_break_on_failure and
+ // the --gtest_catch_exceptions flags are specified.
+ DebugBreak();
+#else
+ // Dereference NULL through a volatile pointer to prevent the compiler
+ // from removing. We use this rather than abort() or __builtin_trap() for
+ // portability: Symbian doesn't implement abort() well, and some debuggers
+ // don't correctly trap abort().
+ *static_cast<volatile int*>(NULL) = 1;
+#endif // GTEST_OS_WINDOWS
+ } else if (GTEST_FLAG(throw_on_failure)) {
+#if GTEST_HAS_EXCEPTIONS
+ throw GoogleTestFailureException(result);
+#else
+ // We cannot call abort() as it generates a pop-up in debug mode
+ // that cannot be suppressed in VC 7.1 or below.
+ exit(1);
+#endif
+ }
+ }
+}
+
+// Creates and adds a property to the current TestResult. If a property
+// matching the supplied key already exists, updates its value instead.
+void UnitTest::RecordPropertyForCurrentTest(const char* key,
+ const char* value) {
+ const TestProperty test_property(key, value);
+ impl_->current_test_result()->RecordProperty(test_property);
+}
+
+// Runs all tests in this UnitTest object and prints the result.
+// Returns 0 if successful, or 1 otherwise.
+//
+// We don't protect this under mutex_, as we only support calling it
+// from the main thread.
+int UnitTest::Run() {
+ // Captures the value of GTEST_FLAG(catch_exceptions). This value will be
+ // used for the duration of the program.
+ impl()->set_catch_exceptions(GTEST_FLAG(catch_exceptions));
+
+#if GTEST_HAS_SEH
+ const bool in_death_test_child_process =
+ internal::GTEST_FLAG(internal_run_death_test).length() > 0;
+
+ // Either the user wants Google Test to catch exceptions thrown by the
+ // tests or this is executing in the context of a death test child
+ // process. In either case the user does not want to see pop-up dialogs
+ // about crashes - they are expected.
+ if (impl()->catch_exceptions() || in_death_test_child_process) {
+
+# if !GTEST_OS_WINDOWS_MOBILE
+ // SetErrorMode doesn't exist on CE.
+ SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT |
+ SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX);
+# endif // !GTEST_OS_WINDOWS_MOBILE
+
+# if (defined(_MSC_VER) || GTEST_OS_WINDOWS_MINGW) && !GTEST_OS_WINDOWS_MOBILE
+ // Death test children can be terminated with _abort(). On Windows,
+ // _abort() can show a dialog with a warning message. This forces the
+ // abort message to go to stderr instead.
+ _set_error_mode(_OUT_TO_STDERR);
+# endif
+
+# if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE
+ // In the debug version, Visual Studio pops up a separate dialog
+ // offering a choice to debug the aborted program. We need to suppress
+ // this dialog or it will pop up for every EXPECT/ASSERT_DEATH statement
+ // executed. Google Test will notify the user of any unexpected
+ // failure via stderr.
+ //
+ // VC++ doesn't define _set_abort_behavior() prior to version 8.0.
+ // Users of prior VC versions shall suffer the agony and pain of
+ // clicking through the countless debug dialogs.
+ // TODO(vladl@google.com): find a way to suppress the abort dialog in
+ // debug mode when compiled with VC 7.1 or lower.
+ if (!GTEST_FLAG(break_on_failure))
+ _set_abort_behavior(
+ 0x0, // Clear the following flags:
+ _WRITE_ABORT_MSG | _CALL_REPORTFAULT); // pop-up window, core dump.
+# endif
+
+ }
+#endif // GTEST_HAS_SEH
+
+ return internal::HandleExceptionsInMethodIfSupported(
+ impl(),
+ &internal::UnitTestImpl::RunAllTests,
+ "auxiliary test code (environments or event listeners)") ? 0 : 1;
+}
+
+// Returns the working directory when the first TEST() or TEST_F() was
+// executed.
+const char* UnitTest::original_working_dir() const {
+ return impl_->original_working_dir_.c_str();
+}
+
+// Returns the TestCase object for the test that's currently running,
+// or NULL if no test is running.
+// L < mutex_
+const TestCase* UnitTest::current_test_case() const {
+ internal::MutexLock lock(&mutex_);
+ return impl_->current_test_case();
+}
+
+// Returns the TestInfo object for the test that's currently running,
+// or NULL if no test is running.
+// L < mutex_
+const TestInfo* UnitTest::current_test_info() const {
+ internal::MutexLock lock(&mutex_);
+ return impl_->current_test_info();
+}
+
+// Returns the random seed used at the start of the current test run.
+int UnitTest::random_seed() const { return impl_->random_seed(); }
+
+#if GTEST_HAS_PARAM_TEST
+// Returns ParameterizedTestCaseRegistry object used to keep track of
+// value-parameterized tests and instantiate and register them.
+// L < mutex_
+internal::ParameterizedTestCaseRegistry&
+ UnitTest::parameterized_test_registry() {
+ return impl_->parameterized_test_registry();
+}
+#endif // GTEST_HAS_PARAM_TEST
+
+// Creates an empty UnitTest.
+UnitTest::UnitTest() {
+ impl_ = new internal::UnitTestImpl(this);
+}
+
+// Destructor of UnitTest.
+UnitTest::~UnitTest() {
+ delete impl_;
+}
+
+// Pushes a trace defined by SCOPED_TRACE() on to the per-thread
+// Google Test trace stack.
+// L < mutex_
+void UnitTest::PushGTestTrace(const internal::TraceInfo& trace) {
+ internal::MutexLock lock(&mutex_);
+ impl_->gtest_trace_stack().push_back(trace);
+}
+
+// Pops a trace from the per-thread Google Test trace stack.
+// L < mutex_
+void UnitTest::PopGTestTrace() {
+ internal::MutexLock lock(&mutex_);
+ impl_->gtest_trace_stack().pop_back();
+}
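+
+// A short sketch of the client-side macro that drives the two functions
+// above: SCOPED_TRACE() pushes a TraceInfo when its scope is entered and
+// pops it when the scope ends. Expected() and Actual() are hypothetical
+// helpers used only for illustration.
+//
+//   for (int i = 0; i < 5; i++) {
+//     SCOPED_TRACE(testing::Message() << "iteration " << i);
+//     EXPECT_EQ(Expected(i), Actual(i));
+//   }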
+
+namespace internal {
+
+UnitTestImpl::UnitTestImpl(UnitTest* parent)
+ : parent_(parent),
+#ifdef _MSC_VER
+# pragma warning(push) // Saves the current warning state.
+# pragma warning(disable:4355) // Temporarily disables warning 4355
+ // (using this in initializer).
+ default_global_test_part_result_reporter_(this),
+ default_per_thread_test_part_result_reporter_(this),
+# pragma warning(pop) // Restores the warning state again.
+#else
+ default_global_test_part_result_reporter_(this),
+ default_per_thread_test_part_result_reporter_(this),
+#endif // _MSC_VER
+ global_test_part_result_repoter_(
+ &default_global_test_part_result_reporter_),
+ per_thread_test_part_result_reporter_(
+ &default_per_thread_test_part_result_reporter_),
+#if GTEST_HAS_PARAM_TEST
+ parameterized_test_registry_(),
+ parameterized_tests_registered_(false),
+#endif // GTEST_HAS_PARAM_TEST
+ last_death_test_case_(-1),
+ current_test_case_(NULL),
+ current_test_info_(NULL),
+ ad_hoc_test_result_(),
+ os_stack_trace_getter_(NULL),
+ post_flag_parse_init_performed_(false),
+ random_seed_(0), // Will be overridden by the flag before first use.
+ random_(0), // Will be reseeded before first use.
+ elapsed_time_(0),
+#if GTEST_HAS_DEATH_TEST
+ internal_run_death_test_flag_(NULL),
+ death_test_factory_(new DefaultDeathTestFactory),
+#endif
+ // Will be overridden by the flag before first use.
+ catch_exceptions_(false) {
+ listeners()->SetDefaultResultPrinter(new PrettyUnitTestResultPrinter);
+}
+
+UnitTestImpl::~UnitTestImpl() {
+ // Deletes every TestCase.
+ ForEach(test_cases_, internal::Delete<TestCase>);
+
+ // Deletes every Environment.
+ ForEach(environments_, internal::Delete<Environment>);
+
+ delete os_stack_trace_getter_;
+}
+
+#if GTEST_HAS_DEATH_TEST
+// Disables event forwarding if the control is currently in a death test
+// subprocess. Must not be called before InitGoogleTest.
+void UnitTestImpl::SuppressTestEventsIfInSubprocess() {
+ if (internal_run_death_test_flag_.get() != NULL)
+ listeners()->SuppressEventForwarding();
+}
+#endif // GTEST_HAS_DEATH_TEST
+
+// Initializes event listeners performing XML output as specified by
+// UnitTestOptions. Must not be called before InitGoogleTest.
+void UnitTestImpl::ConfigureXmlOutput() {
+ const String& output_format = UnitTestOptions::GetOutputFormat();
+ if (output_format == "xml") {
+ listeners()->SetDefaultXmlGenerator(new XmlUnitTestResultPrinter(
+ UnitTestOptions::GetAbsolutePathToOutputFile().c_str()));
+ } else if (output_format != "") {
+ printf("WARNING: unrecognized output format \"%s\" ignored.\n",
+ output_format.c_str());
+ fflush(stdout);
+ }
+}
+
+#if GTEST_CAN_STREAM_RESULTS_
+// Initializes event listeners for streaming test results in String form.
+// Must not be called before InitGoogleTest.
+void UnitTestImpl::ConfigureStreamingOutput() {
+ const string& target = GTEST_FLAG(stream_result_to);
+ if (!target.empty()) {
+ const size_t pos = target.find(':');
+ if (pos != string::npos) {
+ listeners()->Append(new StreamingListener(target.substr(0, pos),
+ target.substr(pos+1)));
+ } else {
+ printf("WARNING: unrecognized streaming target \"%s\" ignored.\n",
+ target.c_str());
+ fflush(stdout);
+ }
+ }
+}
+#endif // GTEST_CAN_STREAM_RESULTS_
+
+// Performs initialization dependent upon flag values obtained in
+// ParseGoogleTestFlagsOnly. It is called from InitGoogleTest after the call
+// to ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest,
+// this function is also called from RunAllTests. Since this function can be
+// called more than once, it has to be idempotent.
+void UnitTestImpl::PostFlagParsingInit() {
+ // Ensures that this function does not execute more than once.
+ if (!post_flag_parse_init_performed_) {
+ post_flag_parse_init_performed_ = true;
+
+#if GTEST_HAS_DEATH_TEST
+ InitDeathTestSubprocessControlInfo();
+ SuppressTestEventsIfInSubprocess();
+#endif // GTEST_HAS_DEATH_TEST
+
+ // Registers parameterized tests. This makes parameterized tests
+ // available to the UnitTest reflection API without running
+ // RUN_ALL_TESTS.
+ RegisterParameterizedTests();
+
+ // Configures listeners for XML output. This makes it possible for users
+ // to shut down the default XML output before invoking RUN_ALL_TESTS.
+ ConfigureXmlOutput();
+
+#if GTEST_CAN_STREAM_RESULTS_
+ // Configures listeners for streaming test results to the specified server.
+ ConfigureStreamingOutput();
+#endif // GTEST_CAN_STREAM_RESULTS_
+ }
+}
+
+// A predicate that checks the name of a TestCase against a known
+// value.
+//
+// This is used for implementation of the UnitTest class only. We put
+// it in the anonymous namespace to prevent polluting the outer
+// namespace.
+//
+// TestCaseNameIs is copyable.
+class TestCaseNameIs {
+ public:
+ // Constructor.
+ explicit TestCaseNameIs(const String& name)
+ : name_(name) {}
+
+ // Returns true iff the name of test_case matches name_.
+ bool operator()(const TestCase* test_case) const {
+ return test_case != NULL && strcmp(test_case->name(), name_.c_str()) == 0;
+ }
+
+ private:
+ String name_;
+};
+
+// Finds and returns a TestCase with the given name. If one doesn't
+// exist, creates one and returns it. It's the CALLER'S
+// RESPONSIBILITY to ensure that this function is only called WHEN THE
+// TESTS ARE NOT SHUFFLED.
+//
+// Arguments:
+//
+// test_case_name: name of the test case
+// type_param: the name of the test case's type parameter, or NULL if
+// this is not a typed or a type-parameterized test case.
+// set_up_tc: pointer to the function that sets up the test case
+// tear_down_tc: pointer to the function that tears down the test case
+TestCase* UnitTestImpl::GetTestCase(const char* test_case_name,
+ const char* type_param,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc) {
+ // Can we find a TestCase with the given name?
+ const std::vector<TestCase*>::const_iterator test_case =
+ std::find_if(test_cases_.begin(), test_cases_.end(),
+ TestCaseNameIs(test_case_name));
+
+ if (test_case != test_cases_.end())
+ return *test_case;
+
+ // No. Let's create one.
+ TestCase* const new_test_case =
+ new TestCase(test_case_name, type_param, set_up_tc, tear_down_tc);
+
+ // Is this a death test case?
+ if (internal::UnitTestOptions::MatchesFilter(String(test_case_name),
+ kDeathTestCaseFilter)) {
+ // Yes. Inserts the test case after the last death test case
+ // defined so far. This only works when the test cases haven't
+ // been shuffled. Otherwise we may end up running a death test
+ // after a non-death test.
+ ++last_death_test_case_;
+ test_cases_.insert(test_cases_.begin() + last_death_test_case_,
+ new_test_case);
+ } else {
+ // No. Appends to the end of the list.
+ test_cases_.push_back(new_test_case);
+ }
+
+ test_case_indices_.push_back(static_cast<int>(test_case_indices_.size()));
+ return new_test_case;
+}
+
+// Helpers for setting up / tearing down the given environment. They
+// are for use in the ForEach() function.
+static void SetUpEnvironment(Environment* env) { env->SetUp(); }
+static void TearDownEnvironment(Environment* env) { env->TearDown(); }
+
+// Runs all tests in this UnitTest object, prints the result, and
+// returns true if all tests are successful. If any exception is
+// thrown during a test, the test is considered to be failed, but the
+// rest of the tests will still be run.
+//
+// When parameterized tests are enabled, it expands and registers
+// parameterized tests first in RegisterParameterizedTests().
+// All other functions called from RunAllTests() may safely assume that
+// parameterized tests are ready to be counted and run.
+bool UnitTestImpl::RunAllTests() {
+ // Makes sure InitGoogleTest() was called.
+ if (!GTestIsInitialized()) {
+ printf("%s",
+ "\nThis test program did NOT call ::testing::InitGoogleTest "
+ "before calling RUN_ALL_TESTS(). Please fix it.\n");
+ return false;
+ }
+
+ // Do not run any test if the --help flag was specified.
+ if (g_help_flag)
+ return true;
+
+ // Repeats the call to the post-flag parsing initialization in case the
+ // user didn't call InitGoogleTest.
+ PostFlagParsingInit();
+
+ // Even if sharding is not on, test runners may want to use the
+ // GTEST_SHARD_STATUS_FILE to query whether the test supports the sharding
+ // protocol.
+ internal::WriteToShardStatusFileIfNeeded();
+
+ // True iff we are in a subprocess for running a thread-safe-style
+ // death test.
+ bool in_subprocess_for_death_test = false;
+
+#if GTEST_HAS_DEATH_TEST
+ in_subprocess_for_death_test = (internal_run_death_test_flag_.get() != NULL);
+#endif // GTEST_HAS_DEATH_TEST
+
+ const bool should_shard = ShouldShard(kTestTotalShards, kTestShardIndex,
+ in_subprocess_for_death_test);
+
+ // Compares the full test names with the filter to decide which
+ // tests to run.
+ const bool has_tests_to_run = FilterTests(should_shard
+ ? HONOR_SHARDING_PROTOCOL
+ : IGNORE_SHARDING_PROTOCOL) > 0;
+
+ // Lists the tests and exits if the --gtest_list_tests flag was specified.
+ if (GTEST_FLAG(list_tests)) {
+ // This must be called *after* FilterTests() has been called.
+ ListTestsMatchingFilter();
+ return true;
+ }
+
+ random_seed_ = GTEST_FLAG(shuffle) ?
+ GetRandomSeedFromFlag(GTEST_FLAG(random_seed)) : 0;
+
+ // True iff at least one test has failed.
+ bool failed = false;
+
+ TestEventListener* repeater = listeners()->repeater();
+
+ repeater->OnTestProgramStart(*parent_);
+
+ // How many times to repeat the tests? We don't want to repeat them
+ // when we are inside the subprocess of a death test.
+ const int repeat = in_subprocess_for_death_test ? 1 : GTEST_FLAG(repeat);
+ // Repeats forever if the repeat count is negative.
+ const bool forever = repeat < 0;
+ for (int i = 0; forever || i != repeat; i++) {
+ // We want to preserve failures generated by ad-hoc test
+ // assertions executed before RUN_ALL_TESTS().
+ ClearNonAdHocTestResult();
+
+ const TimeInMillis start = GetTimeInMillis();
+
+ // Shuffles test cases and tests if requested.
+ if (has_tests_to_run && GTEST_FLAG(shuffle)) {
+ random()->Reseed(random_seed_);
+ // This should be done before calling OnTestIterationStart(),
+ // such that a test event listener can see the actual test order
+ // in the event.
+ ShuffleTests();
+ }
+
+ // Tells the unit test event listeners that the tests are about to start.
+ repeater->OnTestIterationStart(*parent_, i);
+
+ // Runs each test case if there is at least one test to run.
+ if (has_tests_to_run) {
+ // Sets up all environments beforehand.
+ repeater->OnEnvironmentsSetUpStart(*parent_);
+ ForEach(environments_, SetUpEnvironment);
+ repeater->OnEnvironmentsSetUpEnd(*parent_);
+
+ // Runs the tests only if there was no fatal failure during global
+ // set-up.
+ if (!Test::HasFatalFailure()) {
+ for (int test_index = 0; test_index < total_test_case_count();
+ test_index++) {
+ GetMutableTestCase(test_index)->Run();
+ }
+ }
+
+ // Tears down all environments in reverse order afterwards.
+ repeater->OnEnvironmentsTearDownStart(*parent_);
+ std::for_each(environments_.rbegin(), environments_.rend(),
+ TearDownEnvironment);
+ repeater->OnEnvironmentsTearDownEnd(*parent_);
+ }
+
+ elapsed_time_ = GetTimeInMillis() - start;
+
+ // Tells the unit test event listener that the tests have just finished.
+ repeater->OnTestIterationEnd(*parent_, i);
+
+ // Gets the result and clears it.
+ if (!Passed()) {
+ failed = true;
+ }
+
+ // Restores the original test order after the iteration. This
+ // allows the user to quickly repro a failure that happens in the
+ // N-th iteration without repeating the first (N - 1) iterations.
+ // This is not enclosed in "if (GTEST_FLAG(shuffle)) { ... }", in
+ // case the user somehow changes the value of the flag somewhere
+ // (it's always safe to unshuffle the tests).
+ UnshuffleTests();
+
+ if (GTEST_FLAG(shuffle)) {
+ // Picks a new random seed for each iteration.
+ random_seed_ = GetNextRandomSeed(random_seed_);
+ }
+ }
+
+ repeater->OnTestProgramEnd(*parent_);
+
+ return !failed;
+}
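+
+// The repeat/shuffle logic above is driven by the flags documented in the
+// help text later in this file; an illustrative (hypothetical) invocation
+// would be:
+//
+//   ./my_test --gtest_repeat=3 --gtest_shuffle --gtest_random_seed=0
+//
+// which runs three iterations, reshuffling the test order with a fresh seed
+// on every iteration.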
+
+// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file
+// if the variable is present. If a file already exists at this location, this
+// function will write over it. If the variable is present, but the file cannot
+// be created, prints an error and exits.
+void WriteToShardStatusFileIfNeeded() {
+ const char* const test_shard_file = posix::GetEnv(kTestShardStatusFile);
+ if (test_shard_file != NULL) {
+ FILE* const file = posix::FOpen(test_shard_file, "w");
+ if (file == NULL) {
+ ColoredPrintf(COLOR_RED,
+ "Could not write to the test shard status file \"%s\" "
+ "specified by the %s environment variable.\n",
+ test_shard_file, kTestShardStatusFile);
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ }
+ fclose(file);
+ }
+}
+
+// Checks whether sharding is enabled by examining the relevant
+// environment variable values. If the variables are present,
+// but inconsistent (i.e., shard_index >= total_shards), prints
+// an error and exits. If in_subprocess_for_death_test, sharding is
+// disabled because it must only be applied to the original test
+// process. Otherwise, we could filter out death tests we intended to execute.
+bool ShouldShard(const char* total_shards_env,
+ const char* shard_index_env,
+ bool in_subprocess_for_death_test) {
+ if (in_subprocess_for_death_test) {
+ return false;
+ }
+
+ const Int32 total_shards = Int32FromEnvOrDie(total_shards_env, -1);
+ const Int32 shard_index = Int32FromEnvOrDie(shard_index_env, -1);
+
+ if (total_shards == -1 && shard_index == -1) {
+ return false;
+ } else if (total_shards == -1 && shard_index != -1) {
+ const Message msg = Message()
+ << "Invalid environment variables: you have "
+ << kTestShardIndex << " = " << shard_index
+ << ", but have left " << kTestTotalShards << " unset.\n";
+ ColoredPrintf(COLOR_RED, msg.GetString().c_str());
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ } else if (total_shards != -1 && shard_index == -1) {
+ const Message msg = Message()
+ << "Invalid environment variables: you have "
+ << kTestTotalShards << " = " << total_shards
+ << ", but have left " << kTestShardIndex << " unset.\n";
+ ColoredPrintf(COLOR_RED, msg.GetString().c_str());
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ } else if (shard_index < 0 || shard_index >= total_shards) {
+ const Message msg = Message()
+ << "Invalid environment variables: we require 0 <= "
+ << kTestShardIndex << " < " << kTestTotalShards
+ << ", but you have " << kTestShardIndex << "=" << shard_index
+ << ", " << kTestTotalShards << "=" << total_shards << ".\n";
+ ColoredPrintf(COLOR_RED, msg.GetString().c_str());
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ }
+
+ return total_shards > 1;
+}
+
+// Parses the environment variable var as an Int32. If it is unset,
+// returns default_val. If it is not an Int32, prints an error
+// and aborts.
+Int32 Int32FromEnvOrDie(const char* var, Int32 default_val) {
+ const char* str_val = posix::GetEnv(var);
+ if (str_val == NULL) {
+ return default_val;
+ }
+
+ Int32 result;
+ if (!ParseInt32(Message() << "The value of environment variable " << var,
+ str_val, &result)) {
+ exit(EXIT_FAILURE);
+ }
+ return result;
+}
+
+// Given the total number of shards, the shard index, and the test id,
+// returns true iff the test should be run on this shard. The test id is
+// some arbitrary but unique non-negative integer assigned to each test
+// method. Assumes that 0 <= shard_index < total_shards.
+bool ShouldRunTestOnShard(int total_shards, int shard_index, int test_id) {
+ return (test_id % total_shards) == shard_index;
+}
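+
+// For illustration, with total_shards == 3 the modulo rule above spreads
+// test ids 0..8 round-robin across the shards:
+//
+//   ShouldRunTestOnShard(3, 0, id);  // true for ids 0, 3, 6
+//   ShouldRunTestOnShard(3, 1, id);  // true for ids 1, 4, 7
+//   ShouldRunTestOnShard(3, 2, id);  // true for ids 2, 5, 8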
+
+// Compares the name of each test with the user-specified filter to
+// decide whether the test should be run, then records the result in
+// each TestCase and TestInfo object.
+// If shard_tests == true, further filters tests based on sharding
+// variables in the environment - see
+// http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide.
+// Returns the number of tests that should run.
+int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) {
+ const Int32 total_shards = shard_tests == HONOR_SHARDING_PROTOCOL ?
+ Int32FromEnvOrDie(kTestTotalShards, -1) : -1;
+ const Int32 shard_index = shard_tests == HONOR_SHARDING_PROTOCOL ?
+ Int32FromEnvOrDie(kTestShardIndex, -1) : -1;
+
+ // num_runnable_tests is the number of tests that will
+ // run across all shards (i.e., match the filter and are not disabled).
+ // num_selected_tests is the number of tests to be run on
+ // this shard.
+ int num_runnable_tests = 0;
+ int num_selected_tests = 0;
+ for (size_t i = 0; i < test_cases_.size(); i++) {
+ TestCase* const test_case = test_cases_[i];
+ const String &test_case_name = test_case->name();
+ test_case->set_should_run(false);
+
+ for (size_t j = 0; j < test_case->test_info_list().size(); j++) {
+ TestInfo* const test_info = test_case->test_info_list()[j];
+ const String test_name(test_info->name());
+ // A test is disabled if test case name or test name matches
+ // kDisableTestFilter.
+ const bool is_disabled =
+ internal::UnitTestOptions::MatchesFilter(test_case_name,
+ kDisableTestFilter) ||
+ internal::UnitTestOptions::MatchesFilter(test_name,
+ kDisableTestFilter);
+ test_info->is_disabled_ = is_disabled;
+
+ const bool matches_filter =
+ internal::UnitTestOptions::FilterMatchesTest(test_case_name,
+ test_name);
+ test_info->matches_filter_ = matches_filter;
+
+ const bool is_runnable =
+ (GTEST_FLAG(also_run_disabled_tests) || !is_disabled) &&
+ matches_filter;
+
+ const bool is_selected = is_runnable &&
+ (shard_tests == IGNORE_SHARDING_PROTOCOL ||
+ ShouldRunTestOnShard(total_shards, shard_index,
+ num_runnable_tests));
+
+ num_runnable_tests += is_runnable;
+ num_selected_tests += is_selected;
+
+ test_info->should_run_ = is_selected;
+ test_case->set_should_run(test_case->should_run() || is_selected);
+ }
+ }
+ return num_selected_tests;
+}
+
+// Prints the names of the tests matching the user-specified filter flag.
+void UnitTestImpl::ListTestsMatchingFilter() {
+ for (size_t i = 0; i < test_cases_.size(); i++) {
+ const TestCase* const test_case = test_cases_[i];
+ bool printed_test_case_name = false;
+
+ for (size_t j = 0; j < test_case->test_info_list().size(); j++) {
+ const TestInfo* const test_info =
+ test_case->test_info_list()[j];
+ if (test_info->matches_filter_) {
+ if (!printed_test_case_name) {
+ printed_test_case_name = true;
+ printf("%s.\n", test_case->name());
+ }
+ printf(" %s\n", test_info->name());
+ }
+ }
+ }
+ fflush(stdout);
+}
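+
+// For example, a binary containing TEST(FooTest, Bar) and TEST(FooTest, Baz)
+// (hypothetical tests) run with --gtest_list_tests would print:
+//
+//   FooTest.
+//     Bar
+//     Baz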
+
+// Sets the OS stack trace getter.
+//
+// Does nothing if the input and the current OS stack trace getter are
+// the same; otherwise, deletes the old getter and makes the input the
+// current getter.
+void UnitTestImpl::set_os_stack_trace_getter(
+ OsStackTraceGetterInterface* getter) {
+ if (os_stack_trace_getter_ != getter) {
+ delete os_stack_trace_getter_;
+ os_stack_trace_getter_ = getter;
+ }
+}
+
+// Returns the current OS stack trace getter if it is not NULL;
+// otherwise, creates an OsStackTraceGetter, makes it the current
+// getter, and returns it.
+OsStackTraceGetterInterface* UnitTestImpl::os_stack_trace_getter() {
+ if (os_stack_trace_getter_ == NULL) {
+ os_stack_trace_getter_ = new OsStackTraceGetter;
+ }
+
+ return os_stack_trace_getter_;
+}
+
+// Returns the TestResult for the test that's currently running, or
+// the TestResult for the ad hoc test if no test is running.
+TestResult* UnitTestImpl::current_test_result() {
+ return current_test_info_ ?
+ &(current_test_info_->result_) : &ad_hoc_test_result_;
+}
+
+// Shuffles all test cases, and the tests within each test case,
+// making sure that death tests are still run first.
+void UnitTestImpl::ShuffleTests() {
+ // Shuffles the death test cases.
+ ShuffleRange(random(), 0, last_death_test_case_ + 1, &test_case_indices_);
+
+ // Shuffles the non-death test cases.
+ ShuffleRange(random(), last_death_test_case_ + 1,
+ static_cast<int>(test_cases_.size()), &test_case_indices_);
+
+ // Shuffles the tests inside each test case.
+ for (size_t i = 0; i < test_cases_.size(); i++) {
+ test_cases_[i]->ShuffleTests(random());
+ }
+}
+
+// Restores the test cases and tests to their order before the first shuffle.
+void UnitTestImpl::UnshuffleTests() {
+ for (size_t i = 0; i < test_cases_.size(); i++) {
+ // Unshuffles the tests in each test case.
+ test_cases_[i]->UnshuffleTests();
+ // Resets the index of each test case.
+ test_case_indices_[i] = static_cast<int>(i);
+ }
+}
+
+// Returns the current OS stack trace as a String.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag. The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
+// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
+String GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/,
+ int skip_count) {
+ // We pass skip_count + 1 to skip this wrapper function in addition
+ // to what the user really wants to skip.
+ return GetUnitTestImpl()->CurrentOsStackTraceExceptTop(skip_count + 1);
+}
+
+// Used by the GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_ macro to
+// suppress unreachable code warnings.
+namespace {
+class ClassUniqueToAlwaysTrue {};
+}
+
+bool IsTrue(bool condition) { return condition; }
+
+bool AlwaysTrue() {
+#if GTEST_HAS_EXCEPTIONS
+ // This condition is always false so AlwaysTrue() never actually throws,
+ // but it makes the compiler think that it may throw.
+ if (IsTrue(false))
+ throw ClassUniqueToAlwaysTrue();
+#endif // GTEST_HAS_EXCEPTIONS
+ return true;
+}
+
+// If *pstr starts with the given prefix, modifies *pstr to be right
+// past the prefix and returns true; otherwise leaves *pstr unchanged
+// and returns false. None of pstr, *pstr, and prefix can be NULL.
+bool SkipPrefix(const char* prefix, const char** pstr) {
+ const size_t prefix_len = strlen(prefix);
+ if (strncmp(*pstr, prefix, prefix_len) == 0) {
+ *pstr += prefix_len;
+ return true;
+ }
+ return false;
+}
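+
+// A small sketch of the contract:
+//
+//   const char* p = "--gtest_filter=Foo.*";
+//   SkipPrefix("--", &p);  // returns true; p now points at "gtest_filter=Foo.*"
+//   SkipPrefix("/", &p);   // returns false; p is left unchanged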
+
+// Parses a string as a command line flag. The string should have
+// the format "--flag=value". When def_optional is true, the "=value"
+// part can be omitted.
+//
+// Returns the value of the flag, or NULL if the parsing failed.
+const char* ParseFlagValue(const char* str,
+ const char* flag,
+ bool def_optional) {
+ // str and flag must not be NULL.
+ if (str == NULL || flag == NULL) return NULL;
+
+ // The flag must start with "--" followed by GTEST_FLAG_PREFIX_.
+ const String flag_str = String::Format("--%s%s", GTEST_FLAG_PREFIX_, flag);
+ const size_t flag_len = flag_str.length();
+ if (strncmp(str, flag_str.c_str(), flag_len) != 0) return NULL;
+
+ // Skips the flag name.
+ const char* flag_end = str + flag_len;
+
+ // When def_optional is true, it's OK to not have a "=value" part.
+ if (def_optional && (flag_end[0] == '\0')) {
+ return flag_end;
+ }
+
+ // If def_optional is true and there are more characters after the
+ // flag name, or if def_optional is false, there must be a '=' after
+ // the flag name.
+ if (flag_end[0] != '=') return NULL;
+
+ // Returns the string after "=".
+ return flag_end + 1;
+}
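+
+// Illustrative calls, assuming GTEST_FLAG_PREFIX_ expands to "gtest_" as in
+// the stock headers:
+//
+//   ParseFlagValue("--gtest_filter=Foo.*", "filter", false);   // "Foo.*"
+//   ParseFlagValue("--gtest_list_tests", "list_tests", true);  // ""
+//   ParseFlagValue("--gtest_filter", "filter", false);         // NULL
+//   ParseFlagValue("--filter=Foo.*", "filter", false);         // NULL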
+
+// Parses a string for a bool flag, in the form of either
+// "--flag=value" or "--flag".
+//
+// In the former case, the value is taken as true as long as it does
+// not start with '0', 'f', or 'F'.
+//
+// In the latter case, the value is taken as true.
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, true);
+
+ // Aborts if the parsing failed.
+ if (value_str == NULL) return false;
+
+ // Converts the string value to a bool.
+ *value = !(*value_str == '0' || *value_str == 'f' || *value_str == 'F');
+ return true;
+}
+
+// Parses a string for an Int32 flag, in the form of
+// "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseInt32Flag(const char* str, const char* flag, Int32* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == NULL) return false;
+
+ // Sets *value to the value of the flag.
+ return ParseInt32(Message() << "The value of flag --" << flag,
+ value_str, value);
+}
+
+// Parses a string for a string flag, in the form of
+// "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseStringFlag(const char* str, const char* flag, String* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == NULL) return false;
+
+ // Sets *value to the value of the flag.
+ *value = value_str;
+ return true;
+}
+
+// Determines whether a string has a prefix that Google Test uses for its
+// flags, i.e., starts with GTEST_FLAG_PREFIX_ or GTEST_FLAG_PREFIX_DASH_.
+// If Google Test detects that a command line flag has its prefix but is not
+// recognized, it will print its help message. Flags starting with
+// GTEST_FLAG_PREFIX_ followed by "internal_" are considered Google Test
+// internal flags and do not trigger the help message.
+static bool HasGoogleTestFlagPrefix(const char* str) {
+ return (SkipPrefix("--", &str) ||
+ SkipPrefix("-", &str) ||
+ SkipPrefix("/", &str)) &&
+ !SkipPrefix(GTEST_FLAG_PREFIX_ "internal_", &str) &&
+ (SkipPrefix(GTEST_FLAG_PREFIX_, &str) ||
+ SkipPrefix(GTEST_FLAG_PREFIX_DASH_, &str));
+}
+
+// Prints a string containing code-encoded text. The following escape
+// sequences can be used in the string to control the text color:
+//
+// @@ prints a single '@' character.
+// @R changes the color to red.
+// @G changes the color to green.
+// @Y changes the color to yellow.
+// @D changes to the default terminal text color.
+//
+// TODO(wan@google.com): Write tests for this once we add stdout
+// capturing to Google Test.
+static void PrintColorEncoded(const char* str) {
+ GTestColor color = COLOR_DEFAULT; // The current color.
+
+ // Conceptually, we split the string into segments divided by escape
+ // sequences. Then we print one segment at a time. At the end of
+ // each iteration, the str pointer advances to the beginning of the
+ // next segment.
+ for (;;) {
+ const char* p = strchr(str, '@');
+ if (p == NULL) {
+ ColoredPrintf(color, "%s", str);
+ return;
+ }
+
+ ColoredPrintf(color, "%s", String(str, p - str).c_str());
+
+ const char ch = p[1];
+ str = p + 2;
+ if (ch == '@') {
+ ColoredPrintf(color, "@");
+ } else if (ch == 'D') {
+ color = COLOR_DEFAULT;
+ } else if (ch == 'R') {
+ color = COLOR_RED;
+ } else if (ch == 'G') {
+ color = COLOR_GREEN;
+ } else if (ch == 'Y') {
+ color = COLOR_YELLOW;
+ } else {
+ --str;
+ }
+ }
+}
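+
+// A small illustration of the encoding:
+//
+//   PrintColorEncoded("@Gok@D - 2 tests, @Rfailed@D - 1 test, 100@@ done\n");
+//
+// prints "ok" in green and "failed" in red, leaves the rest in the default
+// terminal color, and renders "@@" as a single '@'.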
+
+static const char kColorEncodedHelpMessage[] =
+"This program contains tests written using " GTEST_NAME_ ". You can use the\n"
+"following command line flags to control its behavior:\n"
+"\n"
+"Test Selection:\n"
+" @G--" GTEST_FLAG_PREFIX_ "list_tests@D\n"
+" List the names of all tests instead of running them. The name of\n"
+" TEST(Foo, Bar) is \"Foo.Bar\".\n"
+" @G--" GTEST_FLAG_PREFIX_ "filter=@YPOSITIVE_PATTERNS"
+ "[@G-@YNEGATIVE_PATTERNS]@D\n"
+" Run only the tests whose name matches one of the positive patterns but\n"
+" none of the negative patterns. '?' matches any single character; '*'\n"
+" matches any substring; ':' separates two patterns.\n"
+" @G--" GTEST_FLAG_PREFIX_ "also_run_disabled_tests@D\n"
+" Run all disabled tests too.\n"
+"\n"
+"Test Execution:\n"
+" @G--" GTEST_FLAG_PREFIX_ "repeat=@Y[COUNT]@D\n"
+" Run the tests repeatedly; use a negative count to repeat forever.\n"
+" @G--" GTEST_FLAG_PREFIX_ "shuffle@D\n"
+" Randomize tests' orders on every iteration.\n"
+" @G--" GTEST_FLAG_PREFIX_ "random_seed=@Y[NUMBER]@D\n"
+" Random number seed to use for shuffling test orders (between 1 and\n"
+" 99999, or 0 to use a seed based on the current time).\n"
+"\n"
+"Test Output:\n"
+" @G--" GTEST_FLAG_PREFIX_ "color=@Y(@Gyes@Y|@Gno@Y|@Gauto@Y)@D\n"
+" Enable/disable colored output. The default is @Gauto@D.\n"
+" @G--" GTEST_FLAG_PREFIX_ "print_time=0@D\n"
+" Don't print the elapsed time of each test.\n"
+" @G--" GTEST_FLAG_PREFIX_ "output=xml@Y[@G:@YDIRECTORY_PATH@G"
+ GTEST_PATH_SEP_ "@Y|@G:@YFILE_PATH]@D\n"
+" Generate an XML report in the given directory or with the given file\n"
+" name. @YFILE_PATH@D defaults to @Gtest_details.xml@D.\n"
+#if GTEST_CAN_STREAM_RESULTS_
+" @G--" GTEST_FLAG_PREFIX_ "stream_result_to=@YHOST@G:@YPORT@D\n"
+" Stream test results to the given server.\n"
+#endif // GTEST_CAN_STREAM_RESULTS_
+"\n"
+"Assertion Behavior:\n"
+#if GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
+" @G--" GTEST_FLAG_PREFIX_ "death_test_style=@Y(@Gfast@Y|@Gthreadsafe@Y)@D\n"
+" Set the default death test style.\n"
+#endif // GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
+" @G--" GTEST_FLAG_PREFIX_ "break_on_failure@D\n"
+" Turn assertion failures into debugger break-points.\n"
+" @G--" GTEST_FLAG_PREFIX_ "throw_on_failure@D\n"
+" Turn assertion failures into C++ exceptions.\n"
+" @G--" GTEST_FLAG_PREFIX_ "catch_exceptions=0@D\n"
+" Do not report exceptions as test failures. Instead, allow them\n"
+" to crash the program or pop up a dialog (on Windows).\n"
+"\n"
+"Except for @G--" GTEST_FLAG_PREFIX_ "list_tests@D, you can alternatively set "
+ "the corresponding\n"
+"environment variable of a flag (all letters in upper-case). For example, to\n"
+"disable colored text output, you can either specify @G--" GTEST_FLAG_PREFIX_
+ "color=no@D or set\n"
+"the @G" GTEST_FLAG_PREFIX_UPPER_ "COLOR@D environment variable to @Gno@D.\n"
+"\n"
+"For more information, please read the " GTEST_NAME_ " documentation at\n"
+"@G" GTEST_PROJECT_URL_ "@D. If you find a bug in " GTEST_NAME_ "\n"
+"(not one in your own code or tests), please report it to\n"
+"@G<" GTEST_DEV_EMAIL_ ">@D.\n";
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test. The type parameter CharType can be
+// instantiated to either char or wchar_t.
+template <typename CharType>
+void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) {
+ for (int i = 1; i < *argc; i++) {
+ const String arg_string = StreamableToString(argv[i]);
+ const char* const arg = arg_string.c_str();
+
+ using internal::ParseBoolFlag;
+ using internal::ParseInt32Flag;
+ using internal::ParseStringFlag;
+
+ // Do we see a Google Test flag?
+ if (ParseBoolFlag(arg, kAlsoRunDisabledTestsFlag,
+ &GTEST_FLAG(also_run_disabled_tests)) ||
+ ParseBoolFlag(arg, kBreakOnFailureFlag,
+ &GTEST_FLAG(break_on_failure)) ||
+ ParseBoolFlag(arg, kCatchExceptionsFlag,
+ &GTEST_FLAG(catch_exceptions)) ||
+ ParseStringFlag(arg, kColorFlag, &GTEST_FLAG(color)) ||
+ ParseStringFlag(arg, kDeathTestStyleFlag,
+ &GTEST_FLAG(death_test_style)) ||
+ ParseBoolFlag(arg, kDeathTestUseFork,
+ &GTEST_FLAG(death_test_use_fork)) ||
+ ParseStringFlag(arg, kFilterFlag, &GTEST_FLAG(filter)) ||
+ ParseStringFlag(arg, kInternalRunDeathTestFlag,
+ &GTEST_FLAG(internal_run_death_test)) ||
+ ParseBoolFlag(arg, kListTestsFlag, &GTEST_FLAG(list_tests)) ||
+ ParseStringFlag(arg, kOutputFlag, &GTEST_FLAG(output)) ||
+ ParseBoolFlag(arg, kPrintTimeFlag, &GTEST_FLAG(print_time)) ||
+ ParseInt32Flag(arg, kRandomSeedFlag, &GTEST_FLAG(random_seed)) ||
+ ParseInt32Flag(arg, kRepeatFlag, &GTEST_FLAG(repeat)) ||
+ ParseBoolFlag(arg, kShuffleFlag, &GTEST_FLAG(shuffle)) ||
+ ParseInt32Flag(arg, kStackTraceDepthFlag,
+ &GTEST_FLAG(stack_trace_depth)) ||
+ ParseStringFlag(arg, kStreamResultToFlag,
+ &GTEST_FLAG(stream_result_to)) ||
+ ParseBoolFlag(arg, kThrowOnFailureFlag,
+ &GTEST_FLAG(throw_on_failure))
+ ) {
+ // Yes. Shift the remainder of the argv list left by one. Note
+ // that argv has (*argc + 1) elements, the last one always being
+ // NULL. The following loop moves the trailing NULL element as
+ // well.
+ for (int j = i; j != *argc; j++) {
+ argv[j] = argv[j + 1];
+ }
+
+ // Decrements the argument count.
+ (*argc)--;
+
+ // We also need to decrement the iterator as we just removed
+ // an element.
+ i--;
+ } else if (arg_string == "--help" || arg_string == "-h" ||
+ arg_string == "-?" || arg_string == "/?" ||
+ HasGoogleTestFlagPrefix(arg)) {
+ // Both help flag and unrecognized Google Test flags (excluding
+ // internal ones) trigger help display.
+ g_help_flag = true;
+ }
+ }
+
+ if (g_help_flag) {
+ // We print the help here instead of in RUN_ALL_TESTS(), as the
+ // latter may not be called at all if the user is using Google
+ // Test with another testing framework.
+ PrintColorEncoded(kColorEncodedHelpMessage);
+ }
+}
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.
+void ParseGoogleTestFlagsOnly(int* argc, char** argv) {
+ ParseGoogleTestFlagsOnlyImpl(argc, argv);
+}
+void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv) {
+ ParseGoogleTestFlagsOnlyImpl(argc, argv);
+}
+
+// The internal implementation of InitGoogleTest().
+//
+// The type parameter CharType can be instantiated to either char or
+// wchar_t.
+template <typename CharType>
+void InitGoogleTestImpl(int* argc, CharType** argv) {
+ g_init_gtest_count++;
+
+ // We don't want to run the initialization code twice.
+ if (g_init_gtest_count != 1) return;
+
+ if (*argc <= 0) return;
+
+ internal::g_executable_path = internal::StreamableToString(argv[0]);
+
+#if GTEST_HAS_DEATH_TEST
+
+ g_argvs.clear();
+ for (int i = 0; i != *argc; i++) {
+ g_argvs.push_back(StreamableToString(argv[i]));
+ }
+
+#endif // GTEST_HAS_DEATH_TEST
+
+ ParseGoogleTestFlagsOnly(argc, argv);
+ GetUnitTestImpl()->PostFlagParsingInit();
+}
+
+} // namespace internal
+
+// Initializes Google Test. This must be called before calling
+// RUN_ALL_TESTS(). In particular, it parses a command line for the
+// flags that Google Test recognizes. Whenever a Google Test flag is
+// seen, it is removed from argv, and *argc is decremented.
+//
+// No value is returned. Instead, the Google Test flag variables are
+// updated.
+//
+// Calling the function for the second time has no user-visible effect.
+void InitGoogleTest(int* argc, char** argv) {
+ internal::InitGoogleTestImpl(argc, argv);
+}
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+void InitGoogleTest(int* argc, wchar_t** argv) {
+ internal::InitGoogleTestImpl(argc, argv);
+}
+
+} // namespace testing
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan), vladl@google.com (Vlad Losev)
+//
+// This file implements death tests.
+
+
+#if GTEST_HAS_DEATH_TEST
+
+# if GTEST_OS_MAC
+# include <crt_externs.h>
+# endif // GTEST_OS_MAC
+
+# include <errno.h>
+# include <fcntl.h>
+# include <limits.h>
+# include <stdarg.h>
+
+# if GTEST_OS_WINDOWS
+# include <windows.h>
+# else
+# include <sys/mman.h>
+# include <sys/wait.h>
+# endif // GTEST_OS_WINDOWS
+
+#endif // GTEST_HAS_DEATH_TEST
+
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+
+// Constants.
+
+// The default death test style.
+static const char kDefaultDeathTestStyle[] = "fast";
+
+GTEST_DEFINE_string_(
+ death_test_style,
+ internal::StringFromGTestEnv("death_test_style", kDefaultDeathTestStyle),
+ "Indicates how to run a death test in a forked child process: "
+ "\"threadsafe\" (child process re-executes the test binary "
+ "from the beginning, running only the specific death test) or "
+ "\"fast\" (child process runs the death test immediately "
+ "after forking).");
+
+GTEST_DEFINE_bool_(
+ death_test_use_fork,
+ internal::BoolFromGTestEnv("death_test_use_fork", false),
+ "Instructs to use fork()/_exit() instead of clone() in death tests. "
+ "Ignored and always uses fork() on POSIX systems where clone() is not "
+ "implemented. Useful when running under valgrind or similar tools if "
+ "those do not support clone(). Valgrind 3.3.1 will just fail if "
+ "it sees an unsupported combination of clone() flags. "
+ "It is not recommended to use this flag w/o valgrind though it will "
+ "work in 99% of the cases. Once valgrind is fixed, this flag will "
+ "most likely be removed.");
+
+namespace internal {
+GTEST_DEFINE_string_(
+ internal_run_death_test, "",
+ "Indicates the file, line number, temporal index of "
+ "the single death test to run, and a file descriptor to "
+ "which a success code may be sent, all separated by "
+ "colons. This flag is specified if and only if the current "
+ "process is a sub-process launched for running a thread-safe "
+ "death test. FOR INTERNAL USE ONLY.");
+} // namespace internal
+
+#if GTEST_HAS_DEATH_TEST
+
+// ExitedWithCode constructor.
+ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) {
+}
+
+// ExitedWithCode function-call operator.
+bool ExitedWithCode::operator()(int exit_status) const {
+# if GTEST_OS_WINDOWS
+
+ return exit_status == exit_code_;
+
+# else
+
+ return WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == exit_code_;
+
+# endif // GTEST_OS_WINDOWS
+}
+
+# if !GTEST_OS_WINDOWS
+// KilledBySignal constructor.
+KilledBySignal::KilledBySignal(int signum) : signum_(signum) {
+}
+
+// KilledBySignal function-call operator.
+bool KilledBySignal::operator()(int exit_status) const {
+ return WIFSIGNALED(exit_status) && WTERMSIG(exit_status) == signum_;
+}
+# endif // !GTEST_OS_WINDOWS
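+
+// A brief sketch of how these predicates are typically consumed by client
+// code through EXPECT_EXIT; ExitCleanly() and CrashWithSegv() are
+// hypothetical functions, and SIGSEGV comes from <signal.h>:
+//
+//   EXPECT_EXIT(ExitCleanly(), ::testing::ExitedWithCode(0), "done");
+//   EXPECT_EXIT(CrashWithSegv(), ::testing::KilledBySignal(SIGSEGV), ".*");
+//
+// The KilledBySignal form is only available on non-Windows platforms, as
+// guarded above.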
+
+namespace internal {
+
+// Utilities needed for death tests.
+
+// Generates a textual description of a given exit code, in the format
+// specified by wait(2).
+static String ExitSummary(int exit_code) {
+ Message m;
+
+# if GTEST_OS_WINDOWS
+
+ m << "Exited with exit status " << exit_code;
+
+# else
+
+ if (WIFEXITED(exit_code)) {
+ m << "Exited with exit status " << WEXITSTATUS(exit_code);
+ } else if (WIFSIGNALED(exit_code)) {
+ m << "Terminated by signal " << WTERMSIG(exit_code);
+ }
+# ifdef WCOREDUMP
+ if (WCOREDUMP(exit_code)) {
+ m << " (core dumped)";
+ }
+# endif
+# endif // GTEST_OS_WINDOWS
+
+ return m.GetString();
+}
+
+// Returns true if exit_status describes a process that was terminated
+// by a signal, or exited normally with a nonzero exit code.
+bool ExitedUnsuccessfully(int exit_status) {
+ return !ExitedWithCode(0)(exit_status);
+}
+
+# if !GTEST_OS_WINDOWS
+// Generates a textual failure message when a death test finds more than
+// one thread running, or cannot determine the number of threads, prior
+// to executing the given statement. It is the responsibility of the
+// caller not to pass a thread_count of 1.
+static String DeathTestThreadWarning(size_t thread_count) {
+ Message msg;
+ msg << "Death tests use fork(), which is unsafe particularly"
+ << " in a threaded context. For this test, " << GTEST_NAME_ << " ";
+ if (thread_count == 0)
+ msg << "couldn't detect the number of threads.";
+ else
+ msg << "detected " << thread_count << " threads.";
+ return msg.GetString();
+}
+# endif // !GTEST_OS_WINDOWS
+
+// Flag characters for reporting a death test that did not die.
+static const char kDeathTestLived = 'L';
+static const char kDeathTestReturned = 'R';
+static const char kDeathTestThrew = 'T';
+static const char kDeathTestInternalError = 'I';
+
+// An enumeration describing all of the possible ways that a death test can
+// conclude. DIED means that the process died while executing the test
+// code; LIVED means that the process lived beyond the end of the test code;
+// RETURNED means that the test statement attempted to execute a return
+// statement, which is not allowed; THREW means that the test statement
+// returned control by throwing an exception. IN_PROGRESS means the test
+// has not yet concluded.
+// TODO(vladl@google.com): Unify names and possibly values for
+// AbortReason, DeathTestOutcome, and flag characters above.
+enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW };
+
+// Routine for aborting the program which is safe to call from an
+// exec-style death test child process, in which case the error
+// message is propagated back to the parent process. Otherwise, the
+// message is simply printed to stderr. In either case, the program
+// then exits with status 1.
+void DeathTestAbort(const String& message) {
+ // On a POSIX system, this function may be called from a threadsafe-style
+ // death test child process, which operates on a very small stack. Use
+ // the heap for any additional non-minuscule memory requirements.
+ const InternalRunDeathTestFlag* const flag =
+ GetUnitTestImpl()->internal_run_death_test_flag();
+ if (flag != NULL) {
+ FILE* parent = posix::FDOpen(flag->write_fd(), "w");
+ fputc(kDeathTestInternalError, parent);
+ fprintf(parent, "%s", message.c_str());
+ fflush(parent);
+ _exit(1);
+ } else {
+ fprintf(stderr, "%s", message.c_str());
+ fflush(stderr);
+ posix::Abort();
+ }
+}
+
+// A replacement for CHECK that calls DeathTestAbort if the assertion
+// fails.
+# define GTEST_DEATH_TEST_CHECK_(expression) \
+ do { \
+ if (!::testing::internal::IsTrue(expression)) { \
+ DeathTestAbort(::testing::internal::String::Format( \
+ "CHECK failed: File %s, line %d: %s", \
+ __FILE__, __LINE__, #expression)); \
+ } \
+ } while (::testing::internal::AlwaysFalse())
+
+// This macro is similar to GTEST_DEATH_TEST_CHECK_, but it is meant for
+// evaluating any system call that fulfills two conditions: it must return
+// -1 on failure, and set errno to EINTR when it is interrupted and
+// should be tried again. The macro expands to a loop that repeatedly
+// evaluates the expression as long as it evaluates to -1 and sets
+// errno to EINTR. If the expression evaluates to -1 but errno is
+// something other than EINTR, DeathTestAbort is called.
+# define GTEST_DEATH_TEST_CHECK_SYSCALL_(expression) \
+ do { \
+ int gtest_retval; \
+ do { \
+ gtest_retval = (expression); \
+ } while (gtest_retval == -1 && errno == EINTR); \
+ if (gtest_retval == -1) { \
+ DeathTestAbort(::testing::internal::String::Format( \
+ "CHECK failed: File %s, line %d: %s != -1", \
+ __FILE__, __LINE__, #expression)); \
+ } \
+ } while (::testing::internal::AlwaysFalse())
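+
+// Typical use, as seen later in this file, is to wrap any EINTR-restartable
+// call, for example:
+//
+//   GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(read_fd()));
+//
+// The call is retried while it returns -1 with errno == EINTR; any other -1
+// result aborts the death test with a CHECK-style message.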
+
+// Returns the message describing the last system error in errno.
+String GetLastErrnoDescription() {
+ return String(errno == 0 ? "" : posix::StrError(errno));
+}
+
+// This is called from a death test parent process to read a failure
+// message from the death test child process and log it with the FATAL
+// severity. On Windows, the message is read from a pipe handle. On other
+// platforms, it is read from a file descriptor.
+static void FailFromInternalError(int fd) {
+ Message error;
+ char buffer[256];
+ int num_read;
+
+ do {
+ while ((num_read = posix::Read(fd, buffer, 255)) > 0) {
+ buffer[num_read] = '\0';
+ error << buffer;
+ }
+ } while (num_read == -1 && errno == EINTR);
+
+ if (num_read == 0) {
+ GTEST_LOG_(FATAL) << error.GetString();
+ } else {
+ const int last_error = errno;
+ GTEST_LOG_(FATAL) << "Error while reading death test internal: "
+ << GetLastErrnoDescription() << " [" << last_error << "]";
+ }
+}
+
+// Death test constructor. Increments the running death test count
+// for the current test.
+DeathTest::DeathTest() {
+ TestInfo* const info = GetUnitTestImpl()->current_test_info();
+ if (info == NULL) {
+ DeathTestAbort("Cannot run a death test outside of a TEST or "
+ "TEST_F construct");
+ }
+}
+
+// Creates and returns a death test by dispatching to the current
+// death test factory.
+bool DeathTest::Create(const char* statement, const RE* regex,
+ const char* file, int line, DeathTest** test) {
+ return GetUnitTestImpl()->death_test_factory()->Create(
+ statement, regex, file, line, test);
+}
+
+const char* DeathTest::LastMessage() {
+ return last_death_test_message_.c_str();
+}
+
+void DeathTest::set_last_death_test_message(const String& message) {
+ last_death_test_message_ = message;
+}
+
+String DeathTest::last_death_test_message_;
+
+// Provides cross platform implementation for some death functionality.
+class DeathTestImpl : public DeathTest {
+ protected:
+ DeathTestImpl(const char* a_statement, const RE* a_regex)
+ : statement_(a_statement),
+ regex_(a_regex),
+ spawned_(false),
+ status_(-1),
+ outcome_(IN_PROGRESS),
+ read_fd_(-1),
+ write_fd_(-1) {}
+
+ // read_fd_ is expected to be closed and cleared by a derived class.
+ ~DeathTestImpl() { GTEST_DEATH_TEST_CHECK_(read_fd_ == -1); }
+
+ void Abort(AbortReason reason);
+ virtual bool Passed(bool status_ok);
+
+ const char* statement() const { return statement_; }
+ const RE* regex() const { return regex_; }
+ bool spawned() const { return spawned_; }
+ void set_spawned(bool is_spawned) { spawned_ = is_spawned; }
+ int status() const { return status_; }
+ void set_status(int a_status) { status_ = a_status; }
+ DeathTestOutcome outcome() const { return outcome_; }
+ void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outcome; }
+ int read_fd() const { return read_fd_; }
+ void set_read_fd(int fd) { read_fd_ = fd; }
+ int write_fd() const { return write_fd_; }
+ void set_write_fd(int fd) { write_fd_ = fd; }
+
+ // Called in the parent process only. Reads the result code of the death
+ // test child process via a pipe, interprets it to set the outcome_
+ // member, and closes read_fd_. Outputs diagnostics and terminates in
+ // case of unexpected codes.
+ void ReadAndInterpretStatusByte();
+
+ private:
+ // The textual content of the code this object is testing. This class
+ // doesn't own this string and should not attempt to delete it.
+ const char* const statement_;
+ // The regular expression which test output must match. DeathTestImpl
+ // doesn't own this object and should not attempt to delete it.
+ const RE* const regex_;
+ // True if the death test child process has been successfully spawned.
+ bool spawned_;
+ // The exit status of the child process.
+ int status_;
+ // How the death test concluded.
+ DeathTestOutcome outcome_;
+ // Descriptor to the read end of the pipe to the child process. It is
+ // always -1 in the child process. The child keeps its write end of the
+ // pipe in write_fd_.
+ int read_fd_;
+ // Descriptor to the child's write end of the pipe to the parent process.
+ // It is always -1 in the parent process. The parent keeps its end of the
+ // pipe in read_fd_.
+ int write_fd_;
+};
+
+// Called in the parent process only. Reads the result code of the death
+// test child process via a pipe, interprets it to set the outcome_
+// member, and closes read_fd_. Outputs diagnostics and terminates in
+// case of unexpected codes.
+void DeathTestImpl::ReadAndInterpretStatusByte() {
+ char flag;
+ int bytes_read;
+
+ // The read() here blocks until data is available (signifying the
+ // failure of the death test) or until the pipe is closed (signifying
+ // its success), so it's okay to call this in the parent before
+ // the child process has exited.
+ do {
+ bytes_read = posix::Read(read_fd(), &flag, 1);
+ } while (bytes_read == -1 && errno == EINTR);
+
+ if (bytes_read == 0) {
+ set_outcome(DIED);
+ } else if (bytes_read == 1) {
+ switch (flag) {
+ case kDeathTestReturned:
+ set_outcome(RETURNED);
+ break;
+ case kDeathTestThrew:
+ set_outcome(THREW);
+ break;
+ case kDeathTestLived:
+ set_outcome(LIVED);
+ break;
+ case kDeathTestInternalError:
+ FailFromInternalError(read_fd()); // Does not return.
+ break;
+ default:
+ GTEST_LOG_(FATAL) << "Death test child process reported "
+ << "unexpected status byte ("
+ << static_cast<unsigned int>(flag) << ")";
+ }
+ } else {
+ GTEST_LOG_(FATAL) << "Read from death test child process failed: "
+ << GetLastErrnoDescription();
+ }
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(read_fd()));
+ set_read_fd(-1);
+}
+
+// Signals that the death test code which should have exited, didn't.
+// Should be called only in a death test child process.
+// Writes a status byte to the child's status file descriptor, then
+// calls _exit(1).
+void DeathTestImpl::Abort(AbortReason reason) {
+ // The parent process considers the death test to be a failure if
+ // it finds any data in our pipe. So, here we write a single flag byte
+ // to the pipe, then exit.
+ const char status_ch =
+ reason == TEST_DID_NOT_DIE ? kDeathTestLived :
+ reason == TEST_THREW_EXCEPTION ? kDeathTestThrew : kDeathTestReturned;
+
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Write(write_fd(), &status_ch, 1));
+ // We are leaking the descriptor here because on some platforms (e.g.,
+ // when built as a Windows DLL), destructors of global objects will still
+ // run after calling _exit(). On such systems, write_fd_ will be
+ // indirectly closed from the destructor of UnitTestImpl, causing double
+ // close if it is also closed here. On debug configurations, double close
+ // may assert. As there are no in-process buffers to flush here, we are
+ // relying on the OS to close the descriptor after the process terminates
+ // when the destructors are not run.
+ _exit(1); // Exits w/o any normal exit hooks (we were supposed to crash)
+}
+
+// Returns an indented copy of stderr output for a death test.
+// This makes distinguishing death test output lines from regular log lines
+// much easier.
+static ::std::string FormatDeathTestOutput(const ::std::string& output) {
+ ::std::string ret;
+ for (size_t at = 0; ; ) {
+ const size_t line_end = output.find('\n', at);
+ ret += "[ DEATH ] ";
+ if (line_end == ::std::string::npos) {
+ ret += output.substr(at);
+ break;
+ }
+ ret += output.substr(at, line_end + 1 - at);
+ at = line_end + 1;
+ }
+ return ret;
+}
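+
+// For example (illustrative input/output):
+//
+//   FormatDeathTestOutput("first\nsecond");
+//   // returns "[ DEATH ] first\n[ DEATH ] second"
+//
+// i.e. every line of the child's captured stderr is tagged with the
+// "[ DEATH ] " prefix.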
+
+// Assesses the success or failure of a death test, using both private
+// members which have previously been set, and one argument:
+//
+// Private data members:
+// outcome: An enumeration describing how the death test
+// concluded: DIED, LIVED, THREW, or RETURNED. The death test
+// fails in the latter three cases.
+// status: The exit status of the child process. On *nix, it is in the
+// format specified by wait(2). On Windows, this is the
+// value supplied to the ExitProcess() API or a numeric code
+// of the exception that terminated the program.
+// regex: A regular expression object to be applied to
+// the test's captured standard error output; the death test
+// fails if it does not match.
+//
+// Argument:
+// status_ok: true if exit_status is acceptable in the context of
+// this particular death test, which fails if it is false
+//
+// Returns true iff all of the above conditions are met. Otherwise, the
+// first failing condition, in the order given above, is the one that is
+// reported. Also sets the last death test message string.
+bool DeathTestImpl::Passed(bool status_ok) {
+ if (!spawned())
+ return false;
+
+ const String error_message = GetCapturedStderr();
+
+ bool success = false;
+ Message buffer;
+
+ buffer << "Death test: " << statement() << "\n";
+ switch (outcome()) {
+ case LIVED:
+ buffer << " Result: failed to die.\n"
+ << " Error msg:\n" << FormatDeathTestOutput(error_message);
+ break;
+ case THREW:
+ buffer << " Result: threw an exception.\n"
+ << " Error msg:\n" << FormatDeathTestOutput(error_message);
+ break;
+ case RETURNED:
+ buffer << " Result: illegal return in test statement.\n"
+ << " Error msg:\n" << FormatDeathTestOutput(error_message);
+ break;
+ case DIED:
+ if (status_ok) {
+ const bool matched = RE::PartialMatch(error_message.c_str(), *regex());
+ if (matched) {
+ success = true;
+ } else {
+ buffer << " Result: died but not with expected error.\n"
+ << " Expected: " << regex()->pattern() << "\n"
+ << "Actual msg:\n" << FormatDeathTestOutput(error_message);
+ }
+ } else {
+ buffer << " Result: died but not with expected exit code:\n"
+ << " " << ExitSummary(status()) << "\n"
+ << "Actual msg:\n" << FormatDeathTestOutput(error_message);
+ }
+ break;
+ case IN_PROGRESS:
+ default:
+ GTEST_LOG_(FATAL)
+ << "DeathTest::Passed somehow called before conclusion of test";
+ }
+
+ DeathTest::set_last_death_test_message(buffer.GetString());
+ return success;
+}
+
+# if GTEST_OS_WINDOWS
+// WindowsDeathTest implements death tests on Windows. Due to the
+// specifics of starting new processes on Windows, death tests there are
+// always threadsafe, and Google Test considers the
+// --gtest_death_test_style=fast setting to be equivalent to
+// --gtest_death_test_style=threadsafe there.
+//
+// A few implementation notes: Like the Linux version, the Windows
+// implementation uses pipes for child-to-parent communication. But due to
+// the specifics of pipes on Windows, some extra steps are required:
+//
+// 1. The parent creates a communication pipe and stores handles to both
+// ends of it.
+// 2. The parent starts the child and provides it with the information
+// necessary to acquire the handle to the write end of the pipe.
+// 3. The child acquires the write end of the pipe and signals the parent
+// using a Windows event.
+// 4. Now the parent can release the write end of the pipe on its side. If
+// this is done before step 3, the object's reference count goes down to
+// 0 and it is destroyed, preventing the child from acquiring it. The
+// parent now has to release it, or read operations on the read end of
+// the pipe will not return when the child terminates.
+// 5. The parent reads the child's output (outcome code and any possible
+// error messages) from the pipe and from the child's stderr, and then
+// determines whether to fail the test.
+//
+// Note: to distinguish Win32 API calls from the local method and function
+// calls, the former are explicitly resolved in the global namespace.
+//
+class WindowsDeathTest : public DeathTestImpl {
+ public:
+ WindowsDeathTest(const char* a_statement,
+ const RE* a_regex,
+ const char* file,
+ int line)
+ : DeathTestImpl(a_statement, a_regex), file_(file), line_(line) {}
+
+ // All of these virtual functions are inherited from DeathTest.
+ virtual int Wait();
+ virtual TestRole AssumeRole();
+
+ private:
+ // The name of the file in which the death test is located.
+ const char* const file_;
+ // The line number on which the death test is located.
+ const int line_;
+ // Handle to the write end of the pipe to the child process.
+ AutoHandle write_handle_;
+ // Child process handle.
+ AutoHandle child_handle_;
+ // Event the child process uses to signal the parent that it has
+ // acquired the handle to the write end of the pipe. After seeing this
+ // event the parent can release its own handles to make sure its
+ // ReadFile() calls return when the child terminates.
+ AutoHandle event_handle_;
+};
+
+// Waits for the child in a death test to exit, returning its exit
+// status, or 0 if no child process exists. As a side effect, sets the
+// outcome data member.
+int WindowsDeathTest::Wait() {
+ if (!spawned())
+ return 0;
+
+ // Wait until the child either signals that it has acquired the write end
+ // of the pipe or it dies.
+ const HANDLE wait_handles[2] = { child_handle_.Get(), event_handle_.Get() };
+ switch (::WaitForMultipleObjects(2,
+ wait_handles,
+ FALSE, // Waits for any of the handles.
+ INFINITE)) {
+ case WAIT_OBJECT_0:
+ case WAIT_OBJECT_0 + 1:
+ break;
+ default:
+ GTEST_DEATH_TEST_CHECK_(false); // Should not get here.
+ }
+
+ // The child has acquired the write end of the pipe or exited.
+ // We release the handle on our side and continue.
+ write_handle_.Reset();
+ event_handle_.Reset();
+
+ ReadAndInterpretStatusByte();
+
+ // Waits for the child process to exit if it hasn't already. This
+ // returns immediately if the child has already exited, regardless of
+ // whether previous calls to WaitForMultipleObjects synchronized on this
+ // handle or not.
+ GTEST_DEATH_TEST_CHECK_(
+ WAIT_OBJECT_0 == ::WaitForSingleObject(child_handle_.Get(),
+ INFINITE));
+ DWORD status_code;
+ GTEST_DEATH_TEST_CHECK_(
+ ::GetExitCodeProcess(child_handle_.Get(), &status_code) != FALSE);
+ child_handle_.Reset();
+ set_status(static_cast<int>(status_code));
+ return status();
+}
+
+// The AssumeRole process for a Windows death test. It creates a child
+// process with the same executable as the current process to run the
+// death test. The child process is given the --gtest_filter and
+// --gtest_internal_run_death_test flags such that it knows to run the
+// current death test only.
+DeathTest::TestRole WindowsDeathTest::AssumeRole() {
+ const UnitTestImpl* const impl = GetUnitTestImpl();
+ const InternalRunDeathTestFlag* const flag =
+ impl->internal_run_death_test_flag();
+ const TestInfo* const info = impl->current_test_info();
+ const int death_test_index = info->result()->death_test_count();
+
+ if (flag != NULL) {
+ // ParseInternalRunDeathTestFlag() has performed all the necessary
+ // processing.
+ set_write_fd(flag->write_fd());
+ return EXECUTE_TEST;
+ }
+
+ // WindowsDeathTest uses an anonymous pipe to communicate results of
+ // a death test.
+ SECURITY_ATTRIBUTES handles_are_inheritable = {
+ sizeof(SECURITY_ATTRIBUTES), NULL, TRUE };
+ HANDLE read_handle, write_handle;
+ GTEST_DEATH_TEST_CHECK_(
+ ::CreatePipe(&read_handle, &write_handle, &handles_are_inheritable,
+ 0) // Default buffer size.
+ != FALSE);
+ set_read_fd(::_open_osfhandle(reinterpret_cast<intptr_t>(read_handle),
+ O_RDONLY));
+ write_handle_.Reset(write_handle);
+ event_handle_.Reset(::CreateEvent(
+ &handles_are_inheritable,
+ TRUE, // Manual-reset: the event stays signaled until explicitly reset.
+ FALSE, // The initial state is non-signaled.
+ NULL)); // The event is unnamed.
+ GTEST_DEATH_TEST_CHECK_(event_handle_.Get() != NULL);
+ const String filter_flag = String::Format("--%s%s=%s.%s",
+ GTEST_FLAG_PREFIX_, kFilterFlag,
+ info->test_case_name(),
+ info->name());
+ const String internal_flag = String::Format(
+ "--%s%s=%s|%d|%d|%u|%Iu|%Iu",
+ GTEST_FLAG_PREFIX_,
+ kInternalRunDeathTestFlag,
+ file_, line_,
+ death_test_index,
+ static_cast<unsigned int>(::GetCurrentProcessId()),
+ // size_t has the same width as pointers on both 32-bit and 64-bit
+ // Windows platforms.
+ // See http://msdn.microsoft.com/en-us/library/tcxf1dw6.aspx.
+ reinterpret_cast<size_t>(write_handle),
+ reinterpret_cast<size_t>(event_handle_.Get()));
+
+ char executable_path[_MAX_PATH + 1]; // NOLINT
+ GTEST_DEATH_TEST_CHECK_(
+ _MAX_PATH + 1 != ::GetModuleFileNameA(NULL,
+ executable_path,
+ _MAX_PATH));
+
+ String command_line = String::Format("%s %s \"%s\"",
+ ::GetCommandLineA(),
+ filter_flag.c_str(),
+ internal_flag.c_str());
+
+ DeathTest::set_last_death_test_message("");
+
+ CaptureStderr();
+ // Flush the log buffers since the log streams are shared with the child.
+ FlushInfoLog();
+
+ // The child process will share the standard handles with the parent.
+ STARTUPINFOA startup_info;
+ memset(&startup_info, 0, sizeof(STARTUPINFO));
+ startup_info.dwFlags = STARTF_USESTDHANDLES;
+ startup_info.hStdInput = ::GetStdHandle(STD_INPUT_HANDLE);
+ startup_info.hStdOutput = ::GetStdHandle(STD_OUTPUT_HANDLE);
+ startup_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE);
+
+ PROCESS_INFORMATION process_info;
+ GTEST_DEATH_TEST_CHECK_(::CreateProcessA(
+ executable_path,
+ const_cast<char*>(command_line.c_str()),
+ NULL, // Returned process handle is not inheritable.
+ NULL, // Returned thread handle is not inheritable.
+ TRUE, // Child inherits all inheritable handles (for write_handle_).
+ 0x0, // Default creation flags.
+ NULL, // Inherit the parent's environment.
+ UnitTest::GetInstance()->original_working_dir(),
+ &startup_info,
+ &process_info) != FALSE);
+ child_handle_.Reset(process_info.hProcess);
+ ::CloseHandle(process_info.hThread);
+ set_spawned(true);
+ return OVERSEE_TEST;
+}
+# else // We are not on Windows.
+
+// ForkingDeathTest provides implementations for most of the abstract
+// methods of the DeathTest interface. Only the AssumeRole method is
+// left undefined.
+class ForkingDeathTest : public DeathTestImpl {
+ public:
+ ForkingDeathTest(const char* statement, const RE* regex);
+
+ // All of these virtual functions are inherited from DeathTest.
+ virtual int Wait();
+
+ protected:
+ void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
+
+ private:
+ // PID of child process during death test; 0 in the child process itself.
+ pid_t child_pid_;
+};
+
+// Constructs a ForkingDeathTest.
+ForkingDeathTest::ForkingDeathTest(const char* a_statement, const RE* a_regex)
+ : DeathTestImpl(a_statement, a_regex),
+ child_pid_(-1) {}
+
+// Waits for the child in a death test to exit, returning its exit
+// status, or 0 if no child process exists. As a side effect, sets the
+// outcome data member.
+int ForkingDeathTest::Wait() {
+ if (!spawned())
+ return 0;
+
+ ReadAndInterpretStatusByte();
+
+ int status_value;
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(waitpid(child_pid_, &status_value, 0));
+ set_status(status_value);
+ return status_value;
+}
+
+// A concrete death test class that forks, then immediately runs the test
+// in the child process.
+class NoExecDeathTest : public ForkingDeathTest {
+ public:
+ NoExecDeathTest(const char* a_statement, const RE* a_regex) :
+ ForkingDeathTest(a_statement, a_regex) { }
+ virtual TestRole AssumeRole();
+};
+
+// The AssumeRole process for a fork-and-run death test. It implements a
+// straightforward fork, with a simple pipe to transmit the status byte.
+DeathTest::TestRole NoExecDeathTest::AssumeRole() {
+ const size_t thread_count = GetThreadCount();
+ if (thread_count != 1) {
+ GTEST_LOG_(WARNING) << DeathTestThreadWarning(thread_count);
+ }
+
+ int pipe_fd[2];
+ GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
+
+ DeathTest::set_last_death_test_message("");
+ CaptureStderr();
+ // When we fork the process below, the log file buffers are copied, but the
+ // file descriptors are shared. We flush all log files here so that closing
+ // the file descriptors in the child process doesn't throw off the
+ // synchronization between descriptors and buffers in the parent process.
+ // This is as close to the fork as possible to avoid a race condition in case
+ // there are multiple threads running before the death test, and another
+ // thread writes to the log file.
+ FlushInfoLog();
+
+ const pid_t child_pid = fork();
+ GTEST_DEATH_TEST_CHECK_(child_pid != -1);
+ set_child_pid(child_pid);
+ if (child_pid == 0) {
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[0]));
+ set_write_fd(pipe_fd[1]);
+ // Redirects all logging to stderr in the child process to prevent
+ // concurrent writes to the log files. We capture stderr in the parent
+ // process and append the child process' output to a log.
+ LogToStderr();
+ // Event forwarding to the listeners of the event listener API must be
+ // shut down in death test subprocesses.
+ GetUnitTestImpl()->listeners()->SuppressEventForwarding();
+ return EXECUTE_TEST;
+ } else {
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
+ set_read_fd(pipe_fd[0]);
+ set_spawned(true);
+ return OVERSEE_TEST;
+ }
+}
+
+// A concrete death test class that forks and re-executes the main
+// program from the beginning, with command-line flags set that cause
+// only this specific death test to be run.
+class ExecDeathTest : public ForkingDeathTest {
+ public:
+ ExecDeathTest(const char* a_statement, const RE* a_regex,
+ const char* file, int line) :
+ ForkingDeathTest(a_statement, a_regex), file_(file), line_(line) { }
+ virtual TestRole AssumeRole();
+ private:
+ // The name of the file in which the death test is located.
+ const char* const file_;
+ // The line number on which the death test is located.
+ const int line_;
+};
+
+// Utility class for accumulating command-line arguments.
+class Arguments {
+ public:
+ Arguments() {
+ args_.push_back(NULL);
+ }
+
+ ~Arguments() {
+ for (std::vector<char*>::iterator i = args_.begin(); i != args_.end();
+ ++i) {
+ free(*i);
+ }
+ }
+ void AddArgument(const char* argument) {
+ args_.insert(args_.end() - 1, posix::StrDup(argument));
+ }
+
+ template <typename Str>
+ void AddArguments(const ::std::vector<Str>& arguments) {
+ for (typename ::std::vector<Str>::const_iterator i = arguments.begin();
+ i != arguments.end();
+ ++i) {
+ args_.insert(args_.end() - 1, posix::StrDup(i->c_str()));
+ }
+ }
+ char* const* Argv() {
+ return &args_[0];
+ }
+ private:
+ std::vector<char*> args_;
+};
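+
+// Illustrative usage of Arguments (a sketch with made-up flag values; it
+// mirrors how ExecDeathTest::AssumeRole below builds the child's argv):
+//   Arguments args;
+//   args.AddArguments(GetArgvs());                   // copy the original argv
+//   args.AddArgument("--gtest_filter=FooTest.Bar");  // hypothetical flag
+//   // args.Argv() is now a NULL-terminated array suitable for execve().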
+
+// A struct that encompasses the arguments to the child process of a
+// threadsafe-style death test process.
+struct ExecDeathTestArgs {
+ char* const* argv; // Command-line arguments for the child's call to exec
+ int close_fd; // File descriptor to close; the read end of a pipe
+};
+
+# if GTEST_OS_MAC
+inline char** GetEnviron() {
+ // When Google Test is built as a framework on MacOS X, the environ variable
+ // is unavailable. Apple's documentation (man environ) recommends using
+ // _NSGetEnviron() instead.
+ return *_NSGetEnviron();
+}
+# else
+// Some POSIX platforms expect you to declare environ. extern "C" makes
+// it reside in the global namespace.
+extern "C" char** environ;
+inline char** GetEnviron() { return environ; }
+# endif // GTEST_OS_MAC
+
+// The main function for a threadsafe-style death test child process.
+// This function is called in a clone()-ed process and thus must avoid
+// any potentially unsafe operations like malloc or libc functions.
+static int ExecDeathTestChildMain(void* child_arg) {
+ ExecDeathTestArgs* const args = static_cast<ExecDeathTestArgs*>(child_arg);
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(args->close_fd));
+
+ // We need to execute the test program in the same environment where
+ // it was originally invoked. Therefore we change to the original
+ // working directory first.
+ const char* const original_dir =
+ UnitTest::GetInstance()->original_working_dir();
+ // We can safely call chdir() as it's a direct system call.
+ if (chdir(original_dir) != 0) {
+ DeathTestAbort(String::Format("chdir(\"%s\") failed: %s",
+ original_dir,
+ GetLastErrnoDescription().c_str()));
+ return EXIT_FAILURE;
+ }
+
+ // We can safely call execve() as it's a direct system call. We
+ // cannot use execvp() as it's a libc function and thus potentially
+ // unsafe. Since execve() doesn't search the PATH, the user must
+ // invoke the test program via a valid path that contains at least
+ // one path separator.
+ execve(args->argv[0], args->argv, GetEnviron());
+ DeathTestAbort(String::Format("execve(%s, ...) in %s failed: %s",
+ args->argv[0],
+ original_dir,
+ GetLastErrnoDescription().c_str()));
+ return EXIT_FAILURE;
+}
+
+// Two utility routines that together determine the direction the stack
+// grows.
+// This could be accomplished more elegantly by a single recursive
+// function, but we want to guard against the unlikely possibility of
+// a smart compiler optimizing the recursion away.
+//
+// GTEST_NO_INLINE_ is required to prevent GCC 4.6 from inlining
+// StackLowerThanAddress into StackGrowsDown, which then doesn't give
+// correct answer.
+bool StackLowerThanAddress(const void* ptr) GTEST_NO_INLINE_;
+bool StackLowerThanAddress(const void* ptr) {
+ int dummy;
+ return &dummy < ptr;
+}
+
+bool StackGrowsDown() {
+ int dummy;
+ return StackLowerThanAddress(&dummy);
+}
+
+// A threadsafe implementation of fork(2) for threadsafe-style death tests
+// that uses clone(2). It dies with an error message if anything goes
+// wrong.
+static pid_t ExecDeathTestFork(char* const* argv, int close_fd) {
+ ExecDeathTestArgs args = { argv, close_fd };
+ pid_t child_pid = -1;
+
+# if GTEST_HAS_CLONE
+ const bool use_fork = GTEST_FLAG(death_test_use_fork);
+
+ if (!use_fork) {
+ static const bool stack_grows_down = StackGrowsDown();
+ const size_t stack_size = getpagesize();
+ // MAP_ANONYMOUS is not defined on Mac, so we use MAP_ANON instead.
+ void* const stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE, -1, 0);
+ GTEST_DEATH_TEST_CHECK_(stack != MAP_FAILED);
+ void* const stack_top =
+ static_cast<char*>(stack) + (stack_grows_down ? stack_size : 0);
+
+ child_pid = clone(&ExecDeathTestChildMain, stack_top, SIGCHLD, &args);
+
+ GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1);
+ }
+# else
+ const bool use_fork = true;
+# endif // GTEST_HAS_CLONE
+
+ if (use_fork && (child_pid = fork()) == 0) {
+ ExecDeathTestChildMain(&args);
+ _exit(0);
+ }
+
+ GTEST_DEATH_TEST_CHECK_(child_pid != -1);
+ return child_pid;
+}
+
+// The AssumeRole process for a fork-and-exec death test. It re-executes the
+// main program from the beginning, setting the --gtest_filter
+// and --gtest_internal_run_death_test flags to cause only the current
+// death test to be re-run.
+DeathTest::TestRole ExecDeathTest::AssumeRole() {
+ const UnitTestImpl* const impl = GetUnitTestImpl();
+ const InternalRunDeathTestFlag* const flag =
+ impl->internal_run_death_test_flag();
+ const TestInfo* const info = impl->current_test_info();
+ const int death_test_index = info->result()->death_test_count();
+
+ if (flag != NULL) {
+ set_write_fd(flag->write_fd());
+ return EXECUTE_TEST;
+ }
+
+ int pipe_fd[2];
+ GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
+ // Clear the close-on-exec flag on the write end of the pipe, lest
+ // it be closed when the child process does an exec:
+ GTEST_DEATH_TEST_CHECK_(fcntl(pipe_fd[1], F_SETFD, 0) != -1);
+
+ const String filter_flag =
+ String::Format("--%s%s=%s.%s",
+ GTEST_FLAG_PREFIX_, kFilterFlag,
+ info->test_case_name(), info->name());
+ const String internal_flag =
+ String::Format("--%s%s=%s|%d|%d|%d",
+ GTEST_FLAG_PREFIX_, kInternalRunDeathTestFlag,
+ file_, line_, death_test_index, pipe_fd[1]);
+ Arguments args;
+ args.AddArguments(GetArgvs());
+ args.AddArgument(filter_flag.c_str());
+ args.AddArgument(internal_flag.c_str());
+
+ DeathTest::set_last_death_test_message("");
+
+ CaptureStderr();
+ // See the comment in NoExecDeathTest::AssumeRole for why the next line
+ // is necessary.
+ FlushInfoLog();
+
+ const pid_t child_pid = ExecDeathTestFork(args.Argv(), pipe_fd[0]);
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
+ set_child_pid(child_pid);
+ set_read_fd(pipe_fd[0]);
+ set_spawned(true);
+ return OVERSEE_TEST;
+}
+
+# endif // !GTEST_OS_WINDOWS
+
+// Creates a concrete DeathTest-derived class that depends on the
+// --gtest_death_test_style flag, and sets the pointer pointed to
+// by the "test" argument to its address. If the test should be
+// skipped, sets that pointer to NULL. Returns true, unless the
+// flag is set to an invalid value.
+bool DefaultDeathTestFactory::Create(const char* statement, const RE* regex,
+ const char* file, int line,
+ DeathTest** test) {
+ UnitTestImpl* const impl = GetUnitTestImpl();
+ const InternalRunDeathTestFlag* const flag =
+ impl->internal_run_death_test_flag();
+ const int death_test_index = impl->current_test_info()
+ ->increment_death_test_count();
+
+ if (flag != NULL) {
+ if (death_test_index > flag->index()) {
+ DeathTest::set_last_death_test_message(String::Format(
+ "Death test count (%d) somehow exceeded expected maximum (%d)",
+ death_test_index, flag->index()));
+ return false;
+ }
+
+ if (!(flag->file() == file && flag->line() == line &&
+ flag->index() == death_test_index)) {
+ *test = NULL;
+ return true;
+ }
+ }
+
+# if GTEST_OS_WINDOWS
+
+ if (GTEST_FLAG(death_test_style) == "threadsafe" ||
+ GTEST_FLAG(death_test_style) == "fast") {
+ *test = new WindowsDeathTest(statement, regex, file, line);
+ }
+
+# else
+
+ if (GTEST_FLAG(death_test_style) == "threadsafe") {
+ *test = new ExecDeathTest(statement, regex, file, line);
+ } else if (GTEST_FLAG(death_test_style) == "fast") {
+ *test = new NoExecDeathTest(statement, regex);
+ }
+
+# endif // GTEST_OS_WINDOWS
+
+ else { // NOLINT - this is more readable than unbalanced brackets inside #if.
+ DeathTest::set_last_death_test_message(String::Format(
+ "Unknown death test style \"%s\" encountered",
+ GTEST_FLAG(death_test_style).c_str()));
+ return false;
+ }
+
+ return true;
+}
+
+// Splits a given string on a given delimiter, populating a given
+// vector with the fields. GTEST_HAS_DEATH_TEST implies that we have
+// ::std::string, so we can use it here.
+static void SplitString(const ::std::string& str, char delimiter,
+ ::std::vector< ::std::string>* dest) {
+ ::std::vector< ::std::string> parsed;
+ ::std::string::size_type pos = 0;
+ while (::testing::internal::AlwaysTrue()) {
+ const ::std::string::size_type colon = str.find(delimiter, pos);
+ if (colon == ::std::string::npos) {
+ parsed.push_back(str.substr(pos));
+ break;
+ } else {
+ parsed.push_back(str.substr(pos, colon - pos));
+ pos = colon + 1;
+ }
+ }
+ dest->swap(parsed);
+}
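+
+// For example (a sketch of the behavior above, with made-up input):
+//   ::std::vector< ::std::string> fields;
+//   SplitString("foo.cc|42|1|7", '|', &fields);
+//   // fields == { "foo.cc", "42", "1", "7" }.  Empty fields are preserved,
+//   // so "a||" yields { "a", "", "" }.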
+
+# if GTEST_OS_WINDOWS
+// Recreates the pipe and event handles from the provided parameters,
+// signals the event, and returns a file descriptor wrapped around the pipe
+// handle. This function is called in the child process only.
+int GetStatusFileDescriptor(unsigned int parent_process_id,
+ size_t write_handle_as_size_t,
+ size_t event_handle_as_size_t) {
+ AutoHandle parent_process_handle(::OpenProcess(PROCESS_DUP_HANDLE,
+ FALSE, // Non-inheritable.
+ parent_process_id));
+ if (parent_process_handle.Get() == INVALID_HANDLE_VALUE) {
+ DeathTestAbort(String::Format("Unable to open parent process %u",
+ parent_process_id));
+ }
+
+ // TODO(vladl@google.com): Replace the following check with a
+ // compile-time assertion when available.
+ GTEST_CHECK_(sizeof(HANDLE) <= sizeof(size_t));
+
+ const HANDLE write_handle =
+ reinterpret_cast<HANDLE>(write_handle_as_size_t);
+ HANDLE dup_write_handle;
+
+ // The newly initialized handle is accessible only in the parent
+ // process. To obtain one accessible within the child, we need to use
+ // DuplicateHandle.
+ if (!::DuplicateHandle(parent_process_handle.Get(), write_handle,
+ ::GetCurrentProcess(), &dup_write_handle,
+ 0x0, // Requested privileges ignored since
+ // DUPLICATE_SAME_ACCESS is used.
+ FALSE, // Request non-inheritable handle.
+ DUPLICATE_SAME_ACCESS)) {
+ DeathTestAbort(String::Format(
+ "Unable to duplicate the pipe handle %Iu from the parent process %u",
+ write_handle_as_size_t, parent_process_id));
+ }
+
+ const HANDLE event_handle = reinterpret_cast<HANDLE>(event_handle_as_size_t);
+ HANDLE dup_event_handle;
+
+ if (!::DuplicateHandle(parent_process_handle.Get(), event_handle,
+ ::GetCurrentProcess(), &dup_event_handle,
+ 0x0,
+ FALSE,
+ DUPLICATE_SAME_ACCESS)) {
+ DeathTestAbort(String::Format(
+ "Unable to duplicate the event handle %Iu from the parent process %u",
+ event_handle_as_size_t, parent_process_id));
+ }
+
+ const int write_fd =
+ ::_open_osfhandle(reinterpret_cast<intptr_t>(dup_write_handle), O_APPEND);
+ if (write_fd == -1) {
+ DeathTestAbort(String::Format(
+ "Unable to convert pipe handle %Iu to a file descriptor",
+ write_handle_as_size_t));
+ }
+
+ // Signals the parent that the write end of the pipe has been acquired
+ // so the parent can release its own write end.
+ ::SetEvent(dup_event_handle);
+
+ return write_fd;
+}
+# endif // GTEST_OS_WINDOWS
+
+// Returns a newly created InternalRunDeathTestFlag object with fields
+// initialized from the GTEST_FLAG(internal_run_death_test) flag if
+// the flag is specified; otherwise returns NULL.
+InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() {
+ if (GTEST_FLAG(internal_run_death_test) == "") return NULL;
+
+ // GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we
+ // can use it here.
+ int line = -1;
+ int index = -1;
+ ::std::vector< ::std::string> fields;
+ SplitString(GTEST_FLAG(internal_run_death_test).c_str(), '|', &fields);
+ int write_fd = -1;
+
+# if GTEST_OS_WINDOWS
+
+ unsigned int parent_process_id = 0;
+ size_t write_handle_as_size_t = 0;
+ size_t event_handle_as_size_t = 0;
+
+ if (fields.size() != 6
+ || !ParseNaturalNumber(fields[1], &line)
+ || !ParseNaturalNumber(fields[2], &index)
+ || !ParseNaturalNumber(fields[3], &parent_process_id)
+ || !ParseNaturalNumber(fields[4], &write_handle_as_size_t)
+ || !ParseNaturalNumber(fields[5], &event_handle_as_size_t)) {
+ DeathTestAbort(String::Format(
+ "Bad --gtest_internal_run_death_test flag: %s",
+ GTEST_FLAG(internal_run_death_test).c_str()));
+ }
+ write_fd = GetStatusFileDescriptor(parent_process_id,
+ write_handle_as_size_t,
+ event_handle_as_size_t);
+# else
+
+ if (fields.size() != 4
+ || !ParseNaturalNumber(fields[1], &line)
+ || !ParseNaturalNumber(fields[2], &index)
+ || !ParseNaturalNumber(fields[3], &write_fd)) {
+ DeathTestAbort(String::Format(
+ "Bad --gtest_internal_run_death_test flag: %s",
+ GTEST_FLAG(internal_run_death_test).c_str()));
+ }
+
+# endif // GTEST_OS_WINDOWS
+
+ return new InternalRunDeathTestFlag(fields[0], line, index, write_fd);
+}
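+
+// For example (a sketch of the flag layouts parsed above, with made-up
+// values):
+//   POSIX:   --gtest_internal_run_death_test=foo_test.cc|42|1|7
+//            -> file "foo_test.cc", line 42, death test index 1, write fd 7.
+//   Windows: --gtest_internal_run_death_test=foo_test.cc|42|1|1234|88|92
+//            -> the last three fields are the parent process id and the
+//               write-pipe and event handles encoded as size_t values.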
+
+} // namespace internal
+
+#endif // GTEST_HAS_DEATH_TEST
+
+} // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: keith.ray@gmail.com (Keith Ray)
+
+
+#include <stdlib.h>
+
+#if GTEST_OS_WINDOWS_MOBILE
+# include <windows.h>
+#elif GTEST_OS_WINDOWS
+# include <direct.h>
+# include <io.h>
+#elif GTEST_OS_SYMBIAN || GTEST_OS_NACL
+// Symbian OpenC and NaCl have PATH_MAX in sys/syslimits.h
+# include <sys/syslimits.h>
+#else
+# include <limits.h>
+# include <climits> // Some Linux distributions define PATH_MAX here.
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+#if GTEST_OS_WINDOWS
+# define GTEST_PATH_MAX_ _MAX_PATH
+#elif defined(PATH_MAX)
+# define GTEST_PATH_MAX_ PATH_MAX
+#elif defined(_XOPEN_PATH_MAX)
+# define GTEST_PATH_MAX_ _XOPEN_PATH_MAX
+#else
+# define GTEST_PATH_MAX_ _POSIX_PATH_MAX
+#endif // GTEST_OS_WINDOWS
+
+
+namespace testing {
+namespace internal {
+
+#if GTEST_OS_WINDOWS
+// On Windows, '\\' is the standard path separator, but many tools and the
+// Windows API also accept '/' as an alternate path separator. Unless otherwise
+// noted, a file path can contain either kind of path separators, or a mixture
+// of them.
+const char kPathSeparator = '\\';
+const char kAlternatePathSeparator = '/';
+const char kPathSeparatorString[] = "\\";
+const char kAlternatePathSeparatorString[] = "/";
+# if GTEST_OS_WINDOWS_MOBILE
+// Windows CE doesn't have a current directory. You should not use
+// the current directory in tests on Windows CE, but this at least
+// provides a reasonable fallback.
+const char kCurrentDirectoryString[] = "\\";
+// Windows CE doesn't define INVALID_FILE_ATTRIBUTES
+const DWORD kInvalidFileAttributes = 0xffffffff;
+# else
+const char kCurrentDirectoryString[] = ".\\";
+# endif // GTEST_OS_WINDOWS_MOBILE
+#else
+const char kPathSeparator = '/';
+const char kPathSeparatorString[] = "/";
+const char kCurrentDirectoryString[] = "./";
+#endif // GTEST_OS_WINDOWS
+
+// Returns whether the given character is a valid path separator.
+static bool IsPathSeparator(char c) {
+#if GTEST_HAS_ALT_PATH_SEP_
+ return (c == kPathSeparator) || (c == kAlternatePathSeparator);
+#else
+ return c == kPathSeparator;
+#endif
+}
+
+// Returns the current working directory, or "" if unsuccessful.
+FilePath FilePath::GetCurrentDir() {
+#if GTEST_OS_WINDOWS_MOBILE
+ // Windows CE doesn't have a current directory, so we just return
+ // something reasonable.
+ return FilePath(kCurrentDirectoryString);
+#elif GTEST_OS_WINDOWS
+ char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
+ return FilePath(_getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd);
+#else
+ char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
+ return FilePath(getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd);
+#endif // GTEST_OS_WINDOWS_MOBILE
+}
+
+// Returns a copy of the FilePath with the case-insensitive extension removed.
+// Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
+// FilePath("dir/file"). If a case-insensitive extension is not
+// found, returns a copy of the original FilePath.
+FilePath FilePath::RemoveExtension(const char* extension) const {
+ String dot_extension(String::Format(".%s", extension));
+ if (pathname_.EndsWithCaseInsensitive(dot_extension.c_str())) {
+ return FilePath(String(pathname_.c_str(),
+ pathname_.length() - dot_extension.length()));
+ }
+ return *this;
+}
+
+// Returns a pointer to the last occurrence of a valid path separator in
+// the FilePath. On Windows, for example, both '/' and '\' are valid path
+// separators. Returns NULL if no path separator was found.
+const char* FilePath::FindLastPathSeparator() const {
+ const char* const last_sep = strrchr(c_str(), kPathSeparator);
+#if GTEST_HAS_ALT_PATH_SEP_
+ const char* const last_alt_sep = strrchr(c_str(), kAlternatePathSeparator);
+ // Comparing two pointers of which only one is NULL is undefined.
+ if (last_alt_sep != NULL &&
+ (last_sep == NULL || last_alt_sep > last_sep)) {
+ return last_alt_sep;
+ }
+#endif
+ return last_sep;
+}
+
+// Returns a copy of the FilePath with the directory part removed.
+// Example: FilePath("path/to/file").RemoveDirectoryName() returns
+// FilePath("file"). If there is no directory part ("just_a_file"), it returns
+// the FilePath unmodified. If there is no file part ("just_a_dir/") it
+// returns an empty FilePath ("").
+// On Windows platform, '\' is the path separator, otherwise it is '/'.
+FilePath FilePath::RemoveDirectoryName() const {
+ const char* const last_sep = FindLastPathSeparator();
+ return last_sep ? FilePath(String(last_sep + 1)) : *this;
+}
+
+// RemoveFileName returns the directory path with the filename removed.
+// Example: FilePath("path/to/file").RemoveFileName() returns "path/to/".
+// If the FilePath contains no separator, like "a_file", RemoveFileName
+// returns FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does
+// not have a file, like "just/a/dir/", it returns the FilePath unmodified.
+// On Windows platform, '\' is the path separator, otherwise it is '/'.
+FilePath FilePath::RemoveFileName() const {
+ const char* const last_sep = FindLastPathSeparator();
+ String dir;
+ if (last_sep) {
+ dir = String(c_str(), last_sep + 1 - c_str());
+ } else {
+ dir = kCurrentDirectoryString;
+ }
+ return FilePath(dir);
+}
+
+// Helper functions for naming files in a directory for xml output.
+
+// Given directory = "dir", base_name = "test", number = 0,
+// extension = "xml", returns "dir/test.xml". If number is greater
+// than zero (e.g., 12), returns "dir/test_12.xml".
+// On Windows platform, uses \ as the separator rather than /.
+FilePath FilePath::MakeFileName(const FilePath& directory,
+ const FilePath& base_name,
+ int number,
+ const char* extension) {
+ String file;
+ if (number == 0) {
+ file = String::Format("%s.%s", base_name.c_str(), extension);
+ } else {
+ file = String::Format("%s_%d.%s", base_name.c_str(), number, extension);
+ }
+ return ConcatPaths(directory, FilePath(file));
+}
+
+// Given directory = "dir", relative_path = "test.xml", returns "dir/test.xml".
+// On Windows, uses \ as the separator rather than /.
+FilePath FilePath::ConcatPaths(const FilePath& directory,
+ const FilePath& relative_path) {
+ if (directory.IsEmpty())
+ return relative_path;
+ const FilePath dir(directory.RemoveTrailingPathSeparator());
+ return FilePath(String::Format("%s%c%s", dir.c_str(), kPathSeparator,
+ relative_path.c_str()));
+}
+
+// Returns true if pathname describes something findable in the file-system,
+// either a file, directory, or whatever.
+bool FilePath::FileOrDirectoryExists() const {
+#if GTEST_OS_WINDOWS_MOBILE
+ LPCWSTR unicode = String::AnsiToUtf16(pathname_.c_str());
+ const DWORD attributes = GetFileAttributes(unicode);
+ delete [] unicode;
+ return attributes != kInvalidFileAttributes;
+#else
+ posix::StatStruct file_stat;
+ return posix::Stat(pathname_.c_str(), &file_stat) == 0;
+#endif // GTEST_OS_WINDOWS_MOBILE
+}
+
+// Returns true if pathname describes a directory in the file-system
+// that exists.
+bool FilePath::DirectoryExists() const {
+ bool result = false;
+#if GTEST_OS_WINDOWS
+ // Don't strip off trailing separator if path is a root directory on
+ // Windows (like "C:\\").
+ const FilePath& path(IsRootDirectory() ? *this :
+ RemoveTrailingPathSeparator());
+#else
+ const FilePath& path(*this);
+#endif
+
+#if GTEST_OS_WINDOWS_MOBILE
+ LPCWSTR unicode = String::AnsiToUtf16(path.c_str());
+ const DWORD attributes = GetFileAttributes(unicode);
+ delete [] unicode;
+ if ((attributes != kInvalidFileAttributes) &&
+ (attributes & FILE_ATTRIBUTE_DIRECTORY)) {
+ result = true;
+ }
+#else
+ posix::StatStruct file_stat;
+ result = posix::Stat(path.c_str(), &file_stat) == 0 &&
+ posix::IsDir(file_stat);
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+ return result;
+}
+
+// Returns true if pathname describes a root directory. (Windows has one
+// root directory per disk drive.)
+bool FilePath::IsRootDirectory() const {
+#if GTEST_OS_WINDOWS
+ // TODO(wan@google.com): on Windows a network share like
+ // \\server\share can be a root directory, although it cannot be the
+ // current directory. Handle this properly.
+ return pathname_.length() == 3 && IsAbsolutePath();
+#else
+ return pathname_.length() == 1 && IsPathSeparator(pathname_.c_str()[0]);
+#endif
+}
+
+// Returns true if pathname describes an absolute path.
+bool FilePath::IsAbsolutePath() const {
+ const char* const name = pathname_.c_str();
+#if GTEST_OS_WINDOWS
+ return pathname_.length() >= 3 &&
+ ((name[0] >= 'a' && name[0] <= 'z') ||
+ (name[0] >= 'A' && name[0] <= 'Z')) &&
+ name[1] == ':' &&
+ IsPathSeparator(name[2]);
+#else
+ return IsPathSeparator(name[0]);
+#endif
+}
+
+// Returns a pathname for a file that does not currently exist. The pathname
+// will be directory/base_name.extension or
+// directory/base_name_<number>.extension if directory/base_name.extension
+// already exists. The number will be incremented until a pathname is found
+// that does not already exist.
+// Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.
+// There could be a race condition if two or more processes are calling this
+// function at the same time -- they could both pick the same filename.
+FilePath FilePath::GenerateUniqueFileName(const FilePath& directory,
+ const FilePath& base_name,
+ const char* extension) {
+ FilePath full_pathname;
+ int number = 0;
+ do {
+ full_pathname.Set(MakeFileName(directory, base_name, number++, extension));
+ } while (full_pathname.FileOrDirectoryExists());
+ return full_pathname;
+}
+
+// Returns true if FilePath ends with a path separator, which indicates that
+// it is intended to represent a directory. Returns false otherwise.
+// This does NOT check that a directory (or file) actually exists.
+bool FilePath::IsDirectory() const {
+ return !pathname_.empty() &&
+ IsPathSeparator(pathname_.c_str()[pathname_.length() - 1]);
+}
+
+// Create directories so that path exists. Returns true if successful or if
+// the directories already exist; returns false if unable to create directories
+// for any reason.
+bool FilePath::CreateDirectoriesRecursively() const {
+ if (!this->IsDirectory()) {
+ return false;
+ }
+
+ if (pathname_.length() == 0 || this->DirectoryExists()) {
+ return true;
+ }
+
+ const FilePath parent(this->RemoveTrailingPathSeparator().RemoveFileName());
+ return parent.CreateDirectoriesRecursively() && this->CreateFolder();
+}
+
+// Create the directory so that path exists. Returns true if successful or
+// if the directory already exists; returns false if unable to create the
+// directory for any reason, including if the parent directory does not
+// exist. Not named "CreateDirectory" because that's a macro on Windows.
+bool FilePath::CreateFolder() const {
+#if GTEST_OS_WINDOWS_MOBILE
+ FilePath removed_sep(this->RemoveTrailingPathSeparator());
+ LPCWSTR unicode = String::AnsiToUtf16(removed_sep.c_str());
+ int result = CreateDirectory(unicode, NULL) ? 0 : -1;
+ delete [] unicode;
+#elif GTEST_OS_WINDOWS
+ int result = _mkdir(pathname_.c_str());
+#else
+ int result = mkdir(pathname_.c_str(), 0777);
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+ if (result == -1) {
+ return this->DirectoryExists(); // An error is OK if the directory exists.
+ }
+ return true; // No error.
+}
+
+// If input name has a trailing separator character, remove it and return the
+// name, otherwise return the name string unmodified.
+// On Windows platform, uses \ as the separator, other platforms use /.
+FilePath FilePath::RemoveTrailingPathSeparator() const {
+ return IsDirectory()
+ ? FilePath(String(pathname_.c_str(), pathname_.length() - 1))
+ : *this;
+}
+
+// Removes any redundant separators that might be in the pathname.
+// For example, "bar///foo" becomes "bar/foo". Does not eliminate other
+// redundancies that might be in a pathname involving "." or "..".
+// TODO(wan@google.com): handle Windows network shares (e.g. \\server\share).
+void FilePath::Normalize() {
+ if (pathname_.c_str() == NULL) {
+ pathname_ = "";
+ return;
+ }
+ const char* src = pathname_.c_str();
+ char* const dest = new char[pathname_.length() + 1];
+ char* dest_ptr = dest;
+ memset(dest_ptr, 0, pathname_.length() + 1);
+
+ while (*src != '\0') {
+ *dest_ptr = *src;
+ if (!IsPathSeparator(*src)) {
+ src++;
+ } else {
+#if GTEST_HAS_ALT_PATH_SEP_
+ if (*dest_ptr == kAlternatePathSeparator) {
+ *dest_ptr = kPathSeparator;
+ }
+#endif
+ while (IsPathSeparator(*src))
+ src++;
+ }
+ dest_ptr++;
+ }
+ *dest_ptr = '\0';
+ pathname_ = dest;
+ delete[] dest;
+}
+
+} // namespace internal
+} // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+
+#include <limits.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#if GTEST_OS_WINDOWS_MOBILE
+# include <windows.h> // For TerminateProcess()
+#elif GTEST_OS_WINDOWS
+# include <io.h>
+# include <sys/stat.h>
+#else
+# include <unistd.h>
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+#if GTEST_OS_MAC
+# include <mach/mach_init.h>
+# include <mach/task.h>
+# include <mach/vm_map.h>
+#endif // GTEST_OS_MAC
+
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+namespace internal {
+
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC and C++Builder do not provide a definition of STDERR_FILENO.
+const int kStdOutFileno = 1;
+const int kStdErrFileno = 2;
+#else
+const int kStdOutFileno = STDOUT_FILENO;
+const int kStdErrFileno = STDERR_FILENO;
+#endif // _MSC_VER
+
+#if GTEST_OS_MAC
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+size_t GetThreadCount() {
+ const task_t task = mach_task_self();
+ mach_msg_type_number_t thread_count;
+ thread_act_array_t thread_list;
+ const kern_return_t status = task_threads(task, &thread_list, &thread_count);
+ if (status == KERN_SUCCESS) {
+ // task_threads allocates resources in thread_list and we need to free them
+ // to avoid leaks.
+ vm_deallocate(task,
+ reinterpret_cast<vm_address_t>(thread_list),
+ sizeof(thread_t) * thread_count);
+ return static_cast<size_t>(thread_count);
+ } else {
+ return 0;
+ }
+}
+
+#else
+
+size_t GetThreadCount() {
+ // There's no portable way to detect the number of threads, so we just
+ // return 0 to indicate that we cannot detect it.
+ return 0;
+}
+
+#endif // GTEST_OS_MAC
+
+#if GTEST_USES_POSIX_RE
+
+// Implements RE. Currently only needed for death tests.
+
+RE::~RE() {
+ if (is_valid_) {
+ // regfree'ing an invalid regex might crash because the content
+ // of the regex is undefined. Since the regex's are essentially
+ // the same, one cannot be valid (or invalid) without the other
+ // being so too.
+ regfree(&partial_regex_);
+ regfree(&full_regex_);
+ }
+ free(const_cast<char*>(pattern_));
+}
+
+// Returns true iff regular expression re matches the entire str.
+bool RE::FullMatch(const char* str, const RE& re) {
+ if (!re.is_valid_) return false;
+
+ regmatch_t match;
+ return regexec(&re.full_regex_, str, 1, &match, 0) == 0;
+}
+
+// Returns true iff regular expression re matches a substring of str
+// (including str itself).
+bool RE::PartialMatch(const char* str, const RE& re) {
+ if (!re.is_valid_) return false;
+
+ regmatch_t match;
+ return regexec(&re.partial_regex_, str, 1, &match, 0) == 0;
+}
+
+// Initializes an RE from its string representation.
+void RE::Init(const char* regex) {
+ pattern_ = posix::StrDup(regex);
+
+ // Reserves enough bytes to hold the regular expression used for a
+ // full match.
+ const size_t full_regex_len = strlen(regex) + 10;
+ char* const full_pattern = new char[full_regex_len];
+
+ snprintf(full_pattern, full_regex_len, "^(%s)$", regex);
+ is_valid_ = regcomp(&full_regex_, full_pattern, REG_EXTENDED) == 0;
+ // We want to call regcomp(&partial_regex_, ...) even if the
+ // previous expression returns false. Otherwise partial_regex_ may
+ // not be properly initialized and may cause trouble when it's
+ // freed.
+ //
+ // Some implementations of POSIX regex (e.g. on at least some
+ // versions of Cygwin) don't accept the empty string as a valid
+ // regex. We change it to an equivalent form "()" to be safe.
+ if (is_valid_) {
+ const char* const partial_regex = (*regex == '\0') ? "()" : regex;
+ is_valid_ = regcomp(&partial_regex_, partial_regex, REG_EXTENDED) == 0;
+ }
+ EXPECT_TRUE(is_valid_)
+ << "Regular expression \"" << regex
+ << "\" is not a valid POSIX Extended regular expression.";
+
+ delete[] full_pattern;
+}
+
+#elif GTEST_USES_SIMPLE_RE
+
+// Returns true iff ch appears anywhere in str (excluding the
+// terminating '\0' character).
+bool IsInSet(char ch, const char* str) {
+ return ch != '\0' && strchr(str, ch) != NULL;
+}
+
+// Returns true iff ch belongs to the given classification. Unlike
+// similar functions in <ctype.h>, these aren't affected by the
+// current locale.
+bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; }
+bool IsAsciiPunct(char ch) {
+ return IsInSet(ch, "^-!\"#$%&'()*+,./:;<=>?@[\\]_`{|}~");
+}
+bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); }
+bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); }
+bool IsAsciiWordChar(char ch) {
+ return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') ||
+ ('0' <= ch && ch <= '9') || ch == '_';
+}
+
+// Returns true iff "\\c" is a supported escape sequence.
+bool IsValidEscape(char c) {
+ return (IsAsciiPunct(c) || IsInSet(c, "dDfnrsStvwW"));
+}
+
+// Returns true iff the given atom (specified by escaped and pattern)
+// matches ch. The result is undefined if the atom is invalid.
+bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
+ if (escaped) { // "\\p" where p is pattern_char.
+ switch (pattern_char) {
+ case 'd': return IsAsciiDigit(ch);
+ case 'D': return !IsAsciiDigit(ch);
+ case 'f': return ch == '\f';
+ case 'n': return ch == '\n';
+ case 'r': return ch == '\r';
+ case 's': return IsAsciiWhiteSpace(ch);
+ case 'S': return !IsAsciiWhiteSpace(ch);
+ case 't': return ch == '\t';
+ case 'v': return ch == '\v';
+ case 'w': return IsAsciiWordChar(ch);
+ case 'W': return !IsAsciiWordChar(ch);
+ }
+ return IsAsciiPunct(pattern_char) && pattern_char == ch;
+ }
+
+ return (pattern_char == '.' && ch != '\n') || pattern_char == ch;
+}
+
+// Helper function used by ValidateRegex() to format error messages.
+String FormatRegexSyntaxError(const char* regex, int index) {
+ return (Message() << "Syntax error at index " << index
+ << " in simple regular expression \"" << regex << "\": ").GetString();
+}
+
+// Generates non-fatal failures and returns false if regex is invalid;
+// otherwise returns true.
+bool ValidateRegex(const char* regex) {
+ if (regex == NULL) {
+ // TODO(wan@google.com): fix the source file location in the
+ // assertion failures to match where the regex is used in user
+ // code.
+ ADD_FAILURE() << "NULL is not a valid simple regular expression.";
+ return false;
+ }
+
+ bool is_valid = true;
+
+ // True iff ?, *, or + can follow the previous atom.
+ bool prev_repeatable = false;
+ for (int i = 0; regex[i]; i++) {
+ if (regex[i] == '\\') { // An escape sequence
+ i++;
+ if (regex[i] == '\0') {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+ << "'\\' cannot appear at the end.";
+ return false;
+ }
+
+ if (!IsValidEscape(regex[i])) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+ << "invalid escape sequence \"\\" << regex[i] << "\".";
+ is_valid = false;
+ }
+ prev_repeatable = true;
+ } else { // Not an escape sequence.
+ const char ch = regex[i];
+
+ if (ch == '^' && i > 0) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'^' can only appear at the beginning.";
+ is_valid = false;
+ } else if (ch == '$' && regex[i + 1] != '\0') {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'$' can only appear at the end.";
+ is_valid = false;
+ } else if (IsInSet(ch, "()[]{}|")) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'" << ch << "' is unsupported.";
+ is_valid = false;
+ } else if (IsRepeat(ch) && !prev_repeatable) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'" << ch << "' can only follow a repeatable token.";
+ is_valid = false;
+ }
+
+ prev_repeatable = !IsInSet(ch, "^$?*+");
+ }
+ }
+
+ return is_valid;
+}
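+
+// Examples of what the validator above accepts and rejects (a sketch, with
+// made-up patterns):
+//   Accepted: "abc", "^ab?c*d+$", "\\d+\\w*", "a\\.b"
+//   Rejected: "a(b)", "a|b"   ('(', ')', '|' are unsupported),
+//             "*a"            (repeat with no preceding atom),
+//             "a^b", "$b"     ('^'/'$' only at the ends),
+//             "a\\"           ('\\' cannot appear at the end).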
+
+// Matches a repeated regex atom followed by a valid simple regular
+// expression. The regex atom is defined as c if escaped is false,
+// or \c otherwise. repeat is the repetition meta character (?, *,
+// or +). The behavior is undefined if str contains too many
+// characters to be indexable by size_t, in which case the test will
+// probably time out anyway. We are fine with this limitation as
+// std::string has it too.
+bool MatchRepetitionAndRegexAtHead(
+ bool escaped, char c, char repeat, const char* regex,
+ const char* str) {
+ const size_t min_count = (repeat == '+') ? 1 : 0;
+ const size_t max_count = (repeat == '?') ? 1 :
+ static_cast<size_t>(-1) - 1;
+ // We cannot call numeric_limits::max() as it conflicts with the
+ // max() macro on Windows.
+
+ for (size_t i = 0; i <= max_count; ++i) {
+ // We know that the atom matches each of the first i characters in str.
+ if (i >= min_count && MatchRegexAtHead(regex, str + i)) {
+ // We have enough matches at the head, and the tail matches too.
+ // Since we only care about *whether* the pattern matches str
+ // (as opposed to *how* it matches), there is no need to find a
+ // greedy match.
+ return true;
+ }
+ if (str[i] == '\0' || !AtomMatchesChar(escaped, c, str[i]))
+ return false;
+ }
+ return false;
+}
+
+// Returns true iff regex matches a prefix of str. regex must be a
+// valid simple regular expression and not start with "^", or the
+// result is undefined.
+bool MatchRegexAtHead(const char* regex, const char* str) {
+ if (*regex == '\0') // An empty regex matches a prefix of anything.
+ return true;
+
+ // "$" only matches the end of a string. Note that regex being
+ // valid guarantees that there's nothing after "$" in it.
+ if (*regex == '$')
+ return *str == '\0';
+
+ // Is the first thing in regex an escape sequence?
+ const bool escaped = *regex == '\\';
+ if (escaped)
+ ++regex;
+ if (IsRepeat(regex[1])) {
+ // MatchRepetitionAndRegexAtHead() calls MatchRegexAtHead(), so
+ // here's an indirect recursion. It terminates as the regex gets
+ // shorter in each recursion.
+ return MatchRepetitionAndRegexAtHead(
+ escaped, regex[0], regex[1], regex + 2, str);
+ } else {
+ // regex isn't empty, isn't "$", and doesn't start with a
+ // repetition. We match the first atom of regex with the first
+ // character of str and recurse.
+ return (*str != '\0') && AtomMatchesChar(escaped, *regex, *str) &&
+ MatchRegexAtHead(regex + 1, str + 1);
+ }
+}
+
+// Returns true iff regex matches any substring of str. regex must be
+// a valid simple regular expression, or the result is undefined.
+//
+// The algorithm is recursive, but the recursion depth doesn't exceed
+// the regex length, so we won't need to worry about running out of
+// stack space normally. In rare cases the time complexity can be
+// exponential with respect to the regex length + the string length,
+ // but usually it's much faster (often close to linear).
+bool MatchRegexAnywhere(const char* regex, const char* str) {
+ if (regex == NULL || str == NULL)
+ return false;
+
+ if (*regex == '^')
+ return MatchRegexAtHead(regex + 1, str);
+
+ // A successful match can be anywhere in str.
+ do {
+ if (MatchRegexAtHead(regex, str))
+ return true;
+ } while (*str++ != '\0');
+ return false;
+}
+
+// Implements the RE class.
+
+RE::~RE() {
+ free(const_cast<char*>(pattern_));
+ free(const_cast<char*>(full_pattern_));
+}
+
+// Returns true iff regular expression re matches the entire str.
+bool RE::FullMatch(const char* str, const RE& re) {
+ return re.is_valid_ && MatchRegexAnywhere(re.full_pattern_, str);
+}
+
+// Returns true iff regular expression re matches a substring of str
+// (including str itself).
+bool RE::PartialMatch(const char* str, const RE& re) {
+ return re.is_valid_ && MatchRegexAnywhere(re.pattern_, str);
+}
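+
+// Illustrative behavior of the simple matcher (a sketch, with a made-up
+// pattern; FullMatch uses the '^'/'$'-anchored full_pattern_ built in Init):
+//   RE re("a.c");
+//   RE::FullMatch("abc", re);      // true: "^a.c$" covers the whole string.
+//   RE::FullMatch("xabcx", re);    // false.
+//   RE::PartialMatch("xabcx", re); // true: "a.c" matches a substring.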
+
+// Initializes an RE from its string representation.
+void RE::Init(const char* regex) {
+ pattern_ = full_pattern_ = NULL;
+ if (regex != NULL) {
+ pattern_ = posix::StrDup(regex);
+ }
+
+ is_valid_ = ValidateRegex(regex);
+ if (!is_valid_) {
+ // No need to calculate the full pattern when the regex is invalid.
+ return;
+ }
+
+ const size_t len = strlen(regex);
+ // Reserves enough bytes to hold the regular expression used for a
+ // full match: we need space to prepend a '^', append a '$', and
+ // terminate the string with '\0'.
+ char* buffer = static_cast<char*>(malloc(len + 3));
+ full_pattern_ = buffer;
+
+ if (*regex != '^')
+ *buffer++ = '^'; // Makes sure full_pattern_ starts with '^'.
+
+ // We don't use snprintf or strncpy, as they trigger a warning when
+ // compiled with VC++ 8.0.
+ memcpy(buffer, regex, len);
+ buffer += len;
+
+ if (len == 0 || regex[len - 1] != '$')
+ *buffer++ = '$'; // Makes sure full_pattern_ ends with '$'.
+
+ *buffer = '\0';
+}
+
+#endif // GTEST_USES_POSIX_RE
+
+const char kUnknownFile[] = "unknown file";
+
+// Formats a source file path and a line number as they would appear
+// in an error message from the compiler used to compile this code.
+GTEST_API_ ::std::string FormatFileLocation(const char* file, int line) {
+ const char* const file_name = file == NULL ? kUnknownFile : file;
+
+ if (line < 0) {
+ return String::Format("%s:", file_name).c_str();
+ }
+#ifdef _MSC_VER
+ return String::Format("%s(%d):", file_name, line).c_str();
+#else
+ return String::Format("%s:%d:", file_name, line).c_str();
+#endif // _MSC_VER
+}
+
+// Formats a file location for compiler-independent XML output.
+// Although this function is not platform dependent, we put it next to
+// FormatFileLocation in order to contrast the two functions.
+// Note that FormatCompilerIndependentFileLocation() does NOT append colon
+// to the file location it produces, unlike FormatFileLocation().
+GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(
+ const char* file, int line) {
+ const char* const file_name = file == NULL ? kUnknownFile : file;
+
+ if (line < 0)
+ return file_name;
+ else
+ return String::Format("%s:%d", file_name, line).c_str();
+}
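+
+// For example (a sketch of the two formats, with made-up arguments):
+//   FormatFileLocation("foo.cc", 42)                    -> "foo.cc:42:"
+//     (or "foo.cc(42):" when built with MSVC)
+//   FormatCompilerIndependentFileLocation("foo.cc", 42) -> "foo.cc:42"
+//   Both fall back to "unknown file" when file is NULL.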
+
+
+GTestLog::GTestLog(GTestLogSeverity severity, const char* file, int line)
+ : severity_(severity) {
+ const char* const marker =
+ severity == GTEST_INFO ? "[ INFO ]" :
+ severity == GTEST_WARNING ? "[WARNING]" :
+ severity == GTEST_ERROR ? "[ ERROR ]" : "[ FATAL ]";
+ GetStream() << ::std::endl << marker << " "
+ << FormatFileLocation(file, line).c_str() << ": ";
+}
+
+// Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
+GTestLog::~GTestLog() {
+ GetStream() << ::std::endl;
+ if (severity_ == GTEST_FATAL) {
+ fflush(stderr);
+ posix::Abort();
+ }
+}
+// Disable Microsoft deprecation warnings for POSIX functions called from
+// this class (creat, dup, dup2, and close)
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable: 4996)
+#endif // _MSC_VER
+
+#if GTEST_HAS_STREAM_REDIRECTION
+
+// Object that captures an output stream (stdout/stderr).
+class CapturedStream {
+ public:
+ // The ctor redirects the stream to a temporary file.
+ CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {
+
+# if GTEST_OS_WINDOWS
+ char temp_dir_path[MAX_PATH + 1] = { '\0' }; // NOLINT
+ char temp_file_path[MAX_PATH + 1] = { '\0' }; // NOLINT
+
+ ::GetTempPathA(sizeof(temp_dir_path), temp_dir_path);
+ const UINT success = ::GetTempFileNameA(temp_dir_path,
+ "gtest_redir",
+ 0, // Generate unique file name.
+ temp_file_path);
+ GTEST_CHECK_(success != 0)
+ << "Unable to create a temporary file in " << temp_dir_path;
+ const int captured_fd = creat(temp_file_path, _S_IREAD | _S_IWRITE);
+ GTEST_CHECK_(captured_fd != -1) << "Unable to open temporary file "
+ << temp_file_path;
+ filename_ = temp_file_path;
+# else
+ // There's no guarantee that a test has write access to the
+ // current directory, so we create the temporary file in the /tmp
+ // directory instead.
+ char name_template[] = "/tmp/captured_stream.XXXXXX";
+ const int captured_fd = mkstemp(name_template);
+ filename_ = name_template;
+# endif // GTEST_OS_WINDOWS
+ fflush(NULL);
+ dup2(captured_fd, fd_);
+ close(captured_fd);
+ }
+
+ ~CapturedStream() {
+ remove(filename_.c_str());
+ }
+
+ String GetCapturedString() {
+ if (uncaptured_fd_ != -1) {
+ // Restores the original stream.
+ fflush(NULL);
+ dup2(uncaptured_fd_, fd_);
+ close(uncaptured_fd_);
+ uncaptured_fd_ = -1;
+ }
+
+ FILE* const file = posix::FOpen(filename_.c_str(), "r");
+ const String content = ReadEntireFile(file);
+ posix::FClose(file);
+ return content;
+ }
+
+ private:
+ // Reads the entire content of a file as a String.
+ static String ReadEntireFile(FILE* file);
+
+ // Returns the size (in bytes) of a file.
+ static size_t GetFileSize(FILE* file);
+
+ const int fd_; // A stream to capture.
+ int uncaptured_fd_;
+ // Name of the temporary file holding the captured stream's output.
+ ::std::string filename_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(CapturedStream);
+};
+
+// Returns the size (in bytes) of a file.
+size_t CapturedStream::GetFileSize(FILE* file) {
+ fseek(file, 0, SEEK_END);
+ return static_cast<size_t>(ftell(file));
+}
+
+// Reads the entire content of a file as a string.
+String CapturedStream::ReadEntireFile(FILE* file) {
+ const size_t file_size = GetFileSize(file);
+ char* const buffer = new char[file_size];
+
+ size_t bytes_last_read = 0; // # of bytes read in the last fread()
+ size_t bytes_read = 0; // # of bytes read so far
+
+ fseek(file, 0, SEEK_SET);
+
+ // Keeps reading the file until we cannot read further or the
+ // pre-determined file size is reached.
+ do {
+ bytes_last_read = fread(buffer+bytes_read, 1, file_size-bytes_read, file);
+ bytes_read += bytes_last_read;
+ } while (bytes_last_read > 0 && bytes_read < file_size);
+
+ const String content(buffer, bytes_read);
+ delete[] buffer;
+
+ return content;
+}
+
+# ifdef _MSC_VER
+# pragma warning(pop)
+# endif // _MSC_VER
+
+static CapturedStream* g_captured_stderr = NULL;
+static CapturedStream* g_captured_stdout = NULL;
+
+// Starts capturing an output stream (stdout/stderr).
+void CaptureStream(int fd, const char* stream_name, CapturedStream** stream) {
+ if (*stream != NULL) {
+ GTEST_LOG_(FATAL) << "Only one " << stream_name
+ << " capturer can exist at a time.";
+ }
+ *stream = new CapturedStream(fd);
+}
+
+// Stops capturing the output stream and returns the captured string.
+String GetCapturedStream(CapturedStream** captured_stream) {
+ const String content = (*captured_stream)->GetCapturedString();
+
+ delete *captured_stream;
+ *captured_stream = NULL;
+
+ return content;
+}
+
+// Starts capturing stdout.
+void CaptureStdout() {
+ CaptureStream(kStdOutFileno, "stdout", &g_captured_stdout);
+}
+
+// Starts capturing stderr.
+void CaptureStderr() {
+ CaptureStream(kStdErrFileno, "stderr", &g_captured_stderr);
+}
+
+// Stops capturing stdout and returns the captured string.
+String GetCapturedStdout() { return GetCapturedStream(&g_captured_stdout); }
+
+// Stops capturing stderr and returns the captured string.
+String GetCapturedStderr() { return GetCapturedStream(&g_captured_stderr); }
+
+#endif // GTEST_HAS_STREAM_REDIRECTION
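+
+// A minimal usage sketch (illustrative only, not from the original
+// sources) showing how the capture helpers above are paired inside a
+// test body: each CaptureStdout() must be matched by exactly one
+// GetCapturedStdout().  The test name below is made up.
+#if 0
+#include <stdio.h>
+#include <string.h>
+#include "gtest/gtest.h"
+
+TEST(StreamCaptureSketch, SeesWhatWasPrinted) {
+  testing::internal::CaptureStdout();          // redirect fd 1 to a temp file
+  printf("hello from the test\n");
+  const testing::internal::String out =
+      testing::internal::GetCapturedStdout();  // restore fd 1 and read back
+  EXPECT_TRUE(strstr(out.c_str(), "hello from the test") != NULL);
+}
+#endif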
+
+#if GTEST_HAS_DEATH_TEST
+
+// A copy of all command line arguments. Set by InitGoogleTest().
+::std::vector<String> g_argvs;
+
+// Returns the command line as a vector of strings.
+const ::std::vector<String>& GetArgvs() { return g_argvs; }
+
+#endif // GTEST_HAS_DEATH_TEST
+
+#if GTEST_OS_WINDOWS_MOBILE
+namespace posix {
+void Abort() {
+ DebugBreak();
+ TerminateProcess(GetCurrentProcess(), 1);
+}
+} // namespace posix
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+// Returns the name of the environment variable corresponding to the
+// given flag. For example, FlagToEnvVar("foo") will return
+// "GTEST_FOO" in the open-source version.
+static String FlagToEnvVar(const char* flag) {
+ const String full_flag =
+ (Message() << GTEST_FLAG_PREFIX_ << flag).GetString();
+
+ Message env_var;
+ for (size_t i = 0; i != full_flag.length(); i++) {
+ env_var << ToUpper(full_flag.c_str()[i]);
+ }
+
+ return env_var.GetString();
+}
+
+// Parses 'str' for a 32-bit signed integer. If successful, writes
+// the result to *value and returns true; otherwise leaves *value
+// unchanged and returns false.
+bool ParseInt32(const Message& src_text, const char* str, Int32* value) {
+ // Parses the environment variable as a decimal integer.
+ char* end = NULL;
+ const long long_value = strtol(str, &end, 10); // NOLINT
+
+ // Has strtol() consumed all characters in the string?
+ if (*end != '\0') {
+ // No - an invalid character was encountered.
+ Message msg;
+ msg << "WARNING: " << src_text
+ << " is expected to be a 32-bit integer, but actually"
+ << " has value \"" << str << "\".\n";
+ printf("%s", msg.GetString().c_str());
+ fflush(stdout);
+ return false;
+ }
+
+ // Is the parsed value in the range of an Int32?
+ const Int32 result = static_cast<Int32>(long_value);
+ if (long_value == LONG_MAX || long_value == LONG_MIN ||
+ // The parsed value overflows as a long. (strtol() returns
+ // LONG_MAX or LONG_MIN when the input overflows.)
+ result != long_value
+ // The parsed value overflows as an Int32.
+ ) {
+ Message msg;
+ msg << "WARNING: " << src_text
+ << " is expected to be a 32-bit integer, but actually"
+ << " has value " << str << ", which overflows.\n";
+ printf("%s", msg.GetString().c_str());
+ fflush(stdout);
+ return false;
+ }
+
+ *value = result;
+ return true;
+}
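+
+// A brief sketch (illustrative only, not from the original sources) of
+// what ParseInt32() accepts and rejects; the variable names are made up.
+#if 0
+testing::internal::Int32 value = 0;
+// Succeeds: the whole string is a decimal integer that fits in an Int32.
+bool ok = testing::internal::ParseInt32(
+    testing::Message() << "example", "123", &value);        // ok, value == 123
+// Fails and leaves 'value' untouched: trailing non-digit characters...
+ok = testing::internal::ParseInt32(
+    testing::Message() << "example", "123abc", &value);     // !ok
+// ...or a value that overflows 32 bits.
+ok = testing::internal::ParseInt32(
+    testing::Message() << "example", "9999999999", &value); // !ok
+#endif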
+
+// Reads and returns the Boolean environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+//
+// The value is considered true iff it's not "0".
+bool BoolFromGTestEnv(const char* flag, bool default_value) {
+ const String env_var = FlagToEnvVar(flag);
+ const char* const string_value = posix::GetEnv(env_var.c_str());
+ return string_value == NULL ?
+ default_value : strcmp(string_value, "0") != 0;
+}
+
+// Reads and returns a 32-bit integer stored in the environment
+// variable corresponding to the given flag; if it isn't set or
+// doesn't represent a valid 32-bit integer, returns default_value.
+Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) {
+ const String env_var = FlagToEnvVar(flag);
+ const char* const string_value = posix::GetEnv(env_var.c_str());
+ if (string_value == NULL) {
+ // The environment variable is not set.
+ return default_value;
+ }
+
+ Int32 result = default_value;
+ if (!ParseInt32(Message() << "Environment variable " << env_var,
+ string_value, &result)) {
+ printf("The default value %s is used.\n",
+ (Message() << default_value).GetString().c_str());
+ fflush(stdout);
+ return default_value;
+ }
+
+ return result;
+}
+
+// Reads and returns the string environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+const char* StringFromGTestEnv(const char* flag, const char* default_value) {
+ const String env_var = FlagToEnvVar(flag);
+ const char* const value = posix::GetEnv(env_var.c_str());
+ return value == NULL ? default_value : value;
+}
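+
+// A short sketch (illustrative only, not from the original sources) of
+// how the three readers above map flag names to environment variables
+// via FlagToEnvVar(); the environment value shown is hypothetical.
+#if 0
+// Suppose the environment contains:  GTEST_REPEAT=10
+const bool print_time =
+    testing::internal::BoolFromGTestEnv("print_time", true);
+    // reads GTEST_PRINT_TIME; any value other than "0" counts as true
+const testing::internal::Int32 repeat =
+    testing::internal::Int32FromGTestEnv("repeat", 1);
+    // yields 10; falls back to 1 if GTEST_REPEAT is unset or invalid
+const char* output = testing::internal::StringFromGTestEnv("output", "");
+    // returns the raw value of GTEST_OUTPUT, or "" if unset
+#endif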
+
+} // namespace internal
+} // namespace testing
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Test - The Google C++ Testing Framework
+//
+// This file implements a universal value printer that can print a
+// value of any type T:
+//
+// void ::testing::internal::UniversalPrinter<T>::Print(value, ostream_ptr);
+//
+// It uses the << operator when possible, and prints the bytes in the
+// object otherwise. A user can override its behavior for a class
+// type Foo by defining either operator<<(::std::ostream&, const Foo&)
+// or void PrintTo(const Foo&, ::std::ostream*) in the namespace that
+// defines Foo.
+
+#include <ctype.h>
+#include <stdio.h>
+#include <ostream> // NOLINT
+#include <string>
+
+namespace testing {
+
+namespace {
+
+using ::std::ostream;
+
+#if GTEST_OS_WINDOWS_MOBILE // Windows CE does not define _snprintf_s.
+# define snprintf _snprintf
+#elif _MSC_VER >= 1400 // VC 8.0 and later deprecate snprintf and _snprintf.
+# define snprintf _snprintf_s
+#elif _MSC_VER
+# define snprintf _snprintf
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+// Prints a segment of bytes in the given object.
+void PrintByteSegmentInObjectTo(const unsigned char* obj_bytes, size_t start,
+ size_t count, ostream* os) {
+ char text[5] = "";
+ for (size_t i = 0; i != count; i++) {
+ const size_t j = start + i;
+ if (i != 0) {
+ // Organizes the bytes into groups of 2 for easy parsing by
+ // humans.
+ if ((j % 2) == 0)
+ *os << ' ';
+ else
+ *os << '-';
+ }
+ snprintf(text, sizeof(text), "%02X", obj_bytes[j]);
+ *os << text;
+ }
+}
+
+// Prints the bytes in the given value to the given ostream.
+void PrintBytesInObjectToImpl(const unsigned char* obj_bytes, size_t count,
+ ostream* os) {
+ // Tells the user how big the object is.
+ *os << count << "-byte object <";
+
+ const size_t kThreshold = 132;
+ const size_t kChunkSize = 64;
+ // If the object size is bigger than kThreshold, we'll have to omit
+ // some details by printing only the first and the last kChunkSize
+ // bytes.
+ // TODO(wan): let the user control the threshold using a flag.
+ if (count < kThreshold) {
+ PrintByteSegmentInObjectTo(obj_bytes, 0, count, os);
+ } else {
+ PrintByteSegmentInObjectTo(obj_bytes, 0, kChunkSize, os);
+ *os << " ... ";
+ // Rounds up to 2-byte boundary.
+ const size_t resume_pos = (count - kChunkSize + 1)/2*2;
+ PrintByteSegmentInObjectTo(obj_bytes, resume_pos, count - resume_pos, os);
+ }
+ *os << ">";
+}
+
+} // namespace
+
+namespace internal2 {
+
+// Delegates to PrintBytesInObjectToImpl() to print the bytes in the
+// given object. The delegation simplifies the implementation, which
+// uses the << operator and thus is easier done outside of the
+// ::testing::internal namespace, which contains a << operator that
+// sometimes conflicts with the one in STL.
+void PrintBytesInObjectTo(const unsigned char* obj_bytes, size_t count,
+ ostream* os) {
+ PrintBytesInObjectToImpl(obj_bytes, count, os);
+}
+
+} // namespace internal2
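+
+// An illustrative sketch (not from the original sources): a type with
+// neither operator<< nor a PrintTo() overload falls back to the byte
+// dump above, producing something like
+// "8-byte object <01-00 00-00 02-00 00-00>" (the exact bytes depend on
+// the object's layout and endianness).  OpaquePair is a made-up struct.
+#if 0
+#include <string>
+#include "gtest/gtest-printers.h"
+
+struct OpaquePair { int first; int second; };
+
+void Demo() {
+  OpaquePair p = { 1, 2 };
+  // Uses operator<< or PrintTo() when available; otherwise the byte dump.
+  std::string printed = ::testing::PrintToString(p);
+}
+#endif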
+
+namespace internal {
+
+// Depending on the value of a char (or wchar_t), we print it in one
+// of three formats:
+// - as is if it's a printable ASCII (e.g. 'a', '2', ' '),
+// - as a hexadecimal escape sequence (e.g. '\x7F'), or
+// - as a special escape sequence (e.g. '\r', '\n').
+enum CharFormat {
+ kAsIs,
+ kHexEscape,
+ kSpecialEscape
+};
+
+// Returns true if c is a printable ASCII character. We test the
+// value of c directly instead of calling isprint(), which is buggy on
+// Windows Mobile.
+inline bool IsPrintableAscii(wchar_t c) {
+ return 0x20 <= c && c <= 0x7E;
+}
+
+// Prints a wide or narrow char c as a character literal without the
+// quotes, escaping it when necessary; returns how c was formatted.
+// The template argument UnsignedChar is the unsigned version of Char,
+// which is the type of c.
+template <typename UnsignedChar, typename Char>
+static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) {
+ switch (static_cast<wchar_t>(c)) {
+ case L'\0':
+ *os << "\\0";
+ break;
+ case L'\'':
+ *os << "\\'";
+ break;
+ case L'\\':
+ *os << "\\\\";
+ break;
+ case L'\a':
+ *os << "\\a";
+ break;
+ case L'\b':
+ *os << "\\b";
+ break;
+ case L'\f':
+ *os << "\\f";
+ break;
+ case L'\n':
+ *os << "\\n";
+ break;
+ case L'\r':
+ *os << "\\r";
+ break;
+ case L'\t':
+ *os << "\\t";
+ break;
+ case L'\v':
+ *os << "\\v";
+ break;
+ default:
+ if (IsPrintableAscii(c)) {
+ *os << static_cast<char>(c);
+ return kAsIs;
+ } else {
+ *os << String::Format("\\x%X", static_cast<UnsignedChar>(c));
+ return kHexEscape;
+ }
+ }
+ return kSpecialEscape;
+}
+
+// Prints a char c as if it's part of a string literal, escaping it when
+// necessary; returns how c was formatted.
+static CharFormat PrintAsWideStringLiteralTo(wchar_t c, ostream* os) {
+ switch (c) {
+ case L'\'':
+ *os << "'";
+ return kAsIs;
+ case L'"':
+ *os << "\\\"";
+ return kSpecialEscape;
+ default:
+ return PrintAsCharLiteralTo<wchar_t>(c, os);
+ }
+}
+
+// Prints a char c as if it's part of a string literal, escaping it when
+// necessary; returns how c was formatted.
+static CharFormat PrintAsNarrowStringLiteralTo(char c, ostream* os) {
+ return PrintAsWideStringLiteralTo(static_cast<unsigned char>(c), os);
+}
+
+// Prints a wide or narrow character c and its code. '\0' is printed
+// as "'\\0'", other unprintable characters are also properly escaped
+// using the standard C++ escape sequence. The template argument
+// UnsignedChar is the unsigned version of Char, which is the type of c.
+template <typename UnsignedChar, typename Char>
+void PrintCharAndCodeTo(Char c, ostream* os) {
+ // First, print c as a literal in the most readable form we can find.
+ *os << ((sizeof(c) > 1) ? "L'" : "'");
+ const CharFormat format = PrintAsCharLiteralTo<UnsignedChar>(c, os);
+ *os << "'";
+
+ // To aid user debugging, we also print c's code in decimal, unless
+ // it's 0 (in which case c was printed as '\\0', making the code
+ // obvious).
+ if (c == 0)
+ return;
+ *os << " (" << String::Format("%d", c).c_str();
+
+ // For extra convenience, we print c's code again in hexadecimal,
+ // unless c was already printed in the form '\x##' or the code is in
+ // [1, 9].
+ if (format == kHexEscape || (1 <= c && c <= 9)) {
+ // Do nothing.
+ } else {
+ *os << String::Format(", 0x%X",
+ static_cast<UnsignedChar>(c)).c_str();
+ }
+ *os << ")";
+}
+
+void PrintTo(unsigned char c, ::std::ostream* os) {
+ PrintCharAndCodeTo<unsigned char>(c, os);
+}
+void PrintTo(signed char c, ::std::ostream* os) {
+ PrintCharAndCodeTo<unsigned char>(c, os);
+}
+
+// Prints a wchar_t as a character literal if it is printable, escaping
+// it otherwise, and also prints its numeric code. L'\0' is printed as
+// "L'\\0'".
+void PrintTo(wchar_t wc, ostream* os) {
+ PrintCharAndCodeTo<wchar_t>(wc, os);
+}
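+
+// A minimal sketch (illustrative only, not from the original sources) of
+// what the character printers above produce when reached through the
+// public PrintToString() helper; the expected strings follow the rules
+// documented above.
+#if 0
+#include <string>
+#include "gtest/gtest-printers.h"
+
+void Demo() {
+  std::string a = ::testing::PrintToString('a');      // 'a' (97, 0x61)
+  std::string nul = ::testing::PrintToString('\0');   // '\0'  (code omitted)
+  std::string del = ::testing::PrintToString('\x7F'); // '\x7F' (127)
+}
+#endif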
+
+// Prints the given array of characters to the ostream.
+// The array starts at *begin, the length is len, it may include '\0' characters
+// and may not be null-terminated.
+static void PrintCharsAsStringTo(const char* begin, size_t len, ostream* os) {
+ *os << "\"";
+ bool is_previous_hex = false;
+ for (size_t index = 0; index < len; ++index) {
+ const char cur = begin[index];
+ if (is_previous_hex && IsXDigit(cur)) {
+ // Previous character is of '\x..' form and this character can be
+ // interpreted as another hexadecimal digit in its number. Break string to
+ // disambiguate.
+ *os << "\" \"";
+ }
+ is_previous_hex = PrintAsNarrowStringLiteralTo(cur, os) == kHexEscape;
+ }
+ *os << "\"";
+}
+
+// Prints a (const) char array of 'len' elements, starting at address 'begin'.
+void UniversalPrintArray(const char* begin, size_t len, ostream* os) {
+ PrintCharsAsStringTo(begin, len, os);
+}
+
+// Prints the given array of wide characters to the ostream.
+// The array starts at *begin, the length is len, it may include L'\0'
+// characters and may not be null-terminated.
+static void PrintWideCharsAsStringTo(const wchar_t* begin, size_t len,
+ ostream* os) {
+ *os << "L\"";
+ bool is_previous_hex = false;
+ for (size_t index = 0; index < len; ++index) {
+ const wchar_t cur = begin[index];
+ if (is_previous_hex && isascii(cur) && IsXDigit(static_cast<char>(cur))) {
+ // Previous character is of '\x..' form and this character can be
+ // interpreted as another hexadecimal digit in its number. Break string to
+ // disambiguate.
+ *os << "\" L\"";
+ }
+ is_previous_hex = PrintAsWideStringLiteralTo(cur, os) == kHexEscape;
+ }
+ *os << "\"";
+}
+
+// Prints the given C string to the ostream.
+void PrintTo(const char* s, ostream* os) {
+ if (s == NULL) {
+ *os << "NULL";
+ } else {
+ *os << ImplicitCast_<const void*>(s) << " pointing to ";
+ PrintCharsAsStringTo(s, strlen(s), os);
+ }
+}
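+
+// An illustrative sketch (not from the original sources): the hex-escape
+// disambiguation above splits the quoted output so that a hex digit
+// following an '\x..' escape is not misread as part of it.  The pointer
+// value in the output varies per run.
+#if 0
+#include <string>
+#include "gtest/gtest-printers.h"
+
+void Demo() {
+  const char* s = "\x1" "2AB";
+  // Prints something like: 0x7ffee1234567 pointing to "\x1" "2AB"
+  std::string printed = ::testing::PrintToString(s);
+}
+#endif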
+
+// The MSVC compiler can be configured to define wchar_t as a typedef
+// of unsigned short. Defining an overload for const wchar_t* in that case
+// would cause pointers to unsigned shorts to be printed as wide strings,
+// possibly accessing more memory than intended and causing invalid
+// memory accesses. MSVC defines the _NATIVE_WCHAR_T_DEFINED symbol when
+// wchar_t is implemented as a native type.
+#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
+// Prints the given wide C string to the ostream.
+void PrintTo(const wchar_t* s, ostream* os) {
+ if (s == NULL) {
+ *os << "NULL";
+ } else {
+ *os << ImplicitCast_<const void*>(s) << " pointing to ";
+ PrintWideCharsAsStringTo(s, wcslen(s), os);
+ }
+}
+#endif // wchar_t is native
+
+// Prints a ::string object.
+#if GTEST_HAS_GLOBAL_STRING
+void PrintStringTo(const ::string& s, ostream* os) {
+ PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+#endif // GTEST_HAS_GLOBAL_STRING
+
+void PrintStringTo(const ::std::string& s, ostream* os) {
+ PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+
+// Prints a ::wstring object.
+#if GTEST_HAS_GLOBAL_WSTRING
+void PrintWideStringTo(const ::wstring& s, ostream* os) {
+ PrintWideCharsAsStringTo(s.data(), s.size(), os);
+}
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+#if GTEST_HAS_STD_WSTRING
+void PrintWideStringTo(const ::std::wstring& s, ostream* os) {
+ PrintWideCharsAsStringTo(s.data(), s.size(), os);
+}
+#endif // GTEST_HAS_STD_WSTRING
+
+} // namespace internal
+
+} // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+// The Google C++ Testing Framework (Google Test)
+
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+
+using internal::GetUnitTestImpl;
+
+// Gets the summary of the failure message by omitting the stack trace
+// in it.
+internal::String TestPartResult::ExtractSummary(const char* message) {
+ const char* const stack_trace = strstr(message, internal::kStackTraceMarker);
+ return stack_trace == NULL ? internal::String(message) :
+ internal::String(message, stack_trace - message);
+}
+
+// Prints a TestPartResult object.
+std::ostream& operator<<(std::ostream& os, const TestPartResult& result) {
+ return os
+ << result.file_name() << ":" << result.line_number() << ": "
+ << (result.type() == TestPartResult::kSuccess ? "Success" :
+ result.type() == TestPartResult::kFatalFailure ? "Fatal failure" :
+ "Non-fatal failure") << ":\n"
+ << result.message() << std::endl;
+}
+
+// Appends a TestPartResult to the array.
+void TestPartResultArray::Append(const TestPartResult& result) {
+ array_.push_back(result);
+}
+
+// Returns the TestPartResult at the given index (0-based).
+const TestPartResult& TestPartResultArray::GetTestPartResult(int index) const {
+ if (index < 0 || index >= size()) {
+ printf("\nInvalid index (%d) into TestPartResultArray.\n", index);
+ internal::posix::Abort();
+ }
+
+ return array_[index];
+}
+
+// Returns the number of TestPartResult objects in the array.
+int TestPartResultArray::size() const {
+ return static_cast<int>(array_.size());
+}
+
+namespace internal {
+
+HasNewFatalFailureHelper::HasNewFatalFailureHelper()
+ : has_new_fatal_failure_(false),
+ original_reporter_(GetUnitTestImpl()->
+ GetTestPartResultReporterForCurrentThread()) {
+ GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(this);
+}
+
+HasNewFatalFailureHelper::~HasNewFatalFailureHelper() {
+ GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(
+ original_reporter_);
+}
+
+void HasNewFatalFailureHelper::ReportTestPartResult(
+ const TestPartResult& result) {
+ if (result.fatally_failed())
+ has_new_fatal_failure_ = true;
+ original_reporter_->ReportTestPartResult(result);
+}
+
+} // namespace internal
+
+} // namespace testing
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+
+namespace testing {
+namespace internal {
+
+#if GTEST_HAS_TYPED_TEST_P
+
+// Skips to the first non-space char in str. Returns an empty string if str
+// contains only whitespace characters.
+static const char* SkipSpaces(const char* str) {
+ while (IsSpace(*str))
+ str++;
+ return str;
+}
+
+// Verifies that registered_tests match the test names in
+// defined_test_names_; returns registered_tests if successful, or
+// aborts the program otherwise.
+const char* TypedTestCasePState::VerifyRegisteredTestNames(
+ const char* file, int line, const char* registered_tests) {
+ typedef ::std::set<const char*>::const_iterator DefinedTestIter;
+ registered_ = true;
+
+ // Skip initial whitespace in registered_tests since some
+ // preprocessors prefix stringized literals with whitespace.
+ registered_tests = SkipSpaces(registered_tests);
+
+ Message errors;
+ ::std::set<String> tests;
+ for (const char* names = registered_tests; names != NULL;
+ names = SkipComma(names)) {
+ const String name = GetPrefixUntilComma(names);
+ if (tests.count(name) != 0) {
+ errors << "Test " << name << " is listed more than once.\n";
+ continue;
+ }
+
+ bool found = false;
+ for (DefinedTestIter it = defined_test_names_.begin();
+ it != defined_test_names_.end();
+ ++it) {
+ if (name == *it) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found) {
+ tests.insert(name);
+ } else {
+ errors << "No test named " << name
+ << " can be found in this test case.\n";
+ }
+ }
+
+ for (DefinedTestIter it = defined_test_names_.begin();
+ it != defined_test_names_.end();
+ ++it) {
+ if (tests.count(*it) == 0) {
+ errors << "You forgot to list test " << *it << ".\n";
+ }
+ }
+
+ const String& errors_str = errors.GetString();
+ if (errors_str != "") {
+ fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(),
+ errors_str.c_str());
+ fflush(stderr);
+ posix::Abort();
+ }
+
+ return registered_tests;
+}
+
+#endif // GTEST_HAS_TYPED_TEST_P
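+
+// A short sketch (illustrative only, not from the original sources) of
+// the user-facing side of the check above: REGISTER_TYPED_TEST_CASE_P
+// must list every TYPED_TEST_P name exactly once, or the program aborts
+// with the errors composed above.  StackTest and the test names are made
+// up; instantiation via INSTANTIATE_TYPED_TEST_CASE_P is omitted.
+#if 0
+#include "gtest/gtest.h"
+
+template <typename T>
+class StackTest : public ::testing::Test {};
+TYPED_TEST_CASE_P(StackTest);
+
+TYPED_TEST_P(StackTest, StartsEmpty) { /* ... */ }
+TYPED_TEST_P(StackTest, PushPop) { /* ... */ }
+
+REGISTER_TYPED_TEST_CASE_P(StackTest, StartsEmpty, PushPop);
+#endif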
+
+} // namespace internal
+} // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// Google C++ Mocking Framework (Google Mock)
+//
+// This file #includes all Google Mock implementation .cc files. The
+// purpose is to allow a user to build Google Mock by compiling this
+// file alone.
+
+// This line ensures that gmock.h can be compiled on its own, even
+// when it's fused.
+#include "gmock/gmock.h"
+
+// The following lines pull in the real gmock *.cc files.
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements cardinalities.
+
+
+#include <limits.h>
+#include <ostream> // NOLINT
+#include <sstream>
+#include <string>
+
+namespace testing {
+
+namespace {
+
+// Implements the Between(m, n) cardinality.
+class BetweenCardinalityImpl : public CardinalityInterface {
+ public:
+ BetweenCardinalityImpl(int min, int max)
+ : min_(min >= 0 ? min : 0),
+ max_(max >= min_ ? max : min_) {
+ std::stringstream ss;
+ if (min < 0) {
+ ss << "The invocation lower bound must be >= 0, "
+ << "but is actually " << min << ".";
+ internal::Expect(false, __FILE__, __LINE__, ss.str());
+ } else if (max < 0) {
+ ss << "The invocation upper bound must be >= 0, "
+ << "but is actually " << max << ".";
+ internal::Expect(false, __FILE__, __LINE__, ss.str());
+ } else if (min > max) {
+ ss << "The invocation upper bound (" << max
+ << ") must be >= the invocation lower bound (" << min
+ << ").";
+ internal::Expect(false, __FILE__, __LINE__, ss.str());
+ }
+ }
+
+ // Conservative estimate on the lower/upper bound of the number of
+ // calls allowed.
+ virtual int ConservativeLowerBound() const { return min_; }
+ virtual int ConservativeUpperBound() const { return max_; }
+
+ virtual bool IsSatisfiedByCallCount(int call_count) const {
+ return min_ <= call_count && call_count <= max_;
+ }
+
+ virtual bool IsSaturatedByCallCount(int call_count) const {
+ return call_count >= max_;
+ }
+
+ virtual void DescribeTo(::std::ostream* os) const;
+ private:
+ const int min_;
+ const int max_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(BetweenCardinalityImpl);
+};
+
+// Formats "n times" in a human-friendly way.
+inline internal::string FormatTimes(int n) {
+ if (n == 1) {
+ return "once";
+ } else if (n == 2) {
+ return "twice";
+ } else {
+ std::stringstream ss;
+ ss << n << " times";
+ return ss.str();
+ }
+}
+
+// Describes the Between(m, n) cardinality in human-friendly text.
+void BetweenCardinalityImpl::DescribeTo(::std::ostream* os) const {
+ if (min_ == 0) {
+ if (max_ == 0) {
+ *os << "never called";
+ } else if (max_ == INT_MAX) {
+ *os << "called any number of times";
+ } else {
+ *os << "called at most " << FormatTimes(max_);
+ }
+ } else if (min_ == max_) {
+ *os << "called " << FormatTimes(min_);
+ } else if (max_ == INT_MAX) {
+ *os << "called at least " << FormatTimes(min_);
+ } else {
+ // 0 < min_ < max_ < INT_MAX
+ *os << "called between " << min_ << " and " << max_ << " times";
+ }
+}
+
+} // Unnamed namespace
+
+// Describes the given call count to an ostream.
+void Cardinality::DescribeActualCallCountTo(int actual_call_count,
+ ::std::ostream* os) {
+ if (actual_call_count > 0) {
+ *os << "called " << FormatTimes(actual_call_count);
+ } else {
+ *os << "never called";
+ }
+}
+
+// Creates a cardinality that allows at least n calls.
+Cardinality AtLeast(int n) { return Between(n, INT_MAX); }
+
+// Creates a cardinality that allows at most n calls.
+Cardinality AtMost(int n) { return Between(0, n); }
+
+// Creates a cardinality that allows any number of calls.
+Cardinality AnyNumber() { return AtLeast(0); }
+
+// Creates a cardinality that allows between min and max calls.
+Cardinality Between(int min, int max) {
+ return Cardinality(new BetweenCardinalityImpl(min, max));
+}
+
+// Creates a cardinality that allows exactly n calls.
+Cardinality Exactly(int n) { return Between(n, n); }
+
+} // namespace testing
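+
+// A minimal sketch (illustrative only, not from the original sources) of
+// how the cardinality factories above are consumed through
+// EXPECT_CALL(...).Times(...).  MockTurtle and PenDown() are made-up
+// names.
+#if 0
+#include "gmock/gmock.h"
+
+class Turtle {
+ public:
+  virtual ~Turtle() {}
+  virtual void PenDown() = 0;
+};
+
+class MockTurtle : public Turtle {
+ public:
+  MOCK_METHOD0(PenDown, void());
+};
+
+void Demo(MockTurtle& turtle) {
+  using ::testing::AtLeast;
+  // Equivalent to Between(2, INT_MAX); describes itself as
+  // "called at least twice".
+  EXPECT_CALL(turtle, PenDown()).Times(AtLeast(2));
+}
+#endif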
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file defines some utilities useful for implementing Google
+// Mock. They are subject to change without notice, so please DO NOT
+// USE THEM IN USER CODE.
+
+
+#include <ctype.h>
+#include <ostream> // NOLINT
+#include <string>
+
+namespace testing {
+namespace internal {
+
+// Converts an identifier name to a space-separated list of lower-case
+// words. Each maximum substring of the form [A-Za-z][a-z]*|\d+ is
+// treated as one word. For example, both "FooBar123" and
+// "foo_bar_123" are converted to "foo bar 123".
+string ConvertIdentifierNameToWords(const char* id_name) {
+ string result;
+ char prev_char = '\0';
+ for (const char* p = id_name; *p != '\0'; prev_char = *(p++)) {
+ // We don't care about the current locale as the input is
+ // guaranteed to be a valid C++ identifier name.
+ const bool starts_new_word = IsUpper(*p) ||
+ (!IsAlpha(prev_char) && IsLower(*p)) ||
+ (!IsDigit(prev_char) && IsDigit(*p));
+
+ if (IsAlNum(*p)) {
+ if (starts_new_word && result != "")
+ result += ' ';
+ result += ToLower(*p);
+ }
+ }
+ return result;
+}
+
+// This class reports Google Mock failures as Google Test failures. A
+// user can define another class in a similar fashion if he intends to
+// use Google Mock with a testing framework other than Google Test.
+class GoogleTestFailureReporter : public FailureReporterInterface {
+ public:
+ virtual void ReportFailure(FailureType type, const char* file, int line,
+ const string& message) {
+ AssertHelper(type == FATAL ?
+ TestPartResult::kFatalFailure :
+ TestPartResult::kNonFatalFailure,
+ file,
+ line,
+ message.c_str()) = Message();
+ if (type == FATAL) {
+ posix::Abort();
+ }
+ }
+};
+
+// Returns the global failure reporter. Creates a
+// GoogleTestFailureReporter and returns it the first time it is called.
+FailureReporterInterface* GetFailureReporter() {
+ // Points to the global failure reporter used by Google Mock. gcc
+ // guarantees that the following use of failure_reporter is
+ // thread-safe. We may need to add additional synchronization to
+ // protect failure_reporter if we port Google Mock to other
+ // compilers.
+ static FailureReporterInterface* const failure_reporter =
+ new GoogleTestFailureReporter();
+ return failure_reporter;
+}
+
+// Protects global resources (stdout in particular) used by Log().
+static GTEST_DEFINE_STATIC_MUTEX_(g_log_mutex);
+
+// Returns true iff a log with the given severity is visible according
+// to the --gmock_verbose flag.
+bool LogIsVisible(LogSeverity severity) {
+ if (GMOCK_FLAG(verbose) == kInfoVerbosity) {
+ // Always show the log if --gmock_verbose=info.
+ return true;
+ } else if (GMOCK_FLAG(verbose) == kErrorVerbosity) {
+ // Always hide it if --gmock_verbose=error.
+ return false;
+ } else {
+ // If --gmock_verbose is neither "info" nor "error", we treat it
+ // as "warning" (its default value).
+ return severity == WARNING;
+ }
+}
+
+// Prints the given message to stdout iff 'severity' >= the level
+// specified by the --gmock_verbose flag. If stack_frames_to_skip >=
+// 0, also prints the stack trace excluding the top
+// stack_frames_to_skip frames. In opt mode, any positive
+// stack_frames_to_skip is treated as 0, since we don't know which
+// function calls will be inlined by the compiler and need to be
+// conservative.
+void Log(LogSeverity severity, const string& message,
+ int stack_frames_to_skip) {
+ if (!LogIsVisible(severity))
+ return;
+
+ // Ensures that logs from different threads don't interleave.
+ MutexLock l(&g_log_mutex);
+
+ // "using ::std::cout;" doesn't work with Symbian's STLport, where cout is a
+ // macro.
+
+ if (severity == WARNING) {
+ // Prints a GMOCK WARNING marker to make the warnings easily searchable.
+ std::cout << "\nGMOCK WARNING:";
+ }
+ // Prepends a newline to message if it doesn't start with one.
+ if (message.empty() || message[0] != '\n') {
+ std::cout << "\n";
+ }
+ std::cout << message;
+ if (stack_frames_to_skip >= 0) {
+#ifdef NDEBUG
+ // In opt mode, we have to be conservative and skip no stack frame.
+ const int actual_to_skip = 0;
+#else
+ // In dbg mode, we can do what the caller tells us to do (plus one
+ // for skipping this function's stack frame).
+ const int actual_to_skip = stack_frames_to_skip + 1;
+#endif // NDEBUG
+
+ // Appends a new-line to message if it doesn't end with one.
+ if (!message.empty() && *message.rbegin() != '\n') {
+ std::cout << "\n";
+ }
+ std::cout << "Stack trace:\n"
+ << ::testing::internal::GetCurrentOsStackTraceExceptTop(
+ ::testing::UnitTest::GetInstance(), actual_to_skip);
+ }
+ std::cout << ::std::flush;
+}
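+
+// Illustrative note (not from the original sources): the visibility rules
+// above are controlled by the --gmock_verbose command-line flag; the
+// binary name below is made up.
+//   ./my_test --gmock_verbose=info     -> INFO and WARNING logs are shown
+//   ./my_test --gmock_verbose=warning  -> (default) only WARNING logs
+//   ./my_test --gmock_verbose=error    -> no logs (failures still reported)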
+
+} // namespace internal
+} // namespace testing
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements Matcher<const string&>, Matcher<string>, and
+// utilities for defining matchers.
+
+
+#include <string.h>
+#include <sstream>
+#include <string>
+
+namespace testing {
+
+// Constructs a matcher that matches a const string& whose value is
+// equal to s.
+Matcher<const internal::string&>::Matcher(const internal::string& s) {
+ *this = Eq(s);
+}
+
+// Constructs a matcher that matches a const string& whose value is
+// equal to s.
+Matcher<const internal::string&>::Matcher(const char* s) {
+ *this = Eq(internal::string(s));
+}
+
+// Constructs a matcher that matches a string whose value is equal to s.
+Matcher<internal::string>::Matcher(const internal::string& s) { *this = Eq(s); }
+
+// Constructs a matcher that matches a string whose value is equal to s.
+Matcher<internal::string>::Matcher(const char* s) {
+ *this = Eq(internal::string(s));
+}
+
+namespace internal {
+
+// Joins a vector of strings as if they are fields of a tuple; returns
+// the joined string.
+string JoinAsTuple(const Strings& fields) {
+ switch (fields.size()) {
+ case 0:
+ return "";
+ case 1:
+ return fields[0];
+ default:
+ string result = "(" + fields[0];
+ for (size_t i = 1; i < fields.size(); i++) {
+ result += ", ";
+ result += fields[i];
+ }
+ result += ")";
+ return result;
+ }
+}
+
+// Returns the description of a matcher defined using the MATCHER*()
+// macros when the user-supplied description string is ""; if 'negation'
+// is true, returns the description of the matcher's negation instead.
+// 'param_values' contains a list of strings that are the print-outs of
+// the matcher's parameters.
+string FormatMatcherDescription(bool negation, const char* matcher_name,
+ const Strings& param_values) {
+ string result = ConvertIdentifierNameToWords(matcher_name);
+ if (param_values.size() >= 1)
+ result += " " + JoinAsTuple(param_values);
+ return negation ? "not (" + result + ")" : result;
+}
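+
+// A minimal sketch (illustrative only, not from the original sources) of
+// what the two helpers above produce for a MATCHER_P-style matcher whose
+// user-supplied description string is empty.  IsDivisibleBy is made up.
+#if 0
+#include <string>
+#include "gmock/gmock.h"
+
+void Demo() {
+  testing::internal::Strings params;
+  params.push_back("7");
+  std::string desc = testing::internal::FormatMatcherDescription(
+      false, "IsDivisibleBy", params);   // "is divisible by 7"
+  std::string neg = testing::internal::FormatMatcherDescription(
+      true, "IsDivisibleBy", params);    // "not (is divisible by 7)"
+}
+#endif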
+
+} // namespace internal
+} // namespace testing
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Mock - a framework for writing C++ mock classes.
+//
+// This file implements the spec builder syntax (ON_CALL and
+// EXPECT_CALL).
+
+
+#include <stdlib.h>
+#include <iostream> // NOLINT
+#include <map>
+#include <set>
+#include <string>
+
+#if GTEST_OS_CYGWIN || GTEST_OS_LINUX || GTEST_OS_MAC
+# include <unistd.h> // NOLINT
+#endif
+
+namespace testing {
+namespace internal {
+
+// Protects the mock object registry (in class Mock), all function
+// mockers, and all expectations.
+GTEST_DEFINE_STATIC_MUTEX_(g_gmock_mutex);
+
+// Logs a message including file and line number information.
+void LogWithLocation(testing::internal::LogSeverity severity,
+ const char* file, int line,
+ const string& message) {
+ ::std::ostringstream s;
+ s << file << ":" << line << ": " << message << ::std::endl;
+ Log(severity, s.str(), 0);
+}
+
+// Constructs an ExpectationBase object.
+ExpectationBase::ExpectationBase(const char* a_file,
+ int a_line,
+ const string& a_source_text)
+ : file_(a_file),
+ line_(a_line),
+ source_text_(a_source_text),
+ cardinality_specified_(false),
+ cardinality_(Exactly(1)),
+ call_count_(0),
+ retired_(false),
+ extra_matcher_specified_(false),
+ repeated_action_specified_(false),
+ retires_on_saturation_(false),
+ last_clause_(kNone),
+ action_count_checked_(false) {}
+
+// Destructs an ExpectationBase object.
+ExpectationBase::~ExpectationBase() {}
+
+// Explicitly specifies the cardinality of this expectation. Used by
+// the subclasses to implement the .Times() clause.
+void ExpectationBase::SpecifyCardinality(const Cardinality& a_cardinality) {
+ cardinality_specified_ = true;
+ cardinality_ = a_cardinality;
+}
+
+// Retires all pre-requisites of this expectation.
+void ExpectationBase::RetireAllPreRequisites() {
+ if (is_retired()) {
+ // We can take this short-cut as we never retire an expectation
+ // until we have retired all its pre-requisites.
+ return;
+ }
+
+ for (ExpectationSet::const_iterator it = immediate_prerequisites_.begin();
+ it != immediate_prerequisites_.end(); ++it) {
+ ExpectationBase* const prerequisite = it->expectation_base().get();
+ if (!prerequisite->is_retired()) {
+ prerequisite->RetireAllPreRequisites();
+ prerequisite->Retire();
+ }
+ }
+}
+
+// Returns true iff all pre-requisites of this expectation have been
+// satisfied.
+// L >= g_gmock_mutex
+bool ExpectationBase::AllPrerequisitesAreSatisfied() const {
+ g_gmock_mutex.AssertHeld();
+ for (ExpectationSet::const_iterator it = immediate_prerequisites_.begin();
+ it != immediate_prerequisites_.end(); ++it) {
+ if (!(it->expectation_base()->IsSatisfied()) ||
+ !(it->expectation_base()->AllPrerequisitesAreSatisfied()))
+ return false;
+ }
+ return true;
+}
+
+// Adds unsatisfied pre-requisites of this expectation to 'result'.
+// L >= g_gmock_mutex
+void ExpectationBase::FindUnsatisfiedPrerequisites(
+ ExpectationSet* result) const {
+ g_gmock_mutex.AssertHeld();
+ for (ExpectationSet::const_iterator it = immediate_prerequisites_.begin();
+ it != immediate_prerequisites_.end(); ++it) {
+ if (it->expectation_base()->IsSatisfied()) {
+ // If *it is satisfied and has a call count of 0, some of its
+ // pre-requisites may not be satisfied yet.
+ if (it->expectation_base()->call_count_ == 0) {
+ it->expectation_base()->FindUnsatisfiedPrerequisites(result);
+ }
+ } else {
+ // Now that we know *it is unsatisfied, we are not so interested
+ // in whether its pre-requisites are satisfied. Therefore we
+ // don't recursively call FindUnsatisfiedPrerequisites() here.
+ *result += *it;
+ }
+ }
+}
+
+// Describes how many times a function call matching this
+// expectation has occurred.
+// L >= g_gmock_mutex
+void ExpectationBase::DescribeCallCountTo(::std::ostream* os) const {
+ g_gmock_mutex.AssertHeld();
+
+ // Describes how many times the function is expected to be called.
+ *os << " Expected: to be ";
+ cardinality().DescribeTo(os);
+ *os << "\n Actual: ";
+ Cardinality::DescribeActualCallCountTo(call_count(), os);
+
+ // Describes the state of the expectation (e.g. is it satisfied?
+ // is it active?).
+ *os << " - " << (IsOverSaturated() ? "over-saturated" :
+ IsSaturated() ? "saturated" :
+ IsSatisfied() ? "satisfied" : "unsatisfied")
+ << " and "
+ << (is_retired() ? "retired" : "active");
+}
+
+// Checks the action count (i.e. the number of WillOnce() and
+// WillRepeatedly() clauses) against the cardinality if this hasn't
+// been done before. Prints a warning if there are too many or too
+// few actions.
+// L < mutex_
+void ExpectationBase::CheckActionCountIfNotDone() const {
+ bool should_check = false;
+ {
+ MutexLock l(&mutex_);
+ if (!action_count_checked_) {
+ action_count_checked_ = true;
+ should_check = true;
+ }
+ }
+
+ if (should_check) {
+ if (!cardinality_specified_) {
+ // The cardinality was inferred - no need to check the action
+ // count against it.
+ return;
+ }
+
+ // The cardinality was explicitly specified.
+ const int action_count = static_cast<int>(untyped_actions_.size());
+ const int upper_bound = cardinality().ConservativeUpperBound();
+ const int lower_bound = cardinality().ConservativeLowerBound();
+ bool too_many; // True if there are too many actions, or false
+ // if there are too few.
+ if (action_count > upper_bound ||
+ (action_count == upper_bound && repeated_action_specified_)) {
+ too_many = true;
+ } else if (0 < action_count && action_count < lower_bound &&
+ !repeated_action_specified_) {
+ too_many = false;
+ } else {
+ return;
+ }
+
+ ::std::stringstream ss;
+ DescribeLocationTo(&ss);
+ ss << "Too " << (too_many ? "many" : "few")
+ << " actions specified in " << source_text() << "...\n"
+ << "Expected to be ";
+ cardinality().DescribeTo(&ss);
+ ss << ", but has " << (too_many ? "" : "only ")
+ << action_count << " WillOnce()"
+ << (action_count == 1 ? "" : "s");
+ if (repeated_action_specified_) {
+ ss << " and a WillRepeatedly()";
+ }
+ ss << ".";
+ Log(WARNING, ss.str(), -1); // -1 means "don't print stack trace".
+ }
+}
+
+// Implements the .Times() clause.
+void ExpectationBase::UntypedTimes(const Cardinality& a_cardinality) {
+ if (last_clause_ == kTimes) {
+ ExpectSpecProperty(false,
+ ".Times() cannot appear "
+ "more than once in an EXPECT_CALL().");
+ } else {
+ ExpectSpecProperty(last_clause_ < kTimes,
+ ".Times() cannot appear after "
+ ".InSequence(), .WillOnce(), .WillRepeatedly(), "
+ "or .RetiresOnSaturation().");
+ }
+ last_clause_ = kTimes;
+
+ SpecifyCardinality(a_cardinality);
+}
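+
+// A short sketch (illustrative only, not from the original sources) of
+// the clause ordering enforced above: .Times() may appear at most once
+// and must come before .InSequence(), .WillOnce(), .WillRepeatedly() and
+// .RetiresOnSaturation().  'turtle' is a made-up mock whose GetX()
+// returns int.
+#if 0
+EXPECT_CALL(turtle, GetX())                  // OK: Times() first, only once
+    .Times(3)
+    .WillOnce(testing::Return(1))
+    .WillRepeatedly(testing::Return(2));
+
+EXPECT_CALL(turtle, GetX())
+    .WillOnce(testing::Return(1))
+    .Times(3);                               // rejected by UntypedTimes() above
+#endif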
+
+// Points to the implicit sequence introduced by a living InSequence
+// object (if any) in the current thread or NULL.
+ThreadLocal<Sequence*> g_gmock_implicit_sequence;
+
+// Reports an uninteresting call (whose description is in msg) in the
+// manner specified by 'reaction'.
+void ReportUninterestingCall(CallReaction reaction, const string& msg) {
+ switch (reaction) {
+ case ALLOW:
+ Log(INFO, msg, 3);
+ break;
+ case WARN:
+ Log(WARNING, msg, 3);
+ break;
+ default: // FAIL
+ Expect(false, NULL, -1, msg);
+ }
+}
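+
+// An illustrative sketch (not from the original sources): the
+// ALLOW/WARN/FAIL reactions above are what NiceMock, a plain mock and
+// StrictMock select for uninteresting calls.  MockTurtle is a made-up
+// mock class.
+#if 0
+#include "gmock/gmock.h"
+
+void Demo() {
+  testing::NiceMock<MockTurtle> nice;      // uninteresting calls -> ALLOW
+  MockTurtle naggy;                        // default             -> WARN
+  testing::StrictMock<MockTurtle> strict;  // uninteresting calls -> FAIL
+}
+#endif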
+
+UntypedFunctionMockerBase::UntypedFunctionMockerBase()
+ : mock_obj_(NULL), name_("") {}
+
+UntypedFunctionMockerBase::~UntypedFunctionMockerBase() {}
+
+// Sets the mock object this mock method belongs to, and registers
+// this information in the global mock registry. Will be called
+// whenever an EXPECT_CALL() or ON_CALL() is executed on this mock
+// method.
+// L < g_gmock_mutex
+void UntypedFunctionMockerBase::RegisterOwner(const void* mock_obj) {
+ {
+ MutexLock l(&g_gmock_mutex);
+ mock_obj_ = mock_obj;
+ }
+ Mock::Register(mock_obj, this);
+}
+
+// Sets the mock object this mock method belongs to, and sets the name
+// of the mock function. Will be called upon each invocation of this
+// mock function.
+// L < g_gmock_mutex
+void UntypedFunctionMockerBase::SetOwnerAndName(
+ const void* mock_obj, const char* name) {
+ // We protect name_ under g_gmock_mutex in case this mock function
+ // is called from two threads concurrently.
+ MutexLock l(&g_gmock_mutex);
+ mock_obj_ = mock_obj;
+ name_ = name;
+}
+
+// Returns the mock object this mock method belongs to. Must be called
+// after RegisterOwner() or SetOwnerAndName() has been called.
+// L < g_gmock_mutex
+const void* UntypedFunctionMockerBase::MockObject() const {
+ const void* mock_obj;
+ {
+ // We protect mock_obj_ under g_gmock_mutex in case this mock
+ // function is called from two threads concurrently.
+ MutexLock l(&g_gmock_mutex);
+ Assert(mock_obj_ != NULL, __FILE__, __LINE__,
+ "MockObject() must not be called before RegisterOwner() or "
+ "SetOwnerAndName() has been called.");
+ mock_obj = mock_obj_;
+ }
+ return mock_obj;
+}
+
+// Returns the name of this mock method. Must be called after
+// SetOwnerAndName() has been called.
+// L < g_gmock_mutex
+const char* UntypedFunctionMockerBase::Name() const {
+ const char* name;
+ {
+ // We protect name_ under g_gmock_mutex in case this mock
+ // function is called from two threads concurrently.
+ MutexLock l(&g_gmock_mutex);
+ Assert(name_ != NULL, __FILE__, __LINE__,
+ "Name() must not be called before SetOwnerAndName() has "
+ "been called.");
+ name = name_;
+ }
+ return name;
+}
+
+// Calculates the result of invoking this mock function with the given
+// arguments, prints it, and returns it. The caller is responsible
+// for deleting the result.
+// L < g_gmock_mutex
+const UntypedActionResultHolderBase*
+UntypedFunctionMockerBase::UntypedInvokeWith(const void* const untyped_args) {
+ if (untyped_expectations_.size() == 0) {
+ // No expectation is set on this mock method - we have an
+ // uninteresting call.
+
+ // We must get Google Mock's reaction on uninteresting calls
+ // made on this mock object BEFORE performing the action,
+ // because the action may DELETE the mock object and make the
+ // following expression meaningless.
+ const CallReaction reaction =
+ Mock::GetReactionOnUninterestingCalls(MockObject());
+
+ // True iff we need to print this call's arguments and return
+ // value. This definition must be kept in sync with
+ // the behavior of ReportUninterestingCall().
+ const bool need_to_report_uninteresting_call =
+ // If the user allows this uninteresting call, we print it
+ // only when he wants informational messages.
+ reaction == ALLOW ? LogIsVisible(INFO) :
+ // If the user wants this to be a warning, we print it only
+ // when he wants to see warnings.
+ reaction == WARN ? LogIsVisible(WARNING) :
+ // Otherwise, the user wants this to be an error, and we
+ // should always print detailed information in the error.
+ true;
+
+ if (!need_to_report_uninteresting_call) {
+ // Perform the action without printing the call information.
+ return this->UntypedPerformDefaultAction(untyped_args, "");
+ }
+
+ // Warns about the uninteresting call.
+ ::std::stringstream ss;
+ this->UntypedDescribeUninterestingCall(untyped_args, &ss);
+
+ // Calculates the function result.
+ const UntypedActionResultHolderBase* const result =
+ this->UntypedPerformDefaultAction(untyped_args, ss.str());
+
+ // Prints the function result.
+ if (result != NULL)
+ result->PrintAsActionResult(&ss);
+
+ ReportUninterestingCall(reaction, ss.str());
+ return result;
+ }
+
+ bool is_excessive = false;
+ ::std::stringstream ss;
+ ::std::stringstream why;
+ ::std::stringstream loc;
+ const void* untyped_action = NULL;
+
+ // The UntypedFindMatchingExpectation() function acquires and
+ // releases g_gmock_mutex.
+ const ExpectationBase* const untyped_expectation =
+ this->UntypedFindMatchingExpectation(
+ untyped_args, &untyped_action, &is_excessive,
+ &ss, &why);
+ const bool found = untyped_expectation != NULL;
+
+ // True iff we need to print the call's arguments and return value.
+ // This definition must be kept in sync with the uses of Expect()
+ // and Log() in this function.
+ const bool need_to_report_call = !found || is_excessive || LogIsVisible(INFO);
+ if (!need_to_report_call) {
+ // Perform the action without printing the call information.
+ return
+ untyped_action == NULL ?
+ this->UntypedPerformDefaultAction(untyped_args, "") :
+ this->UntypedPerformAction(untyped_action, untyped_args);
+ }
+
+ ss << " Function call: " << Name();
+ this->UntypedPrintArgs(untyped_args, &ss);
+
+ // In case the action deletes a piece of the expectation, we
+ // generate the message beforehand.
+ if (found && !is_excessive) {
+ untyped_expectation->DescribeLocationTo(&loc);
+ }
+
+ const UntypedActionResultHolderBase* const result =
+ untyped_action == NULL ?
+ this->UntypedPerformDefaultAction(untyped_args, ss.str()) :
+ this->UntypedPerformAction(untyped_action, untyped_args);
+ if (result != NULL)
+ result->PrintAsActionResult(&ss);
+ ss << "\n" << why.str();
+
+ if (!found) {
+ // No expectation matches this call - reports a failure.
+ Expect(false, NULL, -1, ss.str());
+ } else if (is_excessive) {
+ // We had an upper-bound violation and the failure message is in ss.
+ Expect(false, untyped_expectation->file(),
+ untyped_expectation->line(), ss.str());
+ } else {
+ // We had an expected call and the matching expectation is
+ // described in ss.
+ Log(INFO, loc.str() + ss.str(), 2);
+ }
+
+ return result;
+}
+
+// Returns an Expectation object that references and co-owns exp,
+// which must be an expectation on this mock function.
+Expectation UntypedFunctionMockerBase::GetHandleOf(ExpectationBase* exp) {
+ for (UntypedExpectations::const_iterator it =
+ untyped_expectations_.begin();
+ it != untyped_expectations_.end(); ++it) {
+ if (it->get() == exp) {
+ return Expectation(*it);
+ }
+ }
+
+ Assert(false, __FILE__, __LINE__, "Cannot find expectation.");
+ return Expectation();
+ // The above statement is just to make the code compile, and will
+ // never be executed.
+}
+
+// Verifies that all expectations on this mock function have been
+// satisfied. Reports one or more Google Test non-fatal failures
+// and returns false if not.
+// L >= g_gmock_mutex
+bool UntypedFunctionMockerBase::VerifyAndClearExpectationsLocked() {
+ g_gmock_mutex.AssertHeld();
+ bool expectations_met = true;
+ for (UntypedExpectations::const_iterator it =
+ untyped_expectations_.begin();
+ it != untyped_expectations_.end(); ++it) {
+ ExpectationBase* const untyped_expectation = it->get();
+ if (untyped_expectation->IsOverSaturated()) {
+ // There was an upper-bound violation. Since the error was
+ // already reported when it occurred, there is no need to do
+ // anything here.
+ expectations_met = false;
+ } else if (!untyped_expectation->IsSatisfied()) {
+ expectations_met = false;
+ ::std::stringstream ss;
+ ss << "Actual function call count doesn't match "
+ << untyped_expectation->source_text() << "...\n";
+ // No need to show the source file location of the expectation
+ // in the description, as the Expect() call that follows already
+ // takes care of it.
+ untyped_expectation->MaybeDescribeExtraMatcherTo(&ss);
+ untyped_expectation->DescribeCallCountTo(&ss);
+ Expect(false, untyped_expectation->file(),
+ untyped_expectation->line(), ss.str());
+ }
+ }
+ untyped_expectations_.clear();
+ return expectations_met;
+}
+
+} // namespace internal
+
+// Class Mock.
+
+namespace {
+
+typedef std::set<internal::UntypedFunctionMockerBase*> FunctionMockers;
+
+// The current state of a mock object. Such information is needed for
+// detecting leaked mock objects and explicitly verifying a mock's
+// expectations.
+struct MockObjectState {
+ MockObjectState()
+ : first_used_file(NULL), first_used_line(-1), leakable(false) {}
+
+ // Where in the source file an ON_CALL or EXPECT_CALL is first
+ // invoked on this mock object.
+ const char* first_used_file;
+ int first_used_line;
+ ::std::string first_used_test_case;
+ ::std::string first_used_test;
+ bool leakable; // true iff it's OK to leak the object.
+ FunctionMockers function_mockers; // All registered methods of the object.
+};
+
+// A global registry holding the state of all mock objects that are
+// alive. A mock object is added to this registry the first time
+// Mock::AllowLeak(), ON_CALL(), or EXPECT_CALL() is called on it. It
+// is removed from the registry in the mock object's destructor.
+class MockObjectRegistry {
+ public:
+ // Maps a mock object (identified by its address) to its state.
+ typedef std::map<const void*, MockObjectState> StateMap;
+
+ // This destructor will be called when a program exits, after all
+ // tests in it have been run. By then, there should be no mock
+ // object alive. Therefore we report any living object as test
+ // failure, unless the user explicitly asked us to ignore it.
+ ~MockObjectRegistry() {
+ // "using ::std::cout;" doesn't work with Symbian's STLport, where cout is
+ // a macro.
+
+ if (!GMOCK_FLAG(catch_leaked_mocks))
+ return;
+
+ int leaked_count = 0;
+ for (StateMap::const_iterator it = states_.begin(); it != states_.end();
+ ++it) {
+ if (it->second.leakable) // The user said it's fine to leak this object.
+ continue;
+
+ // TODO(wan@google.com): Print the type of the leaked object.
+ // This can help the user identify the leaked object.
+ std::cout << "\n";
+ const MockObjectState& state = it->second;
+ std::cout << internal::FormatFileLocation(state.first_used_file,
+ state.first_used_line);
+ std::cout << " ERROR: this mock object";
+ if (state.first_used_test != "") {
+ std::cout << " (used in test " << state.first_used_test_case << "."
+ << state.first_used_test << ")";
+ }
+ std::cout << " should be deleted but never is. Its address is @"
+ << it->first << ".";
+ leaked_count++;
+ }
+ if (leaked_count > 0) {
+ std::cout << "\nERROR: " << leaked_count
+ << " leaked mock " << (leaked_count == 1 ? "object" : "objects")
+ << " found at program exit.\n";
+ std::cout.flush();
+ ::std::cerr.flush();
+ // RUN_ALL_TESTS() has already returned when this destructor is
+ // called. Therefore we cannot use the normal Google Test
+ // failure reporting mechanism.
+ _exit(1); // We cannot call exit() as it is not reentrant and
+ // may already have been called.
+ }
+ }
+
+ StateMap& states() { return states_; }
+ private:
+ StateMap states_;
+};
+
+// Protected by g_gmock_mutex.
+MockObjectRegistry g_mock_object_registry;
+
+// Maps a mock object to the reaction Google Mock should have when an
+// uninteresting method is called. Protected by g_gmock_mutex.
+std::map<const void*, internal::CallReaction> g_uninteresting_call_reaction;
+
+// Sets the reaction Google Mock should have when an uninteresting
+// method of the given mock object is called.
+// L < g_gmock_mutex
+void SetReactionOnUninterestingCalls(const void* mock_obj,
+ internal::CallReaction reaction) {
+ internal::MutexLock l(&internal::g_gmock_mutex);
+ g_uninteresting_call_reaction[mock_obj] = reaction;
+}
+
+} // namespace
+
+// Tells Google Mock to allow uninteresting calls on the given mock
+// object.
+// L < g_gmock_mutex
+void Mock::AllowUninterestingCalls(const void* mock_obj) {
+ SetReactionOnUninterestingCalls(mock_obj, internal::ALLOW);
+}
+
+// Tells Google Mock to warn the user about uninteresting calls on the
+// given mock object.
+// L < g_gmock_mutex
+void Mock::WarnUninterestingCalls(const void* mock_obj) {
+ SetReactionOnUninterestingCalls(mock_obj, internal::WARN);
+}
+
+// Tells Google Mock to fail uninteresting calls on the given mock
+// object.
+// L < g_gmock_mutex
+void Mock::FailUninterestingCalls(const void* mock_obj) {
+ SetReactionOnUninterestingCalls(mock_obj, internal::FAIL);
+}
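+
+// For illustration, these three reactions are what the NiceMock<> and
+// StrictMock<> wrappers ultimately select for a mock object; e.g. (MockFoo
+// is a hypothetical mock class):
+//
+//   NiceMock<MockFoo> nice;      // Uninteresting calls are silently allowed.
+//   MockFoo naggy;               // Uninteresting calls produce warnings.
+//   StrictMock<MockFoo> strict;  // Uninteresting calls fail the test.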
+
+// Tells Google Mock the given mock object is being destroyed and its
+// entry in the call-reaction table should be removed.
+// L < g_gmock_mutex
+void Mock::UnregisterCallReaction(const void* mock_obj) {
+ internal::MutexLock l(&internal::g_gmock_mutex);
+ g_uninteresting_call_reaction.erase(mock_obj);
+}
+
+// Returns the reaction Google Mock will have on uninteresting calls
+// made on the given mock object.
+// L < g_gmock_mutex
+internal::CallReaction Mock::GetReactionOnUninterestingCalls(
+ const void* mock_obj) {
+ internal::MutexLock l(&internal::g_gmock_mutex);
+ return (g_uninteresting_call_reaction.count(mock_obj) == 0) ?
+ internal::WARN : g_uninteresting_call_reaction[mock_obj];
+}
+
+// Tells Google Mock to ignore mock_obj when checking for leaked mock
+// objects.
+// L < g_gmock_mutex
+void Mock::AllowLeak(const void* mock_obj) {
+ internal::MutexLock l(&internal::g_gmock_mutex);
+ g_mock_object_registry.states()[mock_obj].leakable = true;
+}
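+
+// For illustration, a test that intentionally never deletes a mock can
+// suppress the leak report like this (MockFoo is hypothetical):
+//
+//   MockFoo* foo = new MockFoo;
+//   Mock::AllowLeak(foo);  // No "leaked mock" failure at program exit.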
+
+// Verifies and clears all expectations on the given mock object. If
+// the expectations aren't satisfied, generates one or more Google
+// Test non-fatal failures and returns false.
+// L < g_gmock_mutex
+bool Mock::VerifyAndClearExpectations(void* mock_obj) {
+ internal::MutexLock l(&internal::g_gmock_mutex);
+ return VerifyAndClearExpectationsLocked(mock_obj);
+}
+
+// Verifies all expectations on the given mock object and clears its
+// default actions and expectations. Returns true iff the
+// verification was successful.
+// L < g_gmock_mutex
+bool Mock::VerifyAndClear(void* mock_obj) {
+ internal::MutexLock l(&internal::g_gmock_mutex);
+ ClearDefaultActionsLocked(mock_obj);
+ return VerifyAndClearExpectationsLocked(mock_obj);
+}
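+
+// For illustration, a test that reuses a mock across several phases can
+// check it mid-test (mock_foo is a hypothetical mock object):
+//
+//   EXPECT_TRUE(Mock::VerifyAndClearExpectations(&mock_foo));
+//
+// VerifyAndClear() does the same and additionally drops the ON_CALL()
+// default actions.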
+
+// Verifies and clears all expectations on the given mock object. If
+// the expectations aren't satisfied, generates one or more Google
+// Test non-fatal failures and returns false.
+// L >= g_gmock_mutex
+bool Mock::VerifyAndClearExpectationsLocked(void* mock_obj) {
+ internal::g_gmock_mutex.AssertHeld();
+ if (g_mock_object_registry.states().count(mock_obj) == 0) {
+ // No EXPECT_CALL() was set on the given mock object.
+ return true;
+ }
+
+ // Verifies and clears the expectations on each mock method in the
+ // given mock object.
+ bool expectations_met = true;
+ FunctionMockers& mockers =
+ g_mock_object_registry.states()[mock_obj].function_mockers;
+ for (FunctionMockers::const_iterator it = mockers.begin();
+ it != mockers.end(); ++it) {
+ if (!(*it)->VerifyAndClearExpectationsLocked()) {
+ expectations_met = false;
+ }
+ }
+
+ // We don't clear the content of mockers, as they may still be
+ // needed by ClearDefaultActionsLocked().
+ return expectations_met;
+}
+
+// Registers a mock object and a mock method it owns.
+// L < g_gmock_mutex
+void Mock::Register(const void* mock_obj,
+ internal::UntypedFunctionMockerBase* mocker) {
+ internal::MutexLock l(&internal::g_gmock_mutex);
+ g_mock_object_registry.states()[mock_obj].function_mockers.insert(mocker);
+}
+
+// Tells Google Mock where in the source code mock_obj is used in an
+// ON_CALL or EXPECT_CALL. In case mock_obj is leaked, this
+// information helps the user identify which object it is.
+// L < g_gmock_mutex
+void Mock::RegisterUseByOnCallOrExpectCall(
+ const void* mock_obj, const char* file, int line) {
+ internal::MutexLock l(&internal::g_gmock_mutex);
+ MockObjectState& state = g_mock_object_registry.states()[mock_obj];
+ if (state.first_used_file == NULL) {
+ state.first_used_file = file;
+ state.first_used_line = line;
+ const TestInfo* const test_info =
+ UnitTest::GetInstance()->current_test_info();
+ if (test_info != NULL) {
+ // TODO(wan@google.com): record the test case name when the
+ // ON_CALL or EXPECT_CALL is invoked from SetUpTestCase() or
+ // TearDownTestCase().
+ state.first_used_test_case = test_info->test_case_name();
+ state.first_used_test = test_info->name();
+ }
+ }
+}
+
+// Unregisters a mock method; removes the owning mock object from the
+// registry when the last mock method associated with it has been
+// unregistered. This is called only in the destructor of
+// FunctionMockerBase.
+// L >= g_gmock_mutex
+void Mock::UnregisterLocked(internal::UntypedFunctionMockerBase* mocker) {
+ internal::g_gmock_mutex.AssertHeld();
+ for (MockObjectRegistry::StateMap::iterator it =
+ g_mock_object_registry.states().begin();
+ it != g_mock_object_registry.states().end(); ++it) {
+ FunctionMockers& mockers = it->second.function_mockers;
+ if (mockers.erase(mocker) > 0) {
+ // mocker was in mockers and has been just removed.
+ if (mockers.empty()) {
+ g_mock_object_registry.states().erase(it);
+ }
+ return;
+ }
+ }
+}
+
+// Clears all ON_CALL()s set on the given mock object.
+// L >= g_gmock_mutex
+void Mock::ClearDefaultActionsLocked(void* mock_obj) {
+ internal::g_gmock_mutex.AssertHeld();
+
+ if (g_mock_object_registry.states().count(mock_obj) == 0) {
+ // No ON_CALL() was set on the given mock object.
+ return;
+ }
+
+ // Clears the default actions for each mock method in the given mock
+ // object.
+ FunctionMockers& mockers =
+ g_mock_object_registry.states()[mock_obj].function_mockers;
+ for (FunctionMockers::const_iterator it = mockers.begin();
+ it != mockers.end(); ++it) {
+ (*it)->ClearDefaultActionsLocked();
+ }
+
+ // We don't clear the content of mockers, as they may still be
+ // needed by VerifyAndClearExpectationsLocked().
+}
+
+Expectation::Expectation() {}
+
+Expectation::Expectation(
+ const internal::linked_ptr<internal::ExpectationBase>& an_expectation_base)
+ : expectation_base_(an_expectation_base) {}
+
+Expectation::~Expectation() {}
+
+// Adds an expectation to a sequence.
+void Sequence::AddExpectation(const Expectation& expectation) const {
+ if (*last_expectation_ != expectation) {
+ if (last_expectation_->expectation_base() != NULL) {
+ expectation.expectation_base()->immediate_prerequisites_
+ += *last_expectation_;
+ }
+ *last_expectation_ = expectation;
+ }
+}
+
+// Creates the implicit sequence if there isn't one.
+InSequence::InSequence() {
+ if (internal::g_gmock_implicit_sequence.get() == NULL) {
+ internal::g_gmock_implicit_sequence.set(new Sequence);
+ sequence_created_ = true;
+ } else {
+ sequence_created_ = false;
+ }
+}
+
+// Deletes the implicit sequence if it was created by the constructor
+// of this object.
+InSequence::~InSequence() {
+ if (sequence_created_) {
+ delete internal::g_gmock_implicit_sequence.get();
+ internal::g_gmock_implicit_sequence.set(NULL);
+ }
+}
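+
+// For illustration, the implicit sequence managed above is what makes this
+// common idiom enforce call order (foo is a hypothetical mock):
+//
+//   {
+//     InSequence s;
+//     EXPECT_CALL(foo, Open());
+//     EXPECT_CALL(foo, Close());  // Must occur after Open().
+//   }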
+
+} // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+
+namespace testing {
+
+// TODO(wan@google.com): support using environment variables to
+// control the flag values, like what Google Test does.
+
+GMOCK_DEFINE_bool_(catch_leaked_mocks, true,
+ "true iff Google Mock should report leaked mock objects "
+ "as failures.");
+
+GMOCK_DEFINE_string_(verbose, internal::kWarningVerbosity,
+ "Controls how verbose Google Mock's output is."
+ " Valid values:\n"
+ " info - prints all messages.\n"
+ " warning - prints warnings and errors.\n"
+ " error - prints errors only.");
+
+namespace internal {
+
+// Parses a string as a command line flag. The string should have the
+// format "--gmock_flag=value". When def_optional is true, the
+// "=value" part can be omitted.
+//
+// Returns the value of the flag, or NULL if the parsing failed.
+static const char* ParseGoogleMockFlagValue(const char* str,
+ const char* flag,
+ bool def_optional) {
+ // str and flag must not be NULL.
+ if (str == NULL || flag == NULL) return NULL;
+
+ // The flag must start with "--gmock_".
+ const String flag_str = String::Format("--gmock_%s", flag);
+ const size_t flag_len = flag_str.length();
+ if (strncmp(str, flag_str.c_str(), flag_len) != 0) return NULL;
+
+ // Skips the flag name.
+ const char* flag_end = str + flag_len;
+
+ // When def_optional is true, it's OK to not have a "=value" part.
+ if (def_optional && (flag_end[0] == '\0')) {
+ return flag_end;
+ }
+
+ // If def_optional is true and there are more characters after the
+ // flag name, or if def_optional is false, there must be a '=' after
+ // the flag name.
+ if (flag_end[0] != '=') return NULL;
+
+ // Returns the string after "=".
+ return flag_end + 1;
+}
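+
+// For illustration, given the parsing rules above:
+//
+//   ParseGoogleMockFlagValue("--gmock_verbose=info", "verbose", false)
+//     returns a pointer to "info".
+//   ParseGoogleMockFlagValue("--gmock_catch_leaked_mocks",
+//                            "catch_leaked_mocks", true)
+//     returns a pointer to "" (the "=value" part is optional here).
+//   ParseGoogleMockFlagValue("--gtest_verbose=info", "verbose", false)
+//     returns NULL (wrong prefix).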
+
+// Parses a string for a Google Mock bool flag, in the form of
+// "--gmock_flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+static bool ParseGoogleMockBoolFlag(const char* str, const char* flag,
+ bool* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseGoogleMockFlagValue(str, flag, true);
+
+ // Aborts if the parsing failed.
+ if (value_str == NULL) return false;
+
+ // Converts the string value to a bool.
+ *value = !(*value_str == '0' || *value_str == 'f' || *value_str == 'F');
+ return true;
+}
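+
+// For illustration, "--gmock_catch_leaked_mocks=0" (as well as "=f" or "=F")
+// sets the flag to false; "--gmock_catch_leaked_mocks" with no value, or any
+// other value, sets it to true.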
+
+// Parses a string for a Google Mock string flag, in the form of
+// "--gmock_flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+static bool ParseGoogleMockStringFlag(const char* str, const char* flag,
+ String* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseGoogleMockFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == NULL) return false;
+
+ // Sets *value to the value of the flag.
+ *value = value_str;
+ return true;
+}
+
+// The internal implementation of InitGoogleMock().
+//
+// The type parameter CharType can be instantiated to either char or
+// wchar_t.
+template <typename CharType>
+void InitGoogleMockImpl(int* argc, CharType** argv) {
+ // Makes sure Google Test is initialized. InitGoogleTest() is
+ // idempotent, so it's fine if the user has already called it.
+ InitGoogleTest(argc, argv);
+ if (*argc <= 0) return;
+
+ for (int i = 1; i != *argc; i++) {
+ const String arg_string = StreamableToString(argv[i]);
+ const char* const arg = arg_string.c_str();
+
+ // Do we see a Google Mock flag?
+ if (ParseGoogleMockBoolFlag(arg, "catch_leaked_mocks",
+ &GMOCK_FLAG(catch_leaked_mocks)) ||
+ ParseGoogleMockStringFlag(arg, "verbose", &GMOCK_FLAG(verbose))) {
+ // Yes. Shift the remainder of the argv list left by one. Note
+ // that argv has (*argc + 1) elements, the last one always being
+ // NULL. The following loop moves the trailing NULL element as
+ // well.
+ for (int j = i; j != *argc; j++) {
+ argv[j] = argv[j + 1];
+ }
+
+ // Decrements the argument count.
+ (*argc)--;
+
+ // We also need to decrement the iterator as we just removed
+ // an element.
+ i--;
+ }
+ }
+}
+
+} // namespace internal
+
+// Initializes Google Mock. This must be called before running the
+// tests. In particular, it parses a command line for the flags that
+// Google Mock recognizes. Whenever a Google Mock flag is seen, it is
+// removed from argv, and *argc is decremented.
+//
+// No value is returned. Instead, the Google Mock flag variables are
+// updated.
+//
+// Since Google Test is needed for Google Mock to work, this function
+// also initializes Google Test and parses its flags, if that hasn't
+// been done.
+void InitGoogleMock(int* argc, char** argv) {
+ internal::InitGoogleMockImpl(argc, argv);
+}
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+void InitGoogleMock(int* argc, wchar_t** argv) {
+ internal::InitGoogleMockImpl(argc, argv);
+}
+
+} // namespace testing
diff --git a/internal/ceres/gmock_main.cc b/internal/ceres/gmock_main.cc
new file mode 100644
index 0000000..92b74d2
--- /dev/null
+++ b/internal/ceres/gmock_main.cc
@@ -0,0 +1,62 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+#include <iostream>
+#include "gflags/gflags.h"
+#include "glog/logging.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+// NOTE(keir): This flag is normally part of gtest within Google but isn't in
+// the open source Google Test, since it is build-system dependent. However,
+// Ceres needs it for its tests, so the flag is added here.
+DEFINE_string(test_srcdir, "", "The location of the source code.");
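+
+// For illustration, a test would typically use the flag like this (the data
+// file path is hypothetical):
+//
+//   std::string path = FLAGS_test_srcdir + "/data/problem.bin";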
+
+// MS C++ compiler/linker has a bug on Windows (not on Windows CE), which
+// causes a link error when _tmain is defined in a static library and UNICODE
+// is enabled. For this reason, main() is used instead of _tmain on Windows.
+// See the following link to track the current status of this bug:
+// http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?FeedbackID=394464 // NOLINT
+#if GTEST_OS_WINDOWS_MOBILE
+# include <tchar.h> // NOLINT
+
+int _tmain(int argc, TCHAR** argv) {
+#else
+int main(int argc, char** argv) {
+#endif // GTEST_OS_WINDOWS_MOBILE
+ google::ParseCommandLineFlags(&argc, &argv, true);
+ google::InitGoogleLogging(argv[0]);
+ // Since Google Mock depends on Google Test, InitGoogleMock() is
+ // also responsible for initializing Google Test. Therefore there's
+ // no need for calling testing::InitGoogleTest() separately.
+ testing::InitGoogleMock(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/internal/ceres/gradient_checker_test.cc b/internal/ceres/gradient_checker_test.cc
new file mode 100644
index 0000000..cf7ee20
--- /dev/null
+++ b/internal/ceres/gradient_checker_test.cc
@@ -0,0 +1,193 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wjr@google.com (William Rucklidge)
+//
+// This file contains tests for the GradientChecker class.
+
+#include "ceres/gradient_checker.h"
+
+#include <cmath>
+#include <cstdlib>
+#include <glog/logging.h>
+#include <vector>
+
+#include "ceres/cost_function.h"
+#include "ceres/random.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// We pick a (non-quadratic) function whose derivatives are easy:
+//
+// f = exp(- a' x).
+// df = - f a.
+//
+// where 'a' is a vector of the same size as 'x'. In the block
+// version, they are both block vectors, of course.
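+//
+// In components, d f / d x_{j,u} = -f * a_{j,u}, which is what Evaluate()
+// below writes into jacobians[j][u].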
+class GoodTestTerm : public CostFunction {
+ public:
+ GoodTestTerm(int arity, int const *dim) : arity_(arity) {
+ // Make 'arity' random vectors.
+ a_.resize(arity_);
+ for (int j = 0; j < arity_; ++j) {
+ a_[j].resize(dim[j]);
+ for (int u = 0; u < dim[j]; ++u) {
+ a_[j][u] = 2.0 * RandDouble() - 1.0;
+ }
+ }
+
+ for (int i = 0; i < arity_; i++) {
+ mutable_parameter_block_sizes()->push_back(dim[i]);
+ }
+ set_num_residuals(1);
+ }
+
+ bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ // Compute a . x.
+ double ax = 0;
+ for (int j = 0; j < arity_; ++j) {
+ for (int u = 0; u < parameter_block_sizes()[j]; ++u) {
+ ax += a_[j][u] * parameters[j][u];
+ }
+ }
+
+ // This is the cost, but also appears as a factor
+ // in the derivatives.
+ double f = *residuals = exp(-ax);
+
+ // Accumulate 1st order derivatives.
+ if (jacobians) {
+ for (int j = 0; j < arity_; ++j) {
+ if (jacobians[j]) {
+ for (int u = 0; u < parameter_block_sizes()[j]; ++u) {
+ // See comments before class.
+ jacobians[j][u] = - f * a_[j][u];
+ }
+ }
+ }
+ }
+
+ return true;
+ }
+
+ private:
+ int arity_;
+ vector<vector<double> > a_; // our vectors.
+};
+
+class BadTestTerm : public CostFunction {
+ public:
+ BadTestTerm(int arity, int const *dim) : arity_(arity) {
+ // Make 'arity' random vectors.
+ a_.resize(arity_);
+ for (int j = 0; j < arity_; ++j) {
+ a_[j].resize(dim[j]);
+ for (int u = 0; u < dim[j]; ++u) {
+ a_[j][u] = 2.0 * RandDouble() - 1.0;
+ }
+ }
+
+ for (int i = 0; i < arity_; i++) {
+ mutable_parameter_block_sizes()->push_back(dim[i]);
+ }
+ set_num_residuals(1);
+ }
+
+ bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ // Compute a . x.
+ double ax = 0;
+ for (int j = 0; j < arity_; ++j) {
+ for (int u = 0; u < parameter_block_sizes()[j]; ++u) {
+ ax += a_[j][u] * parameters[j][u];
+ }
+ }
+
+ // This is the cost, but also appears as a factor
+ // in the derivatives.
+ double f = *residuals = exp(-ax);
+
+ // Accumulate 1st order derivatives.
+ if (jacobians) {
+ for (int j = 0; j < arity_; ++j) {
+ if (jacobians[j]) {
+ for (int u = 0; u < parameter_block_sizes()[j]; ++u) {
+ // See comments before class.
+ jacobians[j][u] = - f * a_[j][u] + 0.001;
+ }
+ }
+ }
+ }
+
+ return true;
+ }
+
+ private:
+ int arity_;
+ vector<vector<double> > a_; // our vectors.
+};
+
+TEST(GradientChecker, SmokeTest) {
+ srand(5);
+
+ // Test with 3 blocks of size 2, 3 and 4.
+ int const arity = 3;
+ int const dim[arity] = { 2, 3, 4 };
+
+ // Make a random set of blocks.
+ FixedArray<double*> parameters(arity);
+ for (int j = 0; j < arity; ++j) {
+ parameters[j] = new double[dim[j]];
+ for (int u = 0; u < dim[j]; ++u) {
+ parameters[j][u] = 2.0 * RandDouble() - 1.0;
+ }
+ }
+
+ // Make a term and probe it.
+ GoodTestTerm good_term(arity, dim);
+ typedef GradientChecker<GoodTestTerm, 1, 2, 3, 4> GoodTermGradientChecker;
+ EXPECT_TRUE(GoodTermGradientChecker::Probe(
+ parameters.get(), 1e-6, &good_term, NULL));
+
+ BadTestTerm bad_term(arity, dim);
+ typedef GradientChecker<BadTestTerm, 1, 2, 3, 4> BadTermGradientChecker;
+ EXPECT_FALSE(BadTermGradientChecker::Probe(
+ parameters.get(), 1e-6, &bad_term, NULL));
+
+ for (int j = 0; j < arity; j++) {
+ delete[] parameters[j];
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/gradient_checking_cost_function.cc b/internal/ceres/gradient_checking_cost_function.cc
new file mode 100644
index 0000000..3edf95d
--- /dev/null
+++ b/internal/ceres/gradient_checking_cost_function.cc
@@ -0,0 +1,308 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/gradient_checking_cost_function.h"
+
+#include <algorithm>
+#include <cmath>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include "ceres/cost_function.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/parameter_block.h"
+#include "ceres/problem.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/runtime_numeric_diff_cost_function.h"
+#include "ceres/stringprintf.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
+// True if x and y have an absolute relative difference less than
+// relative_precision and false otherwise. Stores the relative and absolute
+// difference in relative/absolute_error if non-NULL.
+bool IsClose(double x, double y, double relative_precision,
+ double *relative_error,
+ double *absolute_error) {
+ double local_absolute_error;
+ double local_relative_error;
+ if (!absolute_error) {
+ absolute_error = &local_absolute_error;
+ }
+ if (!relative_error) {
+ relative_error = &local_relative_error;
+ }
+ *absolute_error = fabs(x - y);
+ *relative_error = *absolute_error / max(fabs(x), fabs(y));
+ if (x == 0 || y == 0) {
+ // If x or y is exactly zero, then relative difference doesn't have any
+ // meaning. Take the absolute difference instead.
+ *relative_error = *absolute_error;
+ }
+ return fabs(*relative_error) < fabs(relative_precision);
+}
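+
+// For illustration: IsClose(1.0, 1.001, 1e-2, NULL, NULL) is true (relative
+// error is about 1e-3), while IsClose(1.0, 1.1, 1e-2, NULL, NULL) is false
+// (relative error is about 0.09).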
+
+class GradientCheckingCostFunction : public CostFunction {
+ public:
+ GradientCheckingCostFunction(const CostFunction* function,
+ double relative_step_size,
+ double relative_precision,
+ const string& extra_info)
+ : function_(function),
+ finite_diff_cost_function_(
+ CreateRuntimeNumericDiffCostFunction(function,
+ CENTRAL,
+ relative_step_size)),
+ relative_precision_(relative_precision),
+ extra_info_(extra_info) {
+ *mutable_parameter_block_sizes() = function->parameter_block_sizes();
+ set_num_residuals(function->num_residuals());
+ }
+
+ virtual ~GradientCheckingCostFunction() { }
+
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ if (!jacobians) {
+ // Nothing to check in this case; just forward.
+ return function_->Evaluate(parameters, residuals, NULL);
+ }
+
+ int num_residuals = function_->num_residuals();
+
+ // Make space for the jacobians of the two methods.
+ const vector<int16>& block_sizes = function_->parameter_block_sizes();
+ vector<Matrix> term_jacobians(block_sizes.size());
+ vector<Matrix> finite_difference_jacobians(block_sizes.size());
+ vector<double*> term_jacobian_pointers(block_sizes.size());
+ vector<double*> finite_difference_jacobian_pointers(block_sizes.size());
+ for (int i = 0; i < block_sizes.size(); i++) {
+ term_jacobians[i].resize(num_residuals, block_sizes[i]);
+ term_jacobian_pointers[i] = term_jacobians[i].data();
+ finite_difference_jacobians[i].resize(num_residuals, block_sizes[i]);
+ finite_difference_jacobian_pointers[i] =
+ finite_difference_jacobians[i].data();
+ }
+
+ // Evaluate the derivative using the user supplied code.
+ if (!function_->Evaluate(parameters,
+ residuals,
+ &term_jacobian_pointers[0])) {
+ LOG(WARNING) << "Function evaluation failed.";
+ return false;
+ }
+
+ // Evaluate the derivative using numeric derivatives.
+ finite_diff_cost_function_->Evaluate(
+ parameters,
+ residuals,
+ &finite_difference_jacobian_pointers[0]);
+
+ // See if any elements have relative error larger than the threshold.
+ int num_bad_jacobian_components = 0;
+ double worst_relative_error = 0;
+
+    // Accumulate the error message for all the jacobians; it is only
+    // logged if at least one jacobian component is bad.
+ string m;
+ for (int k = 0; k < block_sizes.size(); k++) {
+ // Copy the original jacobian blocks into the jacobians array.
+ if (jacobians[k] != NULL) {
+ MatrixRef(jacobians[k],
+ term_jacobians[k].rows(),
+ term_jacobians[k].cols()) = term_jacobians[k];
+ }
+
+ StringAppendF(&m,
+ "========== "
+ "Jacobian for " "block %d: (%ld by %ld)) "
+ "==========\n",
+ k,
+ static_cast<long>(term_jacobians[k].rows()),
+ static_cast<long>(term_jacobians[k].cols()));
+ // The funny spacing creates appropriately aligned column headers.
+ m += " block row col user dx/dy num diff dx/dy "
+ "abs error relative error parameter residual\n";
+
+ for (int i = 0; i < term_jacobians[k].rows(); i++) {
+ for (int j = 0; j < term_jacobians[k].cols(); j++) {
+ double term_jacobian = term_jacobians[k](i, j);
+ double finite_jacobian = finite_difference_jacobians[k](i, j);
+ double relative_error, absolute_error;
+ bool bad_jacobian_entry =
+ !IsClose(term_jacobian,
+ finite_jacobian,
+ relative_precision_,
+ &relative_error,
+ &absolute_error);
+ worst_relative_error = std::max(worst_relative_error,
+ relative_error);
+
+ StringAppendF(&m, "%6d %4d %4d %17g %17g %17g %17g %17g %17g",
+ k, i, j,
+ term_jacobian, finite_jacobian,
+ absolute_error, relative_error,
+ parameters[k][j],
+ residuals[i]);
+
+ if (bad_jacobian_entry) {
+ num_bad_jacobian_components++;
+ StringAppendF(
+ &m, " ------ (%d,%d,%d) Relative error worse than %g",
+ k, i, j, relative_precision_);
+ }
+ m += "\n";
+ }
+ }
+ }
+
+    // If there were any bad jacobian components, dump comprehensive debug info.
+ if (num_bad_jacobian_components) {
+ string header = StringPrintf("Detected %d bad jacobian component(s). "
+ "Worst relative error was %g.\n",
+ num_bad_jacobian_components,
+ worst_relative_error);
+ if (!extra_info_.empty()) {
+ header += "Extra info for this residual: " + extra_info_ + "\n";
+ }
+ LOG(WARNING) << "\n" << header << m;
+ }
+ return true;
+ }
+
+ private:
+ const CostFunction* function_;
+ internal::scoped_ptr<CostFunction> finite_diff_cost_function_;
+ double relative_precision_;
+ string extra_info_;
+};
+
+} // namespace
+
+CostFunction *CreateGradientCheckingCostFunction(
+ const CostFunction *cost_function,
+ double relative_step_size,
+ double relative_precision,
+ const string& extra_info) {
+ return new GradientCheckingCostFunction(cost_function,
+ relative_step_size,
+ relative_precision,
+ extra_info);
+}
+
+ProblemImpl* CreateGradientCheckingProblemImpl(ProblemImpl* problem_impl,
+ double relative_step_size,
+ double relative_precision) {
+  // We create new CostFunctions by wrapping the original CostFunction
+  // in a gradient checking CostFunction, so it is okay for the new
+  // ProblemImpl to take ownership of these wrappers and destroy them.
+  // The LossFunctions and LocalParameterizations are reused, and since
+  // they are owned by problem_impl, gradient_checking_problem_impl
+  // should not take ownership of them.
+ Problem::Options gradient_checking_problem_options;
+ gradient_checking_problem_options.cost_function_ownership = TAKE_OWNERSHIP;
+ gradient_checking_problem_options.loss_function_ownership =
+ DO_NOT_TAKE_OWNERSHIP;
+ gradient_checking_problem_options.local_parameterization_ownership =
+ DO_NOT_TAKE_OWNERSHIP;
+
+ ProblemImpl* gradient_checking_problem_impl = new ProblemImpl(
+ gradient_checking_problem_options);
+
+ Program* program = problem_impl->mutable_program();
+
+ // For every ParameterBlock in problem_impl, create a new parameter
+ // block with the same local parameterization and constancy.
+ const vector<ParameterBlock*>& parameter_blocks = program->parameter_blocks();
+ for (int i = 0; i < parameter_blocks.size(); ++i) {
+ ParameterBlock* parameter_block = parameter_blocks[i];
+ gradient_checking_problem_impl->AddParameterBlock(
+ parameter_block->mutable_user_state(),
+ parameter_block->Size(),
+ parameter_block->mutable_local_parameterization());
+
+ if (parameter_block->IsConstant()) {
+ gradient_checking_problem_impl->SetParameterBlockConstant(
+ parameter_block->mutable_user_state());
+ }
+ }
+
+ // For every ResidualBlock in problem_impl, create a new
+ // ResidualBlock by wrapping its CostFunction inside a
+ // GradientCheckingCostFunction.
+ const vector<ResidualBlock*>& residual_blocks = program->residual_blocks();
+ for (int i = 0; i < residual_blocks.size(); ++i) {
+ ResidualBlock* residual_block = residual_blocks[i];
+
+ // Build a human readable string which identifies the
+ // ResidualBlock. This is used by the GradientCheckingCostFunction
+ // when logging debugging information.
+ string extra_info = StringPrintf(
+ "Residual block id %d; depends on parameters [", i);
+ vector<double*> parameter_blocks;
+ for (int j = 0; j < residual_block->NumParameterBlocks(); ++j) {
+ ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
+ parameter_blocks.push_back(parameter_block->mutable_user_state());
+ StringAppendF(&extra_info, "%p", parameter_block->mutable_user_state());
+ extra_info += (j < residual_block->NumParameterBlocks() - 1) ? ", " : "]";
+ }
+
+ // Wrap the original CostFunction in a GradientCheckingCostFunction.
+ CostFunction* gradient_checking_cost_function =
+ CreateGradientCheckingCostFunction(residual_block->cost_function(),
+ relative_step_size,
+ relative_precision,
+ extra_info);
+
+ // The const_cast is necessary because
+ // ProblemImpl::AddResidualBlock can potentially take ownership of
+ // the LossFunction, but in this case we are guaranteed that this
+ // will not be the case, so this const_cast is harmless.
+ gradient_checking_problem_impl->AddResidualBlock(
+ gradient_checking_cost_function,
+ const_cast<LossFunction*>(residual_block->loss_function()),
+ parameter_blocks);
+ }
+
+ return gradient_checking_problem_impl;
+}
+
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/gradient_checking_cost_function.h b/internal/ceres/gradient_checking_cost_function.h
new file mode 100644
index 0000000..d49c8e6
--- /dev/null
+++ b/internal/ceres/gradient_checking_cost_function.h
@@ -0,0 +1,85 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_GRADIENT_CHECKING_COST_FUNCTION_H_
+#define CERES_INTERNAL_GRADIENT_CHECKING_COST_FUNCTION_H_
+
+#include <string>
+
+#include "ceres/cost_function.h"
+
+namespace ceres {
+namespace internal {
+
+class ProblemImpl;
+
+// Creates a CostFunction that checks the jacobians computed by cost_function
+// against finite difference estimates. Bad results are logged; the required
+// precision is controlled by relative_precision and the numeric
+// differentiation step size is controlled by relative_step_size. See solver.h
+// for a better explanation of relative_step_size. Caller owns the result.
+//
+// The condition enforced is that
+//
+// (J_actual(i, j) - J_numeric(i, j))
+// ------------------------------------ < relative_precision
+// max(J_actual(i, j), J_numeric(i, j))
+//
+// where J_actual(i, j) is the jacobian as computed by the supplied cost
+// function (by the user) and J_numeric is the jacobian as computed by finite
+// differences.
+//
+// Note: This is quite inefficient and is intended only for debugging.
+CostFunction* CreateGradientCheckingCostFunction(
+ const CostFunction* cost_function,
+ double relative_step_size,
+ double relative_precision,
+ const string& extra_info);
+
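+// For illustration only (my_cost_function, problem and x are hypothetical),
+// a typical use wraps a user supplied cost function before adding it to a
+// problem:
+//
+//   CostFunction* checked_cost_function =
+//       CreateGradientCheckingCostFunction(my_cost_function,
+//                                          1e-6,   // relative_step_size
+//                                          1e-4,   // relative_precision
+//                                          "my residual");
+//   problem.AddResidualBlock(checked_cost_function, NULL, x);
+//
+// Note that the returned wrapper does not take ownership of
+// my_cost_function.
+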
+// Creates a new ProblemImpl object from the input problem_impl, where
+// each CostFunction in problem_impl is wrapped inside a
+// GradientCheckingCostFunction. This gives us a ProblemImpl object
+// which checks its derivatives against estimates from numeric
+// differentiation every time a ResidualBlock is evaluated.
+//
+// relative_step_size and relative_precision are parameters to control
+// the numeric differentiation and the relative tolerance between the
+// jacobian computed by the CostFunctions in problem_impl and
+// jacobians obtained by numerically differentiating them. For more
+// details see the documentation for
+// CreateGradientCheckingCostFunction above.
+ProblemImpl* CreateGradientCheckingProblemImpl(ProblemImpl* problem_impl,
+ double relative_step_size,
+ double relative_precision);
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_GRADIENT_CHECKING_COST_FUNCTION_H_
diff --git a/internal/ceres/gradient_checking_cost_function_test.cc b/internal/ceres/gradient_checking_cost_function_test.cc
new file mode 100644
index 0000000..ac06503
--- /dev/null
+++ b/internal/ceres/gradient_checking_cost_function_test.cc
@@ -0,0 +1,417 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/gradient_checking_cost_function.h"
+
+#include <cmath>
+#include <vector>
+#include "ceres/cost_function.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/loss_function.h"
+#include "ceres/parameter_block.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/random.h"
+#include "ceres/residual_block.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gmock/gmock.h"
+#include "gmock/mock-log.h"
+#include "gtest/gtest.h"
+
+using testing::AllOf;
+using testing::AnyNumber;
+using testing::HasSubstr;
+using testing::ScopedMockLog;
+using testing::_;
+
+namespace ceres {
+namespace internal {
+
+// Pick a (non-quadratic) function whose derivatives are easy:
+//
+// f = exp(- a' x).
+// df = - f a.
+//
+// where 'a' is a vector of the same size as 'x'. In the block
+// version, they are both block vectors, of course.
+template<int bad_block = 1, int bad_variable = 2>
+class TestTerm : public CostFunction {
+ public:
+ // The constructor of this function needs to know the number
+ // of blocks desired, and the size of each block.
+ TestTerm(int arity, int const *dim) : arity_(arity) {
+ // Make 'arity' random vectors.
+ a_.resize(arity_);
+ for (int j = 0; j < arity_; ++j) {
+ a_[j].resize(dim[j]);
+ for (int u = 0; u < dim[j]; ++u) {
+ a_[j][u] = 2.0 * RandDouble() - 1.0;
+ }
+ }
+
+ for (int i = 0; i < arity_; i++) {
+ mutable_parameter_block_sizes()->push_back(dim[i]);
+ }
+ set_num_residuals(1);
+ }
+
+ bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ // Compute a . x.
+ double ax = 0;
+ for (int j = 0; j < arity_; ++j) {
+ for (int u = 0; u < parameter_block_sizes()[j]; ++u) {
+ ax += a_[j][u] * parameters[j][u];
+ }
+ }
+
+ // This is the cost, but also appears as a factor
+ // in the derivatives.
+ double f = *residuals = exp(-ax);
+
+ // Accumulate 1st order derivatives.
+ if (jacobians) {
+ for (int j = 0; j < arity_; ++j) {
+ if (jacobians[j]) {
+ for (int u = 0; u < parameter_block_sizes()[j]; ++u) {
+ // See comments before class.
+ jacobians[j][u] = - f * a_[j][u];
+
+ if (bad_block == j && bad_variable == u) {
+ // Whoopsiedoopsie! Deliberately introduce a faulty jacobian entry
+ // like what happens when users make an error in their jacobian
+ // computations. This should get detected.
+ LOG(INFO) << "Poisoning jacobian for parameter block " << j
+ << ", row 0, column " << u;
+ jacobians[j][u] += 500;
+ }
+ }
+ }
+ }
+ }
+
+ return true;
+ }
+
+ private:
+ int arity_;
+ vector<vector<double> > a_;
+};
+
+TEST(GradientCheckingCostFunction, ResidualsAndJacobiansArePreservedTest) {
+ srand(5);
+
+ // Test with 3 blocks of size 2, 3 and 4.
+ int const arity = 3;
+ int const dim[arity] = { 2, 3, 4 };
+
+ // Make a random set of blocks.
+ vector<double*> parameters(arity);
+ for (int j = 0; j < arity; ++j) {
+ parameters[j] = new double[dim[j]];
+ for (int u = 0; u < dim[j]; ++u) {
+ parameters[j][u] = 2.0 * RandDouble() - 1.0;
+ }
+ }
+
+ double original_residual;
+ double residual;
+ vector<double*> original_jacobians(arity);
+ vector<double*> jacobians(arity);
+
+ for (int j = 0; j < arity; ++j) {
+ // Since residual is one dimensional the jacobians have the same
+ // size as the parameter blocks.
+ jacobians[j] = new double[dim[j]];
+ original_jacobians[j] = new double[dim[j]];
+ }
+
+ const double kRelativeStepSize = 1e-6;
+ const double kRelativePrecision = 1e-4;
+
+ TestTerm<-1, -1> term(arity, dim);
+ scoped_ptr<CostFunction> gradient_checking_cost_function(
+ CreateGradientCheckingCostFunction(&term,
+ kRelativeStepSize,
+ kRelativePrecision,
+ "Ignored."));
+ term.Evaluate(&parameters[0],
+ &original_residual,
+ &original_jacobians[0]);
+
+ gradient_checking_cost_function->Evaluate(&parameters[0],
+ &residual,
+ &jacobians[0]);
+ EXPECT_EQ(original_residual, residual);
+
+ for (int j = 0; j < arity; j++) {
+ for (int k = 0; k < dim[j]; ++k) {
+ EXPECT_EQ(original_jacobians[j][k], jacobians[j][k]);
+ }
+
+ delete[] parameters[j];
+ delete[] jacobians[j];
+ delete[] original_jacobians[j];
+ }
+}
+
+TEST(GradientCheckingCostFunction, SmokeTest) {
+ srand(5);
+
+ // Test with 3 blocks of size 2, 3 and 4.
+ int const arity = 3;
+ int const dim[arity] = { 2, 3, 4 };
+
+ // Make a random set of blocks.
+ vector<double*> parameters(arity);
+ for (int j = 0; j < arity; ++j) {
+ parameters[j] = new double[dim[j]];
+ for (int u = 0; u < dim[j]; ++u) {
+ parameters[j][u] = 2.0 * RandDouble() - 1.0;
+ }
+ }
+
+ double residual;
+ vector<double*> jacobians(arity);
+ for (int j = 0; j < arity; ++j) {
+ // Since residual is one dimensional the jacobians have the same size as the
+ // parameter blocks.
+ jacobians[j] = new double[dim[j]];
+ }
+
+ const double kRelativeStepSize = 1e-6;
+ const double kRelativePrecision = 1e-4;
+
+ // Should have one term that's bad, causing everything to get dumped.
+ LOG(INFO) << "Bad gradient";
+ {
+ TestTerm<1, 2> term(arity, dim);
+ scoped_ptr<CostFunction> gradient_checking_cost_function(
+ CreateGradientCheckingCostFunction(&term,
+ kRelativeStepSize,
+ kRelativePrecision,
+ "Fuzzy bananas"));
+
+ ScopedMockLog log;
+ EXPECT_CALL(log, Log(_, _, _)).Times(AnyNumber());
+ EXPECT_CALL(log, Log(WARNING, _,
+ AllOf(HasSubstr("(1,0,2) Relative error worse than"),
+ HasSubstr("Fuzzy bananas"))));
+
+ gradient_checking_cost_function->Evaluate(&parameters[0],
+ &residual,
+ &jacobians[0]);
+ }
+
+ // The gradient is correct, so no errors are reported.
+ LOG(INFO) << "Good gradient";
+ {
+ TestTerm<-1, -1> term(arity, dim);
+ scoped_ptr<CostFunction> gradient_checking_cost_function(
+ CreateGradientCheckingCostFunction(&term,
+ kRelativeStepSize,
+ kRelativePrecision,
+ "Ignored."));
+
+ ScopedMockLog log;
+ EXPECT_CALL(log, Log(_, _, _)).Times(0);
+
+ gradient_checking_cost_function->Evaluate(&parameters[0],
+ &residual,
+ &jacobians[0]);
+ }
+
+ for (int j = 0; j < arity; j++) {
+ delete[] parameters[j];
+ delete[] jacobians[j];
+ }
+}
+
+// The following three classes are for the purposes of defining
+// function signatures. They have dummy Evaluate functions.
+
+// Trivial cost function that accepts a single argument.
+class UnaryCostFunction : public CostFunction {
+ public:
+ UnaryCostFunction(int num_residuals, int16 parameter_block_size) {
+ set_num_residuals(num_residuals);
+ mutable_parameter_block_sizes()->push_back(parameter_block_size);
+ }
+ virtual ~UnaryCostFunction() {}
+
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ for (int i = 0; i < num_residuals(); ++i) {
+ residuals[i] = 1;
+ }
+ return true;
+ }
+};
+
+// Trivial cost function that accepts two arguments.
+class BinaryCostFunction: public CostFunction {
+ public:
+ BinaryCostFunction(int num_residuals,
+ int16 parameter_block1_size,
+ int16 parameter_block2_size) {
+ set_num_residuals(num_residuals);
+ mutable_parameter_block_sizes()->push_back(parameter_block1_size);
+ mutable_parameter_block_sizes()->push_back(parameter_block2_size);
+ }
+
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ for (int i = 0; i < num_residuals(); ++i) {
+ residuals[i] = 2;
+ }
+ return true;
+ }
+};
+
+// Trivial cost function that accepts three arguments.
+class TernaryCostFunction: public CostFunction {
+ public:
+ TernaryCostFunction(int num_residuals,
+ int16 parameter_block1_size,
+ int16 parameter_block2_size,
+ int16 parameter_block3_size) {
+ set_num_residuals(num_residuals);
+ mutable_parameter_block_sizes()->push_back(parameter_block1_size);
+ mutable_parameter_block_sizes()->push_back(parameter_block2_size);
+ mutable_parameter_block_sizes()->push_back(parameter_block3_size);
+ }
+
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ for (int i = 0; i < num_residuals(); ++i) {
+ residuals[i] = 3;
+ }
+ return true;
+ }
+};
+
+// Verify that the two ParameterBlocks are formed from the same user
+// array and have the same LocalParameterization object.
+void ParameterBlocksAreEquivalent(const ParameterBlock* left,
+ const ParameterBlock* right) {
+ CHECK_NOTNULL(left);
+ CHECK_NOTNULL(right);
+ EXPECT_EQ(left->user_state(), right->user_state());
+ EXPECT_EQ(left->Size(), right->Size());
+ EXPECT_EQ(left->LocalSize(), right->LocalSize());
+ EXPECT_EQ(left->local_parameterization(), right->local_parameterization());
+ EXPECT_EQ(left->IsConstant(), right->IsConstant());
+}
+
+TEST(GradientCheckingProblemImpl, ProblemDimensionsMatch) {
+ // Parameter blocks with arbitrarily chosen initial values.
+ double x[] = {1.0, 2.0, 3.0};
+ double y[] = {4.0, 5.0, 6.0, 7.0};
+ double z[] = {8.0, 9.0, 10.0, 11.0, 12.0};
+ double w[] = {13.0, 14.0, 15.0, 16.0};
+
+ ProblemImpl problem_impl;
+ problem_impl.AddParameterBlock(x, 3);
+ problem_impl.AddParameterBlock(y, 4);
+ problem_impl.SetParameterBlockConstant(y);
+ problem_impl.AddParameterBlock(z, 5);
+ problem_impl.AddParameterBlock(w, 4, new QuaternionParameterization);
+ problem_impl.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
+ problem_impl.AddResidualBlock(new BinaryCostFunction(6, 5, 4) ,
+ NULL, z, y);
+ problem_impl.AddResidualBlock(new BinaryCostFunction(3, 3, 5),
+ new TrivialLoss, x, z);
+ problem_impl.AddResidualBlock(new BinaryCostFunction(7, 5, 3),
+ NULL, z, x);
+ problem_impl.AddResidualBlock(new TernaryCostFunction(1, 5, 3, 4),
+ NULL, z, x, y);
+
+ scoped_ptr<ProblemImpl> gradient_checking_problem_impl(
+ CreateGradientCheckingProblemImpl(&problem_impl, 1.0, 1.0));
+
+ // The dimensions of the two problems match.
+ EXPECT_EQ(problem_impl.NumParameterBlocks(),
+ gradient_checking_problem_impl->NumParameterBlocks());
+ EXPECT_EQ(problem_impl.NumResidualBlocks(),
+ gradient_checking_problem_impl->NumResidualBlocks());
+
+ EXPECT_EQ(problem_impl.NumParameters(),
+ gradient_checking_problem_impl->NumParameters());
+ EXPECT_EQ(problem_impl.NumResiduals(),
+ gradient_checking_problem_impl->NumResiduals());
+
+ const Program& program = problem_impl.program();
+ const Program& gradient_checking_program =
+ gradient_checking_problem_impl->program();
+
+  // Since we added the ParameterBlocks and ResidualBlocks explicitly,
+  // they should be in the same order in the two programs. It is
+  // possible that this may change due to implementation changes to
+  // Program, but that is not expected to be the case, and writing code
+  // to anticipate that possibility is not worth the extra complexity
+  // in this test.
+ for (int i = 0; i < program.parameter_blocks().size(); ++i) {
+ ParameterBlocksAreEquivalent(
+ program.parameter_blocks()[i],
+ gradient_checking_program.parameter_blocks()[i]);
+ }
+
+ for (int i = 0; i < program.residual_blocks().size(); ++i) {
+ // Compare the sizes of the two ResidualBlocks.
+ const ResidualBlock* original_residual_block =
+ program.residual_blocks()[i];
+ const ResidualBlock* new_residual_block =
+ gradient_checking_program.residual_blocks()[i];
+ EXPECT_EQ(original_residual_block->NumParameterBlocks(),
+ new_residual_block->NumParameterBlocks());
+ EXPECT_EQ(original_residual_block->NumResiduals(),
+ new_residual_block->NumResiduals());
+ EXPECT_EQ(original_residual_block->NumScratchDoublesForEvaluate(),
+ new_residual_block->NumScratchDoublesForEvaluate());
+
+ // Verify that the ParameterBlocks for the two residuals are equivalent.
+ for (int j = 0; j < original_residual_block->NumParameterBlocks(); ++j) {
+ ParameterBlocksAreEquivalent(
+ original_residual_block->parameter_blocks()[j],
+ new_residual_block->parameter_blocks()[j]);
+ }
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/graph.h b/internal/ceres/graph.h
new file mode 100644
index 0000000..e080489
--- /dev/null
+++ b/internal/ceres/graph.h
@@ -0,0 +1,160 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_GRAPH_H_
+#define CERES_INTERNAL_GRAPH_H_
+
+#include <limits>
+#include <glog/logging.h>
+#include "ceres/integral_types.h"
+#include "ceres/map_util.h"
+#include "ceres/collections_port.h"
+#include "ceres/internal/macros.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+// A weighted undirected graph templated over the vertex ids. Vertex
+// should be hashable and comparable.
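+//
+// A minimal usage sketch (illustrative only, assuming integer vertex
+// ids):
+//
+//   Graph<int> graph;
+//   graph.AddVertex(0, 1.0);
+//   graph.AddVertex(1, 2.0);
+//   graph.AddEdge(0, 1, 0.5);
+//   graph.EdgeWeight(0, 1);  // 0.5; the edge is undirected, so
+//   graph.EdgeWeight(1, 0);  // also returns 0.5.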
+template <typename Vertex>
+class Graph {
+ public:
+ Graph() {}
+
+ // Add a weighted vertex. If the vertex already exists in the graph,
+ // its weight is set to the new weight.
+ void AddVertex(const Vertex& vertex, double weight) {
+ if (vertices_.find(vertex) == vertices_.end()) {
+ vertices_.insert(vertex);
+ edges_[vertex] = HashSet<Vertex>();
+ }
+ vertex_weights_[vertex] = weight;
+ }
+
+ // Uses weight = 1.0. If vertex already exists, its weight is set to
+ // 1.0.
+ void AddVertex(const Vertex& vertex) {
+ AddVertex(vertex, 1.0);
+ }
+
+ bool RemoveVertex(const Vertex& vertex) {
+ if (vertices_.find(vertex) == vertices_.end()) {
+ return false;
+ }
+
+ vertices_.erase(vertex);
+ vertex_weights_.erase(vertex);
+ const HashSet<Vertex>& sinks = edges_[vertex];
+ for (typename HashSet<Vertex>::const_iterator it = sinks.begin();
+ it != sinks.end(); ++it) {
+ if (vertex < *it) {
+ edge_weights_.erase(make_pair(vertex, *it));
+ } else {
+ edge_weights_.erase(make_pair(*it, vertex));
+ }
+ edges_[*it].erase(vertex);
+ }
+
+ edges_.erase(vertex);
+ return true;
+ }
+
+// Add a weighted edge between vertex1 and vertex2. Calling
+ // AddEdge on a pair of vertices which do not exist in the graph yet
+ // will result in undefined behavior.
+ //
+ // It is legal to call this method repeatedly for the same set of
+ // vertices.
+ void AddEdge(const Vertex& vertex1, const Vertex& vertex2, double weight) {
+ DCHECK(vertices_.find(vertex1) != vertices_.end());
+ DCHECK(vertices_.find(vertex2) != vertices_.end());
+
+ if (edges_[vertex1].insert(vertex2).second) {
+ edges_[vertex2].insert(vertex1);
+ }
+
+ if (vertex1 < vertex2) {
+ edge_weights_[make_pair(vertex1, vertex2)] = weight;
+ } else {
+ edge_weights_[make_pair(vertex2, vertex1)] = weight;
+ }
+ }
+
+ // Uses weight = 1.0.
+ void AddEdge(const Vertex& vertex1, const Vertex& vertex2) {
+ AddEdge(vertex1, vertex2, 1.0);
+ }
+
+ // Calling VertexWeight on a vertex not in the graph will result in
+ // undefined behavior.
+ double VertexWeight(const Vertex& vertex) const {
+ return FindOrDie(vertex_weights_, vertex);
+ }
+
+ // Calling EdgeWeight on a pair of vertices where either one of the
+ // vertices is not present in the graph will result in undefined
+ // behaviour. If there is no edge connecting vertex1 and vertex2,
+ // the edge weight is zero.
+ double EdgeWeight(const Vertex& vertex1, const Vertex& vertex2) const {
+ if (vertex1 < vertex2) {
+ return FindWithDefault(edge_weights_, make_pair(vertex1, vertex2), 0.0);
+ } else {
+ return FindWithDefault(edge_weights_, make_pair(vertex2, vertex1), 0.0);
+ }
+ }
+
+ // Calling Neighbors on a vertex not in the graph will result in
+ // undefined behaviour.
+ const HashSet<Vertex>& Neighbors(const Vertex& vertex) const {
+ return FindOrDie(edges_, vertex);
+ }
+
+ const HashSet<Vertex>& vertices() const {
+ return vertices_;
+ }
+
+ static double InvalidWeight() {
+ return std::numeric_limits<double>::quiet_NaN();
+  }
+
+ private:
+ HashSet<Vertex> vertices_;
+ HashMap<Vertex, double> vertex_weights_;
+ HashMap<Vertex, HashSet<Vertex> > edges_;
+ HashMap<pair<Vertex, Vertex>, double> edge_weights_;
+
+ CERES_DISALLOW_COPY_AND_ASSIGN(Graph);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_GRAPH_H_
diff --git a/internal/ceres/graph_algorithms.h b/internal/ceres/graph_algorithms.h
new file mode 100644
index 0000000..3b42d93
--- /dev/null
+++ b/internal/ceres/graph_algorithms.h
@@ -0,0 +1,270 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Various algorithms that operate on undirected graphs.
+
+#ifndef CERES_INTERNAL_GRAPH_ALGORITHMS_H_
+#define CERES_INTERNAL_GRAPH_ALGORITHMS_H_
+
+#include <vector>
+#include <glog/logging.h>
+#include "ceres/collections_port.h"
+#include "ceres/graph.h"
+
+namespace ceres {
+namespace internal {
+
+// Compare two vertices of a graph by their degrees.
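+//
+// Ties between vertices of equal degree are broken by the vertex ids
+// themselves, so the comparison defines a total order. A typical
+// (illustrative) use is sorting a list of vertices by degree:
+//
+//   sort(vertex_queue.begin(), vertex_queue.end(),
+//        VertexDegreeLessThan<int>(graph));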
+template <typename Vertex>
+class VertexDegreeLessThan {
+ public:
+ explicit VertexDegreeLessThan(const Graph<Vertex>& graph)
+ : graph_(graph) {}
+
+ bool operator()(const Vertex& lhs, const Vertex& rhs) const {
+ if (graph_.Neighbors(lhs).size() == graph_.Neighbors(rhs).size()) {
+ return lhs < rhs;
+ }
+ return graph_.Neighbors(lhs).size() < graph_.Neighbors(rhs).size();
+ }
+
+ private:
+ const Graph<Vertex>& graph_;
+};
+
+// Order the vertices of a graph using its (approximately) largest
+// independent set, where an independent set of a graph is a set of
+// vertices that have no edges connecting them. The maximum
+// independent set problem is NP-Hard, but there are effective
+// approximation algorithms available. The implementation here uses a
+// breadth first search that explores the vertices in order of
+// increasing degree. The same idea is used by Saad & Li in "MIQR: A
+// multilevel incomplete QR preconditioner for large sparse
+// least-squares problems", SIMAX, 2007.
+//
+// Given an undirected graph G(V,E), the algorithm is a greedy BFS
+// search where the vertices are explored in increasing order of their
+// degree. The output vector ordering contains the elements of the
+// independent set S in increasing order of their degree, followed by
+// the elements of V - S in increasing order of degree. The return
+// value of the function is the cardinality of S.
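+//
+// For example (illustrative), on the chain graph 0-1-2-3-4 the
+// independent set found is {0, 2, 4}; the function returns 3 and
+// ordering contains {0, 2, 4} (in some order) followed by {1, 3}.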
+template <typename Vertex>
+int IndependentSetOrdering(const Graph<Vertex>& graph,
+ vector<Vertex>* ordering) {
+ const HashSet<Vertex>& vertices = graph.vertices();
+ const int num_vertices = vertices.size();
+
+ CHECK_NOTNULL(ordering);
+ ordering->clear();
+ ordering->reserve(num_vertices);
+
+ // Colors for labeling the graph during the BFS.
+ const char kWhite = 0;
+ const char kGrey = 1;
+ const char kBlack = 2;
+
+ // Mark all vertices white.
+ HashMap<Vertex, char> vertex_color;
+ vector<Vertex> vertex_queue;
+ for (typename HashSet<Vertex>::const_iterator it = vertices.begin();
+ it != vertices.end();
+ ++it) {
+ vertex_color[*it] = kWhite;
+ vertex_queue.push_back(*it);
+ }
+
+
+ sort(vertex_queue.begin(), vertex_queue.end(),
+ VertexDegreeLessThan<Vertex>(graph));
+
+ // Iterate over vertex_queue. Pick the first white vertex, add it
+ // to the independent set. Mark it black and its neighbors grey.
+ for (int i = 0; i < vertex_queue.size(); ++i) {
+ const Vertex& vertex = vertex_queue[i];
+ if (vertex_color[vertex] != kWhite) {
+ continue;
+ }
+
+ ordering->push_back(vertex);
+ vertex_color[vertex] = kBlack;
+ const HashSet<Vertex>& neighbors = graph.Neighbors(vertex);
+ for (typename HashSet<Vertex>::const_iterator it = neighbors.begin();
+ it != neighbors.end();
+ ++it) {
+ vertex_color[*it] = kGrey;
+ }
+ }
+
+ int independent_set_size = ordering->size();
+
+ // Iterate over the vertices and add all the grey vertices to the
+ // ordering. At this stage there should only be black or grey
+ // vertices in the graph.
+ for (typename vector<Vertex>::const_iterator it = vertex_queue.begin();
+ it != vertex_queue.end();
+ ++it) {
+ const Vertex vertex = *it;
+ DCHECK(vertex_color[vertex] != kWhite);
+ if (vertex_color[vertex] != kBlack) {
+ ordering->push_back(vertex);
+ }
+ }
+
+ CHECK_EQ(ordering->size(), num_vertices);
+ return independent_set_size;
+}
+
+// Find the connected component for a vertex, implemented using the
+// find-and-update operation of a disjoint-set (union-find) structure.
+// Recursively traverse the disjoint-set structure until you reach a
+// vertex whose connected component id is the vertex itself. Along the
+// way, update the connected component of every vertex visited (path
+// compression). This updating is what gives this data structure its
+// efficiency.
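+//
+// For example (illustrative), if *union_find maps 3 -> 2, 2 -> 1,
+// 1 -> 0 and 0 -> 0, then FindConnectedComponent(3, union_find)
+// returns 0 and, as a side effect, rewrites the entries for 1, 2 and
+// 3 so that they all point directly at 0.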
+template <typename Vertex>
+Vertex FindConnectedComponent(const Vertex& vertex,
+ HashMap<Vertex, Vertex>* union_find) {
+ typename HashMap<Vertex, Vertex>::iterator it = union_find->find(vertex);
+ DCHECK(it != union_find->end());
+ if (it->second != vertex) {
+ it->second = FindConnectedComponent(it->second, union_find);
+ }
+
+ return it->second;
+}
+
+// Compute a degree two constrained Maximum Spanning Tree/forest of
+// the input graph. Caller owns the result.
+//
+// Finding a degree 2 spanning tree of a graph is not always
+// possible. For example, a star graph, i.e. a graph with n nodes
+// where one node is connected to the other n-1 nodes, does not have
+// any spanning tree of degree less than n-1. Even if such a tree
+// exists, finding it is NP-Hard.
+//
+// We get around both of these problems by using a greedy, degree
+// constrained variant of Kruskal's algorithm. We start with a graph
+// G_T with the same vertex set V as the input graph G(V,E) but an
+// empty edge set. We then iterate over the edges of G in decreasing
+// order of weight, adding them to G_T if doing so does not create a
+// cycle in G_T and the degree of all the vertices in G_T remains
+// bounded by two. This O(|E|) algorithm results in a degree-2
+// spanning forest, or a collection of linear paths that span the
+// graph G.
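+//
+// For example (illustrative), on a star graph with center 0 and edges
+// (0,1), (0,2), (0,3), (0,4) of weights 1, 2, 3 and 4, the returned
+// forest contains only the two heaviest edges (0,3) and (0,4), and
+// vertices 1 and 2 are left isolated.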
+template <typename Vertex>
+Graph<Vertex>*
+Degree2MaximumSpanningForest(const Graph<Vertex>& graph) {
+ // Array of edges sorted in decreasing order of their weights.
+ vector<pair<double, pair<Vertex, Vertex> > > weighted_edges;
+ Graph<Vertex>* forest = new Graph<Vertex>();
+
+ // Disjoint-set to keep track of the connected components in the
+ // maximum spanning tree.
+ HashMap<Vertex, Vertex> disjoint_set;
+
+  // Sort the edges of the graph in decreasing order of their
+ // weight. Also add the vertices of the graph to the Maximum
+ // Spanning Tree graph and set each vertex to be its own connected
+ // component in the disjoint_set structure.
+ const HashSet<Vertex>& vertices = graph.vertices();
+ for (typename HashSet<Vertex>::const_iterator it = vertices.begin();
+ it != vertices.end();
+ ++it) {
+ const Vertex vertex1 = *it;
+ forest->AddVertex(vertex1, graph.VertexWeight(vertex1));
+ disjoint_set[vertex1] = vertex1;
+
+ const HashSet<Vertex>& neighbors = graph.Neighbors(vertex1);
+ for (typename HashSet<Vertex>::const_iterator it2 = neighbors.begin();
+ it2 != neighbors.end();
+ ++it2) {
+ const Vertex vertex2 = *it2;
+ if (vertex1 >= vertex2) {
+ continue;
+ }
+ const double weight = graph.EdgeWeight(vertex1, vertex2);
+ weighted_edges.push_back(make_pair(weight, make_pair(vertex1, vertex2)));
+ }
+ }
+
+  // The elements of this vector are pairs <edge_weight, edge>.
+  // Sorting it using the reverse iterators gives us the edges in
+  // decreasing order of weight.
+ sort(weighted_edges.rbegin(), weighted_edges.rend());
+
+ // Greedily add edges to the spanning tree/forest as long as they do
+ // not violate the degree/cycle constraint.
+  for (int i = 0; i < weighted_edges.size(); ++i) {
+ const pair<Vertex, Vertex>& edge = weighted_edges[i].second;
+ const Vertex vertex1 = edge.first;
+ const Vertex vertex2 = edge.second;
+
+ // Check if either of the vertices are of degree 2 already, in
+ // which case adding this edge will violate the degree 2
+ // constraint.
+ if ((forest->Neighbors(vertex1).size() == 2) ||
+ (forest->Neighbors(vertex2).size() == 2)) {
+ continue;
+ }
+
+ // Find the id of the connected component to which the two
+ // vertices belong to. If the id is the same, it means that the
+ // two of them are already connected to each other via some other
+ // vertex, and adding this edge will create a cycle.
+ Vertex root1 = FindConnectedComponent(vertex1, &disjoint_set);
+ Vertex root2 = FindConnectedComponent(vertex2, &disjoint_set);
+
+ if (root1 == root2) {
+ continue;
+ }
+
+ // This edge can be added, add an edge in either direction with
+ // the same weight as the original graph.
+ const double edge_weight = graph.EdgeWeight(vertex1, vertex2);
+ forest->AddEdge(vertex1, vertex2, edge_weight);
+ forest->AddEdge(vertex2, vertex1, edge_weight);
+
+    // Connect the two connected components by updating the
+    // disjoint_set structure. Always attach the connected component
+    // with the greater root id to the one with the smaller root id.
+    // This should ensure shallower trees, for quicker lookup.
+ if (root2 < root1) {
+ std::swap(root1, root2);
+    }
+
+ disjoint_set[root2] = root1;
+ }
+ return forest;
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_GRAPH_ALGORITHMS_H_
diff --git a/internal/ceres/graph_algorithms_test.cc b/internal/ceres/graph_algorithms_test.cc
new file mode 100644
index 0000000..d5d4843
--- /dev/null
+++ b/internal/ceres/graph_algorithms_test.cc
@@ -0,0 +1,200 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/graph_algorithms.h"
+
+#include <algorithm>
+#include "gtest/gtest.h"
+#include "ceres/collections_port.h"
+#include "ceres/graph.h"
+#include "ceres/internal/port.h"
+#include "ceres/internal/scoped_ptr.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(IndependentSetOrdering, Chain) {
+ Graph<int> graph;
+ graph.AddVertex(0);
+ graph.AddVertex(1);
+ graph.AddVertex(2);
+ graph.AddVertex(3);
+ graph.AddVertex(4);
+
+ graph.AddEdge(0, 1);
+ graph.AddEdge(1, 2);
+ graph.AddEdge(2, 3);
+ graph.AddEdge(3, 4);
+
+ // 0-1-2-3-4
+ // 0, 2, 4 should be in the independent set.
+ vector<int> ordering;
+ int independent_set_size = IndependentSetOrdering(graph, &ordering);
+
+ sort(ordering.begin(), ordering.begin() + 3);
+ sort(ordering.begin() + 3, ordering.end());
+
+ EXPECT_EQ(independent_set_size, 3);
+ EXPECT_EQ(ordering.size(), 5);
+ EXPECT_EQ(ordering[0], 0);
+ EXPECT_EQ(ordering[1], 2);
+ EXPECT_EQ(ordering[2], 4);
+ EXPECT_EQ(ordering[3], 1);
+ EXPECT_EQ(ordering[4], 3);
+}
+
+TEST(IndependentSetOrdering, Star) {
+ Graph<int> graph;
+ graph.AddVertex(0);
+ graph.AddVertex(1);
+ graph.AddVertex(2);
+ graph.AddVertex(3);
+ graph.AddVertex(4);
+
+ graph.AddEdge(0, 1);
+ graph.AddEdge(0, 2);
+ graph.AddEdge(0, 3);
+ graph.AddEdge(0, 4);
+
+ // 1
+ // |
+ // 4-0-2
+ // |
+ // 3
+  // 1, 2, 3, 4 should be in the independent set.
+ vector<int> ordering;
+ int independent_set_size = IndependentSetOrdering(graph, &ordering);
+ EXPECT_EQ(independent_set_size, 4);
+ EXPECT_EQ(ordering.size(), 5);
+ EXPECT_EQ(ordering[4], 0);
+ sort(ordering.begin(), ordering.begin() + 4);
+ EXPECT_EQ(ordering[0], 1);
+ EXPECT_EQ(ordering[1], 2);
+ EXPECT_EQ(ordering[2], 3);
+ EXPECT_EQ(ordering[3], 4);
+}
+
+TEST(Degree2MaximumSpanningForest, PreserveWeights) {
+ Graph<int> graph;
+ graph.AddVertex(0, 1.0);
+ graph.AddVertex(1, 2.0);
+ graph.AddEdge(0, 1, 0.5);
+ graph.AddEdge(1, 0, 0.5);
+
+ scoped_ptr<Graph<int> > forest(Degree2MaximumSpanningForest(graph));
+
+ const HashSet<int>& vertices = forest->vertices();
+ EXPECT_EQ(vertices.size(), 2);
+ EXPECT_EQ(forest->VertexWeight(0), 1.0);
+ EXPECT_EQ(forest->VertexWeight(1), 2.0);
+  EXPECT_EQ(forest->Neighbors(0).size(), 1);
+ EXPECT_EQ(forest->EdgeWeight(0, 1), 0.5);
+}
+
+TEST(Degree2MaximumSpanningForest, StarGraph) {
+ Graph<int> graph;
+ graph.AddVertex(0);
+ graph.AddVertex(1);
+ graph.AddVertex(2);
+ graph.AddVertex(3);
+ graph.AddVertex(4);
+
+ graph.AddEdge(0, 1, 1.0);
+ graph.AddEdge(0, 2, 2.0);
+ graph.AddEdge(0, 3, 3.0);
+ graph.AddEdge(0, 4, 4.0);
+
+ scoped_ptr<Graph<int> > forest(Degree2MaximumSpanningForest(graph));
+ const HashSet<int>& vertices = forest->vertices();
+ EXPECT_EQ(vertices.size(), 5);
+
+ {
+ const HashSet<int>& neighbors = forest->Neighbors(0);
+ EXPECT_EQ(neighbors.size(), 2);
+ EXPECT_TRUE(neighbors.find(4) != neighbors.end());
+ EXPECT_TRUE(neighbors.find(3) != neighbors.end());
+ }
+
+ {
+ const HashSet<int>& neighbors = forest->Neighbors(3);
+ EXPECT_EQ(neighbors.size(), 1);
+ EXPECT_TRUE(neighbors.find(0) != neighbors.end());
+ }
+
+ {
+ const HashSet<int>& neighbors = forest->Neighbors(4);
+ EXPECT_EQ(neighbors.size(), 1);
+ EXPECT_TRUE(neighbors.find(0) != neighbors.end());
+ }
+
+ {
+ const HashSet<int>& neighbors = forest->Neighbors(1);
+ EXPECT_EQ(neighbors.size(), 0);
+ }
+
+ {
+ const HashSet<int>& neighbors = forest->Neighbors(2);
+ EXPECT_EQ(neighbors.size(), 0);
+ }
+}
+
+TEST(VertexDegreeLessThan, TotalOrdering) {
+ Graph<int> graph;
+ graph.AddVertex(0);
+ graph.AddVertex(1);
+ graph.AddVertex(2);
+ graph.AddVertex(3);
+
+ // 0-1
+ // |
+ // 2-3
+ // 0,1 and 2 have degree 1 and 3 has degree 2.
+ graph.AddEdge(0, 1, 1.0);
+ graph.AddEdge(2, 3, 1.0);
+ VertexDegreeLessThan<int> less_than(graph);
+
+ for (int i = 0; i < 4; ++i) {
+    EXPECT_FALSE(less_than(i, i)) << "Failing vertex: " << i;
+ for (int j = 0; j < 4; ++j) {
+ if (i != j) {
+ EXPECT_TRUE(less_than(i, j) ^ less_than(j, i))
+ << "Failing vertex pair: " << i << " " << j;
+ }
+ }
+ }
+
+ for (int i = 0; i < 3; ++i) {
+ EXPECT_TRUE(less_than(i, 3));
+ EXPECT_FALSE(less_than(3, i));
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/graph_test.cc b/internal/ceres/graph_test.cc
new file mode 100644
index 0000000..85b80bf
--- /dev/null
+++ b/internal/ceres/graph_test.cc
@@ -0,0 +1,107 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/graph.h"
+
+#include "gtest/gtest.h"
+#include "ceres/collections_port.h"
+#include "ceres/internal/scoped_ptr.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(Graph, EmptyGraph) {
+ Graph<int> graph;
+ EXPECT_EQ(graph.vertices().size(), 0);
+}
+
+TEST(Graph, AddVertexAndEdge) {
+ Graph<int> graph;
+ graph.AddVertex(0, 1.0);
+ graph.AddVertex(1, 2.0);
+ graph.AddEdge(0, 1, 0.5);
+
+ const HashSet<int>& vertices = graph.vertices();
+ EXPECT_EQ(vertices.size(), 2);
+ EXPECT_EQ(graph.VertexWeight(0), 1.0);
+ EXPECT_EQ(graph.VertexWeight(1), 2.0);
+ EXPECT_EQ(graph.Neighbors(0).size(), 1);
+ EXPECT_EQ(graph.Neighbors(1).size(), 1);
+ EXPECT_EQ(graph.EdgeWeight(0, 1), 0.5);
+ EXPECT_EQ(graph.EdgeWeight(1, 0), 0.5);
+}
+
+TEST(Graph, AddVertexIdempotence) {
+ Graph<int> graph;
+ graph.AddVertex(0, 1.0);
+ graph.AddVertex(1, 2.0);
+ graph.AddEdge(0, 1, 0.5);
+
+ const HashSet<int>& vertices = graph.vertices();
+
+ EXPECT_EQ(vertices.size(), 2);
+
+ // Try adding the vertex again with a new weight.
+ graph.AddVertex(0, 3.0);
+ EXPECT_EQ(vertices.size(), 2);
+
+ // The vertex weight is reset.
+ EXPECT_EQ(graph.VertexWeight(0), 3.0);
+
+ // Rest of the graph remains the same.
+ EXPECT_EQ(graph.VertexWeight(1), 2.0);
+ EXPECT_EQ(graph.Neighbors(0).size(), 1);
+ EXPECT_EQ(graph.Neighbors(1).size(), 1);
+ EXPECT_EQ(graph.EdgeWeight(0, 1), 0.5);
+ EXPECT_EQ(graph.EdgeWeight(1, 0), 0.5);
+}
+
+TEST(Graph, DieOnNonExistentVertex) {
+ Graph<int> graph;
+ graph.AddVertex(0, 1.0);
+ graph.AddVertex(1, 2.0);
+ graph.AddEdge(0, 1, 0.5);
+
+ EXPECT_DEATH_IF_SUPPORTED(graph.VertexWeight(2), "key not found");
+ EXPECT_DEATH_IF_SUPPORTED(graph.Neighbors(2), "key not found");
+}
+
+TEST(Graph, NonExistentEdge) {
+ Graph<int> graph;
+ graph.AddVertex(0, 1.0);
+ graph.AddVertex(1, 2.0);
+ graph.AddEdge(0, 1, 0.5);
+
+ // Default value for non-existent edges is 0.
+ EXPECT_EQ(graph.EdgeWeight(2, 3), 0);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/gtest/gtest.h b/internal/ceres/gtest/gtest.h
new file mode 100644
index 0000000..3143bd6
--- /dev/null
+++ b/internal/ceres/gtest/gtest.h
@@ -0,0 +1,19537 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the public API for Google Test. It should be
+// included by any test program that uses Google Test.
+//
+// IMPORTANT NOTE: Due to limitation of the C++ language, we have to
+// leave some internal implementation details in this header file.
+// They are clearly marked by comments like this:
+//
+// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+//
+// Such code is NOT meant to be used by a user directly, and is subject
+// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user
+// program!
+//
+// Acknowledgment: Google Test borrowed the idea of automatic test
+// registration from Barthelemy Dagenais' (barthelemy@prologique.com)
+// easyUnit framework.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_H_
+
+#include <limits>
+#include <vector>
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file declares functions and macros used internally by
+// Google Test. They are subject to change without notice.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan)
+//
+// Low-level types and utilities for porting Google Test to various
+// platforms. They are subject to change without notice. DO NOT USE
+// THEM IN USER CODE.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+
+// The user can define the following macros in the build script to
+// control Google Test's behavior. If the user doesn't define a macro
+// in this list, Google Test will define it.
+//
+// GTEST_HAS_CLONE - Define it to 1/0 to indicate that clone(2)
+// is/isn't available.
+// GTEST_HAS_EXCEPTIONS - Define it to 1/0 to indicate that exceptions
+// are enabled.
+// GTEST_HAS_GLOBAL_STRING - Define it to 1/0 to indicate that ::string
+// is/isn't available (some systems define
+// ::string, which is different to std::string).
+//   GTEST_HAS_GLOBAL_WSTRING - Define it to 1/0 to indicate that ::wstring
+// is/isn't available (some systems define
+// ::wstring, which is different to std::wstring).
+// GTEST_HAS_POSIX_RE - Define it to 1/0 to indicate that POSIX regular
+// expressions are/aren't available.
+// GTEST_HAS_PTHREAD - Define it to 1/0 to indicate that <pthread.h>
+// is/isn't available.
+// GTEST_HAS_RTTI - Define it to 1/0 to indicate that RTTI is/isn't
+// enabled.
+// GTEST_HAS_STD_WSTRING - Define it to 1/0 to indicate that
+// std::wstring does/doesn't work (Google Test can
+// be used where std::wstring is unavailable).
+// GTEST_HAS_TR1_TUPLE - Define it to 1/0 to indicate tr1::tuple
+// is/isn't available.
+// GTEST_HAS_SEH - Define it to 1/0 to indicate whether the
+// compiler supports Microsoft's "Structured
+// Exception Handling".
+// GTEST_HAS_STREAM_REDIRECTION
+// - Define it to 1/0 to indicate whether the
+// platform supports I/O stream redirection using
+// dup() and dup2().
+// GTEST_USE_OWN_TR1_TUPLE - Define it to 1/0 to indicate whether Google
+// Test's own tr1 tuple implementation should be
+// used. Unused when the user sets
+// GTEST_HAS_TR1_TUPLE to 0.
+// GTEST_LINKED_AS_SHARED_LIBRARY
+// - Define to 1 when compiling tests that use
+// Google Test as a shared library (known as
+// DLL on Windows).
+// GTEST_CREATE_SHARED_LIBRARY
+// - Define to 1 when compiling Google Test itself
+// as a shared library.
+
+// This header defines the following utilities:
+//
+// Macros indicating the current platform (defined to 1 if compiled on
+// the given platform; otherwise undefined):
+// GTEST_OS_AIX - IBM AIX
+// GTEST_OS_CYGWIN - Cygwin
+// GTEST_OS_HPUX - HP-UX
+// GTEST_OS_LINUX - Linux
+// GTEST_OS_LINUX_ANDROID - Google Android
+// GTEST_OS_MAC - Mac OS X
+// GTEST_OS_NACL - Google Native Client (NaCl)
+// GTEST_OS_SOLARIS - Sun Solaris
+// GTEST_OS_SYMBIAN - Symbian
+// GTEST_OS_WINDOWS - Windows (Desktop, MinGW, or Mobile)
+// GTEST_OS_WINDOWS_DESKTOP - Windows Desktop
+// GTEST_OS_WINDOWS_MINGW - MinGW
+// GTEST_OS_WINDOWS_MOBILE - Windows Mobile
+// GTEST_OS_ZOS - z/OS
+//
+// Among the platforms, Cygwin, Linux, Mac OS X, and Windows have the
+// most stable support. Since core members of the Google Test project
+// don't have access to other platforms, support for them may be less
+// stable. If you notice any problems on your platform, please notify
+// googletestframework@googlegroups.com (patches for fixing them are
+// even more welcome!).
+//
+// Note that it is possible that none of the GTEST_OS_* macros are defined.
+//
+// Macros indicating available Google Test features (defined to 1 if
+// the corresponding feature is supported; otherwise undefined):
+// GTEST_HAS_COMBINE - the Combine() function (for value-parameterized
+// tests)
+// GTEST_HAS_DEATH_TEST - death tests
+// GTEST_HAS_PARAM_TEST - value-parameterized tests
+// GTEST_HAS_TYPED_TEST - typed tests
+// GTEST_HAS_TYPED_TEST_P - type-parameterized tests
+// GTEST_USES_POSIX_RE - enhanced POSIX regex is used. Do not confuse with
+// GTEST_HAS_POSIX_RE (see above) which users can
+// define themselves.
+// GTEST_USES_SIMPLE_RE - our own simple regex is used;
+// the above two are mutually exclusive.
+// GTEST_CAN_COMPARE_NULL - accepts untyped NULL in EXPECT_EQ().
+//
+// Macros for basic C++ coding:
+// GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning.
+// GTEST_ATTRIBUTE_UNUSED_ - declares that a class' instances or a
+// variable don't have to be used.
+// GTEST_DISALLOW_ASSIGN_ - disables operator=.
+// GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=.
+// GTEST_MUST_USE_RESULT_ - declares that a function's result must be used.
+//
+// Synchronization:
+// Mutex, MutexLock, ThreadLocal, GetThreadCount()
+// - synchronization primitives.
+// GTEST_IS_THREADSAFE - defined to 1 to indicate that the above
+// synchronization primitives have real implementations
+// and Google Test is thread-safe; or 0 otherwise.
+//
+// Template meta programming:
+// is_pointer - as in TR1; needed on Symbian and IBM XL C/C++ only.
+// IteratorTraits - partial implementation of std::iterator_traits, which
+// is not available in libCstd when compiled with Sun C++.
+//
+// Smart pointers:
+// scoped_ptr - as in TR2.
+//
+// Regular expressions:
+// RE - a simple regular expression class using the POSIX
+// Extended Regular Expression syntax on UNIX-like
+// platforms, or a reduced regular exception syntax on
+// other platforms, including Windows.
+//
+// Logging:
+// GTEST_LOG_() - logs messages at the specified severity level.
+// LogToStderr() - directs all log messages to stderr.
+// FlushInfoLog() - flushes informational log messages.
+//
+// Stdout and stderr capturing:
+// CaptureStdout() - starts capturing stdout.
+// GetCapturedStdout() - stops capturing stdout and returns the captured
+// string.
+// CaptureStderr() - starts capturing stderr.
+// GetCapturedStderr() - stops capturing stderr and returns the captured
+// string.
+//
+// Integer types:
+//   TypeWithSize  - maps an integer to an int type.
+// Int32, UInt32, Int64, UInt64, TimeInMillis
+// - integers of known sizes.
+// BiggestInt - the biggest signed integer type.
+//
+// Command-line utilities:
+// GTEST_FLAG() - references a flag.
+// GTEST_DECLARE_*() - declares a flag.
+// GTEST_DEFINE_*() - defines a flag.
+// GetArgvs() - returns the command line as a vector of strings.
+//
+// Environment variable utilities:
+// GetEnv() - gets the value of an environment variable.
+// BoolFromGTestEnv() - parses a bool environment variable.
+// Int32FromGTestEnv() - parses an Int32 environment variable.
+// StringFromGTestEnv() - parses a string environment variable.
+
+#include <ctype.h> // for isspace, etc
+#include <stddef.h> // for ptrdiff_t
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifndef _WIN32_WCE
+# include <sys/types.h>
+# include <sys/stat.h>
+#endif // !_WIN32_WCE
+
+#include <iostream> // NOLINT
+#include <sstream> // NOLINT
+#include <string> // NOLINT
+
+#define GTEST_DEV_EMAIL_ "googletestframework@@googlegroups.com"
+#define GTEST_FLAG_PREFIX_ "gtest_"
+#define GTEST_FLAG_PREFIX_DASH_ "gtest-"
+#define GTEST_FLAG_PREFIX_UPPER_ "GTEST_"
+#define GTEST_NAME_ "Google Test"
+#define GTEST_PROJECT_URL_ "http://code.google.com/p/googletest/"
+
+// Determines the version of gcc that is used to compile this.
+#ifdef __GNUC__
+// 40302 means version 4.3.2.
+# define GTEST_GCC_VER_ \
+ (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)
+#endif // __GNUC__
+
+// Determines the platform on which Google Test is compiled.
+#ifdef __CYGWIN__
+# define GTEST_OS_CYGWIN 1
+#elif defined __SYMBIAN32__
+# define GTEST_OS_SYMBIAN 1
+#elif defined _WIN32
+# define GTEST_OS_WINDOWS 1
+# ifdef _WIN32_WCE
+# define GTEST_OS_WINDOWS_MOBILE 1
+# elif defined(__MINGW__) || defined(__MINGW32__)
+# define GTEST_OS_WINDOWS_MINGW 1
+# else
+# define GTEST_OS_WINDOWS_DESKTOP 1
+# endif // _WIN32_WCE
+#elif defined __APPLE__
+# define GTEST_OS_MAC 1
+#elif defined __linux__
+# define GTEST_OS_LINUX 1
+# ifdef ANDROID
+# define GTEST_OS_LINUX_ANDROID 1
+# endif // ANDROID
+#elif defined __MVS__
+# define GTEST_OS_ZOS 1
+#elif defined(__sun) && defined(__SVR4)
+# define GTEST_OS_SOLARIS 1
+#elif defined(_AIX)
+# define GTEST_OS_AIX 1
+#elif defined(__hpux)
+# define GTEST_OS_HPUX 1
+#elif defined __native_client__
+# define GTEST_OS_NACL 1
+#endif // __CYGWIN__
+
+// Brings in definitions for functions used in the testing::internal::posix
+// namespace (read, write, close, chdir, isatty, stat). We do not currently
+// use them on Windows Mobile.
+#if !GTEST_OS_WINDOWS
+// This assumes that non-Windows OSes provide unistd.h. For OSes where this
+// is not the case, we need to include headers that provide the functions
+// mentioned above.
+# include <unistd.h>
+# if !GTEST_OS_NACL
+// TODO(vladl@google.com): Remove this condition when Native Client SDK adds
+// strings.h (tracked in
+// http://code.google.com/p/nativeclient/issues/detail?id=1175).
+# include <strings.h> // Native Client doesn't provide strings.h.
+# endif
+#elif !GTEST_OS_WINDOWS_MOBILE
+# include <direct.h>
+# include <io.h>
+#endif
+
+// Defines this to true iff Google Test can use POSIX regular expressions.
+#ifndef GTEST_HAS_POSIX_RE
+# define GTEST_HAS_POSIX_RE (!GTEST_OS_WINDOWS)
+#endif
+
+#if GTEST_HAS_POSIX_RE
+
+// On some platforms, <regex.h> needs someone to define size_t, and
+// won't compile otherwise. We can #include it here as we already
+// included <stdlib.h>, which is guaranteed to define size_t through
+// <stddef.h>.
+# include <regex.h> // NOLINT
+
+# define GTEST_USES_POSIX_RE 1
+
+#elif GTEST_OS_WINDOWS
+
+// <regex.h> is not available on Windows. Use our own simple regex
+// implementation instead.
+# define GTEST_USES_SIMPLE_RE 1
+
+#else
+
+// <regex.h> may not be available on this platform. Use our own
+// simple regex implementation instead.
+# define GTEST_USES_SIMPLE_RE 1
+
+#endif // GTEST_HAS_POSIX_RE
+
+#ifndef GTEST_HAS_EXCEPTIONS
+// The user didn't tell us whether exceptions are enabled, so we need
+// to figure it out.
+# if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC's and C++Builder's implementations of the STL use the _HAS_EXCEPTIONS
+// macro to enable exceptions, so we'll do the same.
+// Assumes that exceptions are enabled by default.
+# ifndef _HAS_EXCEPTIONS
+# define _HAS_EXCEPTIONS 1
+# endif // _HAS_EXCEPTIONS
+# define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS
+# elif defined(__GNUC__) && __EXCEPTIONS
+// gcc defines __EXCEPTIONS to 1 iff exceptions are enabled.
+# define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__SUNPRO_CC)
+// Sun Pro CC supports exceptions. However, there is no compile-time way of
+// detecting whether they are enabled or not. Therefore, we assume that
+// they are enabled unless the user tells us otherwise.
+# define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__IBMCPP__) && __EXCEPTIONS
+// xlC defines __EXCEPTIONS to 1 iff exceptions are enabled.
+# define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__HP_aCC)
+// Exception handling is in effect by default in HP aCC compiler. It has to
+// be turned off by +noeh compiler option if desired.
+# define GTEST_HAS_EXCEPTIONS 1
+# else
+// For other compilers, we assume exceptions are disabled to be
+// conservative.
+# define GTEST_HAS_EXCEPTIONS 0
+# endif // defined(_MSC_VER) || defined(__BORLANDC__)
+#endif // GTEST_HAS_EXCEPTIONS
+
+#if !defined(GTEST_HAS_STD_STRING)
+// Even though we don't use this macro any longer, we keep it in case
+// some clients still depend on it.
+# define GTEST_HAS_STD_STRING 1
+#elif !GTEST_HAS_STD_STRING
+// The user told us that ::std::string isn't available.
+# error "Google Test cannot be used where ::std::string isn't available."
+#endif // !defined(GTEST_HAS_STD_STRING)
+
+#ifndef GTEST_HAS_GLOBAL_STRING
+// The user didn't tell us whether ::string is available, so we need
+// to figure it out.
+
+# define GTEST_HAS_GLOBAL_STRING 0
+
+#endif // GTEST_HAS_GLOBAL_STRING
+
+#ifndef GTEST_HAS_STD_WSTRING
+// The user didn't tell us whether ::std::wstring is available, so we need
+// to figure it out.
+// TODO(wan@google.com): uses autoconf to detect whether ::std::wstring
+// is available.
+
+// Cygwin 1.7 and below doesn't support ::std::wstring.
+// Solaris' libc++ doesn't support it either. Android has
+// no support for it at least as recent as Froyo (2.2).
+# define GTEST_HAS_STD_WSTRING \
+ (!(GTEST_OS_LINUX_ANDROID || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS))
+
+#endif // GTEST_HAS_STD_WSTRING
+
+#ifndef GTEST_HAS_GLOBAL_WSTRING
+// The user didn't tell us whether ::wstring is available, so we need
+// to figure it out.
+# define GTEST_HAS_GLOBAL_WSTRING \
+ (GTEST_HAS_STD_WSTRING && GTEST_HAS_GLOBAL_STRING)
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+// Determines whether RTTI is available.
+#ifndef GTEST_HAS_RTTI
+// The user didn't tell us whether RTTI is enabled, so we need to
+// figure it out.
+
+# ifdef _MSC_VER
+
+# ifdef _CPPRTTI // MSVC defines this macro iff RTTI is enabled.
+# define GTEST_HAS_RTTI 1
+# else
+# define GTEST_HAS_RTTI 0
+# endif
+
+// Starting with version 4.3.2, gcc defines __GXX_RTTI iff RTTI is enabled.
+# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40302)
+
+# ifdef __GXX_RTTI
+# define GTEST_HAS_RTTI 1
+# else
+# define GTEST_HAS_RTTI 0
+# endif // __GXX_RTTI
+
+// Starting with version 9.0 IBM Visual Age defines __RTTI_ALL__ to 1 if
+// both the typeid and dynamic_cast features are present.
+# elif defined(__IBMCPP__) && (__IBMCPP__ >= 900)
+
+# ifdef __RTTI_ALL__
+# define GTEST_HAS_RTTI 1
+# else
+# define GTEST_HAS_RTTI 0
+# endif
+
+# else
+
+// For all other compilers, we assume RTTI is enabled.
+# define GTEST_HAS_RTTI 1
+
+# endif // _MSC_VER
+
+#endif // GTEST_HAS_RTTI
+
+// It's this header's responsibility to #include <typeinfo> when RTTI
+// is enabled.
+#if GTEST_HAS_RTTI
+# include <typeinfo>
+#endif
+
+// Determines whether Google Test can use the pthreads library.
+#ifndef GTEST_HAS_PTHREAD
+// The user didn't tell us explicitly, so we assume pthreads support is
+// available on Linux and Mac.
+//
+// To disable threading support in Google Test, add -DGTEST_HAS_PTHREAD=0
+// to your compiler flags.
+# define GTEST_HAS_PTHREAD (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_HPUX)
+#endif // GTEST_HAS_PTHREAD
+
+#if GTEST_HAS_PTHREAD
+// gtest-port.h guarantees to #include <pthread.h> when GTEST_HAS_PTHREAD is
+// true.
+# include <pthread.h> // NOLINT
+
+// For timespec and nanosleep, used below.
+# include <time.h> // NOLINT
+#endif
+
+// Determines whether Google Test can use tr1/tuple. You can define
+// this macro to 0 to prevent Google Test from using tuple (any
+// feature depending on tuple will be disabled in this mode).
+#ifndef GTEST_HAS_TR1_TUPLE
+// The user didn't tell us not to do it, so we assume it's OK.
+# define GTEST_HAS_TR1_TUPLE 1
+#endif // GTEST_HAS_TR1_TUPLE
+
+// Determines whether Google Test's own tr1 tuple implementation
+// should be used.
+#ifndef GTEST_USE_OWN_TR1_TUPLE
+// The user didn't tell us, so we need to figure it out.
+
+// We use our own TR1 tuple if we aren't sure the user has an
+// implementation of it already. At this time, GCC 4.0.0+ and MSVC
+// 2010 are the only mainstream compilers that come with a TR1 tuple
+// implementation. NVIDIA's CUDA NVCC compiler pretends to be GCC by
+// defining __GNUC__ and friends, but cannot compile GCC's tuple
+// implementation. MSVC 2008 (9.0) provides TR1 tuple in a 323 MB
+// Feature Pack download, which we cannot assume the user has.
+# if (defined(__GNUC__) && !defined(__CUDACC__) && (GTEST_GCC_VER_ >= 40000)) \
+ || _MSC_VER >= 1600
+# define GTEST_USE_OWN_TR1_TUPLE 0
+# else
+# define GTEST_USE_OWN_TR1_TUPLE 1
+# endif
+
+#endif // GTEST_USE_OWN_TR1_TUPLE
+
+// To avoid conditional compilation everywhere, we make it
+// gtest-port.h's responsibility to #include the header implementing
+// tr1/tuple.
+#if GTEST_HAS_TR1_TUPLE
+
+# if GTEST_USE_OWN_TR1_TUPLE
+// This file was GENERATED by a script. DO NOT EDIT BY HAND!!!
+
+// Copyright 2009 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Implements a subset of TR1 tuple needed by Google Test and Google Mock.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+
+#include <utility> // For ::std::pair.
+
+// The compiler used in Symbian has a bug that prevents us from declaring the
+// tuple template as a friend (it complains that tuple is redefined). This
+// hack bypasses the bug by declaring the members that should otherwise be
+// private as public.
+// Sun Studio versions < 12 also have the above bug.
+#if defined(__SYMBIAN32__) || (defined(__SUNPRO_CC) && __SUNPRO_CC < 0x590)
+# define GTEST_DECLARE_TUPLE_AS_FRIEND_ public:
+#else
+# define GTEST_DECLARE_TUPLE_AS_FRIEND_ \
+ template <GTEST_10_TYPENAMES_(U)> friend class tuple; \
+ private:
+#endif
+
+// GTEST_n_TUPLE_(T) is the type of an n-tuple.
+#define GTEST_0_TUPLE_(T) tuple<>
+#define GTEST_1_TUPLE_(T) tuple<T##0, void, void, void, void, void, void, \
+ void, void, void>
+#define GTEST_2_TUPLE_(T) tuple<T##0, T##1, void, void, void, void, void, \
+ void, void, void>
+#define GTEST_3_TUPLE_(T) tuple<T##0, T##1, T##2, void, void, void, void, \
+ void, void, void>
+#define GTEST_4_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, void, void, void, \
+ void, void, void>
+#define GTEST_5_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, void, void, \
+ void, void, void>
+#define GTEST_6_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, void, \
+ void, void, void>
+#define GTEST_7_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ void, void, void>
+#define GTEST_8_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ T##7, void, void>
+#define GTEST_9_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ T##7, T##8, void>
+#define GTEST_10_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ T##7, T##8, T##9>
+
+// GTEST_n_TYPENAMES_(T) declares a list of n typenames.
+#define GTEST_0_TYPENAMES_(T)
+#define GTEST_1_TYPENAMES_(T) typename T##0
+#define GTEST_2_TYPENAMES_(T) typename T##0, typename T##1
+#define GTEST_3_TYPENAMES_(T) typename T##0, typename T##1, typename T##2
+#define GTEST_4_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3
+#define GTEST_5_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4
+#define GTEST_6_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5
+#define GTEST_7_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6
+#define GTEST_8_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6, typename T##7
+#define GTEST_9_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6, \
+ typename T##7, typename T##8
+#define GTEST_10_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6, \
+ typename T##7, typename T##8, typename T##9
+
+// In theory, defining stuff in the ::std namespace is undefined
+// behavior. We can do this as we are playing the role of a standard
+// library vendor.
+namespace std {
+namespace tr1 {
+
+template <typename T0 = void, typename T1 = void, typename T2 = void,
+ typename T3 = void, typename T4 = void, typename T5 = void,
+ typename T6 = void, typename T7 = void, typename T8 = void,
+ typename T9 = void>
+class tuple;
+
+// Anything in namespace gtest_internal is Google Test's INTERNAL
+// IMPLEMENTATION DETAIL and MUST NOT BE USED DIRECTLY in user code.
+namespace gtest_internal {
+
+// ByRef<T>::type is T if T is a reference; otherwise it's const T&.
+template <typename T>
+struct ByRef { typedef const T& type; }; // NOLINT
+template <typename T>
+struct ByRef<T&> { typedef T& type; }; // NOLINT
+
+// A handy wrapper for ByRef.
+#define GTEST_BY_REF_(T) typename ::std::tr1::gtest_internal::ByRef<T>::type
+
+// AddRef<T>::type is T if T is a reference; otherwise it's T&. This
+// is the same as tr1::add_reference<T>::type.
+template <typename T>
+struct AddRef { typedef T& type; }; // NOLINT
+template <typename T>
+struct AddRef<T&> { typedef T& type; }; // NOLINT
+
+// A handy wrapper for AddRef.
+#define GTEST_ADD_REF_(T) typename ::std::tr1::gtest_internal::AddRef<T>::type
+
+// A helper for implementing get<k>().
+template <int k> class Get;
+
+// A helper for implementing tuple_element<k, T>. kIndexValid is true
+// iff k < the number of fields in tuple type T.
+template <bool kIndexValid, int kIndex, class Tuple>
+struct TupleElement;
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 0, GTEST_10_TUPLE_(T)> { typedef T0 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 1, GTEST_10_TUPLE_(T)> { typedef T1 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 2, GTEST_10_TUPLE_(T)> { typedef T2 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 3, GTEST_10_TUPLE_(T)> { typedef T3 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 4, GTEST_10_TUPLE_(T)> { typedef T4 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 5, GTEST_10_TUPLE_(T)> { typedef T5 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 6, GTEST_10_TUPLE_(T)> { typedef T6 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 7, GTEST_10_TUPLE_(T)> { typedef T7 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 8, GTEST_10_TUPLE_(T)> { typedef T8 type; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 9, GTEST_10_TUPLE_(T)> { typedef T9 type; };
+
+} // namespace gtest_internal
+
+template <>
+class tuple<> {
+ public:
+ tuple() {}
+ tuple(const tuple& /* t */) {}
+ tuple& operator=(const tuple& /* t */) { return *this; }
+};
+
+template <GTEST_1_TYPENAMES_(T)>
+class GTEST_1_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0) : f0_(f0) {}
+
+ tuple(const tuple& t) : f0_(t.f0_) {}
+
+ template <GTEST_1_TYPENAMES_(U)>
+ tuple(const GTEST_1_TUPLE_(U)& t) : f0_(t.f0_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_1_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_1_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_1_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_1_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ return *this;
+ }
+
+ T0 f0_;
+};
+
+template <GTEST_2_TYPENAMES_(T)>
+class GTEST_2_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1) : f0_(f0),
+ f1_(f1) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_) {}
+
+ template <GTEST_2_TYPENAMES_(U)>
+ tuple(const GTEST_2_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_) {}
+ template <typename U0, typename U1>
+ tuple(const ::std::pair<U0, U1>& p) : f0_(p.first), f1_(p.second) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_2_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_2_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+ template <typename U0, typename U1>
+ tuple& operator=(const ::std::pair<U0, U1>& p) {
+ f0_ = p.first;
+ f1_ = p.second;
+ return *this;
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_2_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_2_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+};
+
+template <GTEST_3_TYPENAMES_(T)>
+class GTEST_3_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2) : f0_(f0), f1_(f1), f2_(f2) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}
+
+ template <GTEST_3_TYPENAMES_(U)>
+ tuple(const GTEST_3_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_3_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_3_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_3_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_3_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+};
+
+template <GTEST_4_TYPENAMES_(T)>
+class GTEST_4_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3) : f0_(f0), f1_(f1), f2_(f2),
+ f3_(f3) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_) {}
+
+ template <GTEST_4_TYPENAMES_(U)>
+ tuple(const GTEST_4_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_4_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_4_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_4_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_4_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+};
+
+template <GTEST_5_TYPENAMES_(T)>
+class GTEST_5_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3,
+ GTEST_BY_REF_(T4) f4) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_) {}
+
+ template <GTEST_5_TYPENAMES_(U)>
+ tuple(const GTEST_5_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_5_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_5_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_5_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_5_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+};
+
+template <GTEST_6_TYPENAMES_(T)>
+class GTEST_6_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+ f5_(f5) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_) {}
+
+ template <GTEST_6_TYPENAMES_(U)>
+ tuple(const GTEST_6_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_6_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_6_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_6_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_6_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+};
+
+template <GTEST_7_TYPENAMES_(T)>
+class GTEST_7_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6) : f0_(f0), f1_(f1), f2_(f2),
+ f3_(f3), f4_(f4), f5_(f5), f6_(f6) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}
+
+ template <GTEST_7_TYPENAMES_(U)>
+ tuple(const GTEST_7_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_7_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_7_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_7_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_7_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+};
+
+template <GTEST_8_TYPENAMES_(T)>
+class GTEST_8_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6,
+ GTEST_BY_REF_(T7) f7) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+ f5_(f5), f6_(f6), f7_(f7) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}
+
+ template <GTEST_8_TYPENAMES_(U)>
+ tuple(const GTEST_8_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_8_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_8_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_8_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_8_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ f7_ = t.f7_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+ T7 f7_;
+};
+
+template <GTEST_9_TYPENAMES_(T)>
+class GTEST_9_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,
+ GTEST_BY_REF_(T8) f8) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+ f5_(f5), f6_(f6), f7_(f7), f8_(f8) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}
+
+ template <GTEST_9_TYPENAMES_(U)>
+ tuple(const GTEST_9_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_9_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_9_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_9_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_9_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ f7_ = t.f7_;
+ f8_ = t.f8_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+ T7 f7_;
+ T8 f8_;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+class tuple {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_(),
+ f9_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,
+ GTEST_BY_REF_(T8) f8, GTEST_BY_REF_(T9) f9) : f0_(f0), f1_(f1), f2_(f2),
+ f3_(f3), f4_(f4), f5_(f5), f6_(f6), f7_(f7), f8_(f8), f9_(f9) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), f9_(t.f9_) {}
+
+ template <GTEST_10_TYPENAMES_(U)>
+ tuple(const GTEST_10_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_),
+ f9_(t.f9_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_10_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_10_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_10_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_10_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ f7_ = t.f7_;
+ f8_ = t.f8_;
+ f9_ = t.f9_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+ T7 f7_;
+ T8 f8_;
+ T9 f9_;
+};
+
+// 6.1.3.2 Tuple creation functions.
+
+// Known limitations: we don't support passing an
+// std::tr1::reference_wrapper<T> to make_tuple(). And we don't
+// implement tie().
+
+inline tuple<> make_tuple() { return tuple<>(); }
+
+template <GTEST_1_TYPENAMES_(T)>
+inline GTEST_1_TUPLE_(T) make_tuple(const T0& f0) {
+ return GTEST_1_TUPLE_(T)(f0);
+}
+
+template <GTEST_2_TYPENAMES_(T)>
+inline GTEST_2_TUPLE_(T) make_tuple(const T0& f0, const T1& f1) {
+ return GTEST_2_TUPLE_(T)(f0, f1);
+}
+
+template <GTEST_3_TYPENAMES_(T)>
+inline GTEST_3_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2) {
+ return GTEST_3_TUPLE_(T)(f0, f1, f2);
+}
+
+template <GTEST_4_TYPENAMES_(T)>
+inline GTEST_4_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3) {
+ return GTEST_4_TUPLE_(T)(f0, f1, f2, f3);
+}
+
+template <GTEST_5_TYPENAMES_(T)>
+inline GTEST_5_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4) {
+ return GTEST_5_TUPLE_(T)(f0, f1, f2, f3, f4);
+}
+
+template <GTEST_6_TYPENAMES_(T)>
+inline GTEST_6_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5) {
+ return GTEST_6_TUPLE_(T)(f0, f1, f2, f3, f4, f5);
+}
+
+template <GTEST_7_TYPENAMES_(T)>
+inline GTEST_7_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6) {
+ return GTEST_7_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6);
+}
+
+template <GTEST_8_TYPENAMES_(T)>
+inline GTEST_8_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7) {
+ return GTEST_8_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7);
+}
+
+template <GTEST_9_TYPENAMES_(T)>
+inline GTEST_9_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,
+ const T8& f8) {
+ return GTEST_9_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8);
+}
+
+template <GTEST_10_TYPENAMES_(T)>
+inline GTEST_10_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,
+ const T8& f8, const T9& f9) {
+ return GTEST_10_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9);
+}
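+
+// For example (an illustrative sketch only; the variable name t is
+// arbitrary), make_tuple() deduces the element types from its
+// arguments:
+//
+//   ::std::tr1::tuple<int, double> t = ::std::tr1::make_tuple(1, 2.5);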
+
+// 6.1.3.3 Tuple helper classes.
+
+template <typename Tuple> struct tuple_size;
+
+template <GTEST_0_TYPENAMES_(T)>
+struct tuple_size<GTEST_0_TUPLE_(T)> { static const int value = 0; };
+
+template <GTEST_1_TYPENAMES_(T)>
+struct tuple_size<GTEST_1_TUPLE_(T)> { static const int value = 1; };
+
+template <GTEST_2_TYPENAMES_(T)>
+struct tuple_size<GTEST_2_TUPLE_(T)> { static const int value = 2; };
+
+template <GTEST_3_TYPENAMES_(T)>
+struct tuple_size<GTEST_3_TUPLE_(T)> { static const int value = 3; };
+
+template <GTEST_4_TYPENAMES_(T)>
+struct tuple_size<GTEST_4_TUPLE_(T)> { static const int value = 4; };
+
+template <GTEST_5_TYPENAMES_(T)>
+struct tuple_size<GTEST_5_TUPLE_(T)> { static const int value = 5; };
+
+template <GTEST_6_TYPENAMES_(T)>
+struct tuple_size<GTEST_6_TUPLE_(T)> { static const int value = 6; };
+
+template <GTEST_7_TYPENAMES_(T)>
+struct tuple_size<GTEST_7_TUPLE_(T)> { static const int value = 7; };
+
+template <GTEST_8_TYPENAMES_(T)>
+struct tuple_size<GTEST_8_TUPLE_(T)> { static const int value = 8; };
+
+template <GTEST_9_TYPENAMES_(T)>
+struct tuple_size<GTEST_9_TUPLE_(T)> { static const int value = 9; };
+
+template <GTEST_10_TYPENAMES_(T)>
+struct tuple_size<GTEST_10_TUPLE_(T)> { static const int value = 10; };
+
+template <int k, class Tuple>
+struct tuple_element {
+ typedef typename gtest_internal::TupleElement<
+ k < (tuple_size<Tuple>::value), k, Tuple>::type type;
+};
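+
+// For example (illustrative only),
+// tuple_element<1, tuple<int, double> >::type is double.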
+
+#define GTEST_TUPLE_ELEMENT_(k, Tuple) typename tuple_element<k, Tuple >::type
+
+// 6.1.3.4 Element access.
+
+namespace gtest_internal {
+
+template <>
+class Get<0> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))
+ Field(Tuple& t) { return t.f0_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))
+ ConstField(const Tuple& t) { return t.f0_; }
+};
+
+template <>
+class Get<1> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))
+ Field(Tuple& t) { return t.f1_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))
+ ConstField(const Tuple& t) { return t.f1_; }
+};
+
+template <>
+class Get<2> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))
+ Field(Tuple& t) { return t.f2_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))
+ ConstField(const Tuple& t) { return t.f2_; }
+};
+
+template <>
+class Get<3> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))
+ Field(Tuple& t) { return t.f3_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))
+ ConstField(const Tuple& t) { return t.f3_; }
+};
+
+template <>
+class Get<4> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))
+ Field(Tuple& t) { return t.f4_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))
+ ConstField(const Tuple& t) { return t.f4_; }
+};
+
+template <>
+class Get<5> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))
+ Field(Tuple& t) { return t.f5_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))
+ ConstField(const Tuple& t) { return t.f5_; }
+};
+
+template <>
+class Get<6> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))
+ Field(Tuple& t) { return t.f6_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))
+ ConstField(const Tuple& t) { return t.f6_; }
+};
+
+template <>
+class Get<7> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))
+ Field(Tuple& t) { return t.f7_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))
+ ConstField(const Tuple& t) { return t.f7_; }
+};
+
+template <>
+class Get<8> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))
+ Field(Tuple& t) { return t.f8_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))
+ ConstField(const Tuple& t) { return t.f8_; }
+};
+
+template <>
+class Get<9> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))
+ Field(Tuple& t) { return t.f9_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))
+ ConstField(const Tuple& t) { return t.f9_; }
+};
+
+} // namespace gtest_internal
+
+template <int k, GTEST_10_TYPENAMES_(T)>
+GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T)))
+get(GTEST_10_TUPLE_(T)& t) {
+ return gtest_internal::Get<k>::Field(t);
+}
+
+template <int k, GTEST_10_TYPENAMES_(T)>
+GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T)))
+get(const GTEST_10_TUPLE_(T)& t) {
+ return gtest_internal::Get<k>::ConstField(t);
+}
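+
+// For example (an illustrative sketch only; t is an arbitrary tuple):
+//
+//   ::std::tr1::tuple<int, double> t(1, 2.5);
+//   ::std::tr1::get<0>(t) = 3;         // Sets the first field to 3.
+//   double d = ::std::tr1::get<1>(t);  // Reads the second field.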
+
+// 6.1.3.5 Relational operators
+
+// We only implement == and !=, as we don't have a need for the rest yet.
+
+namespace gtest_internal {
+
+// SameSizeTuplePrefixComparator<k, k>::Eq(t1, t2) returns true if the
+// first k fields of t1 equal the first k fields of t2.
+// SameSizeTuplePrefixComparator<k1, k2> would be a compiler error if
+// k1 != k2.
+template <int kSize1, int kSize2>
+struct SameSizeTuplePrefixComparator;
+
+template <>
+struct SameSizeTuplePrefixComparator<0, 0> {
+ template <class Tuple1, class Tuple2>
+ static bool Eq(const Tuple1& /* t1 */, const Tuple2& /* t2 */) {
+ return true;
+ }
+};
+
+template <int k>
+struct SameSizeTuplePrefixComparator<k, k> {
+ template <class Tuple1, class Tuple2>
+ static bool Eq(const Tuple1& t1, const Tuple2& t2) {
+ return SameSizeTuplePrefixComparator<k - 1, k - 1>::Eq(t1, t2) &&
+ ::std::tr1::get<k - 1>(t1) == ::std::tr1::get<k - 1>(t2);
+ }
+};
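+
+// For example (illustrative only),
+// SameSizeTuplePrefixComparator<2, 2>::Eq(t1, t2) expands to
+//
+//   SameSizeTuplePrefixComparator<1, 1>::Eq(t1, t2) &&
+//       ::std::tr1::get<1>(t1) == ::std::tr1::get<1>(t2),
+//
+// and the recursion bottoms out at the <0, 0> specialization above.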
+
+} // namespace gtest_internal
+
+template <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>
+inline bool operator==(const GTEST_10_TUPLE_(T)& t,
+ const GTEST_10_TUPLE_(U)& u) {
+ return gtest_internal::SameSizeTuplePrefixComparator<
+ tuple_size<GTEST_10_TUPLE_(T)>::value,
+ tuple_size<GTEST_10_TUPLE_(U)>::value>::Eq(t, u);
+}
+
+template <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>
+inline bool operator!=(const GTEST_10_TUPLE_(T)& t,
+ const GTEST_10_TUPLE_(U)& u) { return !(t == u); }
+
+// 6.1.4 Pairs.
+// Unimplemented.
+
+} // namespace tr1
+} // namespace std
+
+#undef GTEST_0_TUPLE_
+#undef GTEST_1_TUPLE_
+#undef GTEST_2_TUPLE_
+#undef GTEST_3_TUPLE_
+#undef GTEST_4_TUPLE_
+#undef GTEST_5_TUPLE_
+#undef GTEST_6_TUPLE_
+#undef GTEST_7_TUPLE_
+#undef GTEST_8_TUPLE_
+#undef GTEST_9_TUPLE_
+#undef GTEST_10_TUPLE_
+
+#undef GTEST_0_TYPENAMES_
+#undef GTEST_1_TYPENAMES_
+#undef GTEST_2_TYPENAMES_
+#undef GTEST_3_TYPENAMES_
+#undef GTEST_4_TYPENAMES_
+#undef GTEST_5_TYPENAMES_
+#undef GTEST_6_TYPENAMES_
+#undef GTEST_7_TYPENAMES_
+#undef GTEST_8_TYPENAMES_
+#undef GTEST_9_TYPENAMES_
+#undef GTEST_10_TYPENAMES_
+
+#undef GTEST_DECLARE_TUPLE_AS_FRIEND_
+#undef GTEST_BY_REF_
+#undef GTEST_ADD_REF_
+#undef GTEST_TUPLE_ELEMENT_
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+# elif GTEST_OS_SYMBIAN
+
+// On Symbian, BOOST_HAS_TR1_TUPLE causes Boost's TR1 tuple library to
+// use STLport's tuple implementation, which unfortunately doesn't
+// work as the copy of STLport distributed with Symbian is incomplete.
+// By making sure BOOST_HAS_TR1_TUPLE is undefined, we force Boost to
+// use its own tuple implementation.
+# ifdef BOOST_HAS_TR1_TUPLE
+# undef BOOST_HAS_TR1_TUPLE
+# endif // BOOST_HAS_TR1_TUPLE
+
+// This prevents <boost/tr1/detail/config.hpp>, which defines
+// BOOST_HAS_TR1_TUPLE, from being #included by Boost's <tuple>.
+# define BOOST_TR1_DETAIL_CONFIG_HPP_INCLUDED
+# include <tuple>
+
+# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40000)
+// GCC 4.0+ implements tr1/tuple in the <tr1/tuple> header. This does
+// not conform to the TR1 spec, which requires the header to be <tuple>.
+
+# if !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
+// Until version 4.3.2, gcc has a bug that causes <tr1/functional>,
+// which is #included by <tr1/tuple>, to not compile when RTTI is
+// disabled. _TR1_FUNCTIONAL is the header guard for
+// <tr1/functional>. Hence the following #define is a hack to prevent
+// <tr1/functional> from being included.
+# define _TR1_FUNCTIONAL 1
+# include <tr1/tuple>
+# undef _TR1_FUNCTIONAL // Allows the user to #include
+ // <tr1/functional> if he chooses to.
+# else
+# include <tr1/tuple> // NOLINT
+# endif // !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
+
+# else
+// If the compiler is not GCC 4.0+, we assume the user is using a
+// spec-conforming TR1 implementation.
+# include <tuple> // NOLINT
+# endif // GTEST_USE_OWN_TR1_TUPLE
+
+#endif // GTEST_HAS_TR1_TUPLE
+
+// Determines whether clone(2) is supported.
+// Usually it will only be available on Linux, excluding
+// Linux on the Itanium architecture.
+// Also see http://linux.die.net/man/2/clone.
+#ifndef GTEST_HAS_CLONE
+// The user didn't tell us, so we need to figure it out.
+
+# if GTEST_OS_LINUX && !defined(__ia64__)
+# define GTEST_HAS_CLONE 1
+# else
+# define GTEST_HAS_CLONE 0
+# endif // GTEST_OS_LINUX && !defined(__ia64__)
+
+#endif // GTEST_HAS_CLONE
+
+// Determines whether to support stream redirection. This is used to test
+// output correctness and to implement death tests.
+#ifndef GTEST_HAS_STREAM_REDIRECTION
+// By default, we assume that stream redirection is supported on all
+// platforms except known mobile ones.
+# if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN
+# define GTEST_HAS_STREAM_REDIRECTION 0
+# else
+# define GTEST_HAS_STREAM_REDIRECTION 1
+# endif // !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_SYMBIAN
+#endif // GTEST_HAS_STREAM_REDIRECTION
+
+// Determines whether to support death tests.
+// Google Test does not support death tests for VC 7.1 and earlier as
+// abort() in a VC 7.1 application compiled as GUI in debug config
+// pops up a dialog window that cannot be suppressed programmatically.
+#if (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \
+ (GTEST_OS_WINDOWS_DESKTOP && _MSC_VER >= 1400) || \
+ GTEST_OS_WINDOWS_MINGW || GTEST_OS_AIX || GTEST_OS_HPUX)
+# define GTEST_HAS_DEATH_TEST 1
+# include <vector> // NOLINT
+#endif
+
+// We don't support MSVC 7.1 with exceptions disabled now. Therefore
+// all the compilers we care about are adequate for supporting
+// value-parameterized tests.
+#define GTEST_HAS_PARAM_TEST 1
+
+// Determines whether to support type-driven tests.
+
+// Typed tests need <typeinfo> and variadic macros, which GCC, VC++ 8.0,
+// Sun Pro CC, IBM Visual Age, and HP aCC support.
+#if defined(__GNUC__) || (_MSC_VER >= 1400) || defined(__SUNPRO_CC) || \
+ defined(__IBMCPP__) || defined(__HP_aCC)
+# define GTEST_HAS_TYPED_TEST 1
+# define GTEST_HAS_TYPED_TEST_P 1
+#endif
+
+// Determines whether to support Combine(). This only makes sense when
+// value-parameterized tests are enabled. The implementation doesn't
+// work on Sun Studio since it doesn't understand templated conversion
+// operators.
+#if GTEST_HAS_PARAM_TEST && GTEST_HAS_TR1_TUPLE && !defined(__SUNPRO_CC)
+# define GTEST_HAS_COMBINE 1
+#endif
+
+// Determines whether the system compiler uses UTF-16 for encoding wide strings.
+#define GTEST_WIDE_STRING_USES_UTF16_ \
+ (GTEST_OS_WINDOWS || GTEST_OS_CYGWIN || GTEST_OS_SYMBIAN || GTEST_OS_AIX)
+
+// Determines whether test results can be streamed to a socket.
+#if GTEST_OS_LINUX
+# define GTEST_CAN_STREAM_RESULTS_ 1
+#endif
+
+// Defines some utility macros.
+
+// The GNU compiler emits a warning if nested "if" statements are followed by
+// an "else" statement and braces are not used to explicitly disambiguate the
+// "else" binding. This leads to problems with code like:
+//
+// if (gate)
+// ASSERT_*(condition) << "Some message";
+//
+// The "switch (0) case 0:" idiom is used to suppress this.
+#ifdef __INTEL_COMPILER
+# define GTEST_AMBIGUOUS_ELSE_BLOCKER_
+#else
+# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: default: // NOLINT
+#endif
+
+// Use this annotation at the end of a struct/class definition to
+// prevent the compiler from optimizing away instances that are never
+// used. This is useful when all interesting logic happens inside the
+// c'tor and / or d'tor. Example:
+//
+// struct Foo {
+// Foo() { ... }
+// } GTEST_ATTRIBUTE_UNUSED_;
+//
+// Also use it after a variable or parameter declaration to tell the
+// compiler the variable/parameter does not have to be used.
+#if defined(__GNUC__) && !defined(COMPILER_ICC)
+# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))
+#else
+# define GTEST_ATTRIBUTE_UNUSED_
+#endif
+
+// A macro to disallow operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_ASSIGN_(type)\
+ void operator=(type const &)
+
+// A macro to disallow copy constructor and operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type)\
+ type(type const &);\
+ GTEST_DISALLOW_ASSIGN_(type)
+
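+// For example (an illustrative sketch; MyClass is a hypothetical type):
+//
+//   class MyClass {
+//    public:
+//     MyClass() {}
+//    private:
+//     GTEST_DISALLOW_COPY_AND_ASSIGN_(MyClass);
+//   };
+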
+// Tell the compiler to warn about unused return values for functions declared
+// with this macro. The macro should be used on function declarations
+// following the argument list:
+//
+// Sprocket* AllocateSprocket() GTEST_MUST_USE_RESULT_;
+#if defined(__GNUC__) && (GTEST_GCC_VER_ >= 30400) && !defined(COMPILER_ICC)
+# define GTEST_MUST_USE_RESULT_ __attribute__ ((warn_unused_result))
+#else
+# define GTEST_MUST_USE_RESULT_
+#endif // __GNUC__ && (GTEST_GCC_VER_ >= 30400) && !COMPILER_ICC
+
+// Determine whether the compiler supports Microsoft's Structured Exception
+// Handling. This is supported by several Windows compilers but generally
+// does not exist on any other system.
+#ifndef GTEST_HAS_SEH
+// The user didn't tell us, so we need to figure it out.
+
+# if defined(_MSC_VER) || defined(__BORLANDC__)
+// These two compilers are known to support SEH.
+# define GTEST_HAS_SEH 1
+# else
+// Assume no SEH.
+# define GTEST_HAS_SEH 0
+# endif
+
+#endif // GTEST_HAS_SEH
+
+#ifdef _MSC_VER
+
+# if GTEST_LINKED_AS_SHARED_LIBRARY
+# define GTEST_API_ __declspec(dllimport)
+# elif GTEST_CREATE_SHARED_LIBRARY
+# define GTEST_API_ __declspec(dllexport)
+# endif
+
+#endif // _MSC_VER
+
+#ifndef GTEST_API_
+# define GTEST_API_
+#endif
+
+#ifdef __GNUC__
+// Ask the compiler to never inline a given function.
+# define GTEST_NO_INLINE_ __attribute__((noinline))
+#else
+# define GTEST_NO_INLINE_
+#endif
+
+namespace testing {
+
+class Message;
+
+namespace internal {
+
+class String;
+
+// The GTEST_COMPILE_ASSERT_ macro can be used to verify that a compile time
+// expression is true. For example, you could use it to verify the
+// size of a static array:
+//
+// GTEST_COMPILE_ASSERT_(ARRAYSIZE(content_type_names) == CONTENT_NUM_TYPES,
+// content_type_names_incorrect_size);
+//
+// or to make sure a struct is smaller than a certain size:
+//
+// GTEST_COMPILE_ASSERT_(sizeof(foo) < 128, foo_too_large);
+//
+// The second argument to the macro is the name of the variable. If
+// the expression is false, most compilers will issue a warning/error
+// containing the name of the variable.
+
+template <bool>
+struct CompileAssert {
+};
+
+#define GTEST_COMPILE_ASSERT_(expr, msg) \
+ typedef ::testing::internal::CompileAssert<(bool(expr))> \
+ msg[bool(expr) ? 1 : -1]
+
+// Implementation details of GTEST_COMPILE_ASSERT_:
+//
+// - GTEST_COMPILE_ASSERT_ works by defining an array type that has -1
+// elements (and thus is invalid) when the expression is false.
+//
+// - The simpler definition
+//
+// #define GTEST_COMPILE_ASSERT_(expr, msg) typedef char msg[(expr) ? 1 : -1]
+//
+// does not work, as gcc supports variable-length arrays whose sizes
+// are determined at run-time (this is a gcc extension and not part
+// of the C++ standard). As a result, gcc fails to reject the
+// following code with the simple definition:
+//
+// int foo;
+// GTEST_COMPILE_ASSERT_(foo, msg); // not supposed to compile as foo is
+// // not a compile-time constant.
+//
+// - By using the type CompileAssert<(bool(expr))>, we ensure that
+// expr is a compile-time constant. (Template arguments must be
+// determined at compile-time.)
+//
+// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
+// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written
+//
+// CompileAssert<bool(expr)>
+//
+//   instead, these compilers would refuse to compile
+//
+// GTEST_COMPILE_ASSERT_(5 > 0, some_message);
+//
+// (They seem to think the ">" in "5 > 0" marks the end of the
+// template argument list.)
+//
+// - The array size is (bool(expr) ? 1 : -1), instead of simply
+//
+// ((expr) ? 1 : -1).
+//
+// This is to avoid running into a bug in MS VC 7.1, which
+// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
+
+// StaticAssertTypeEqHelper is used by StaticAssertTypeEq defined in gtest.h.
+//
+// This template is declared, but intentionally undefined.
+template <typename T1, typename T2>
+struct StaticAssertTypeEqHelper;
+
+template <typename T>
+struct StaticAssertTypeEqHelper<T, T> {};
+
+#if GTEST_HAS_GLOBAL_STRING
+typedef ::string string;
+#else
+typedef ::std::string string;
+#endif // GTEST_HAS_GLOBAL_STRING
+
+#if GTEST_HAS_GLOBAL_WSTRING
+typedef ::wstring wstring;
+#elif GTEST_HAS_STD_WSTRING
+typedef ::std::wstring wstring;
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+// A helper for suppressing warnings on constant conditions. It just
+// returns 'condition'.
+GTEST_API_ bool IsTrue(bool condition);
+
+// Defines scoped_ptr.
+
+// This implementation of scoped_ptr is PARTIAL - it only contains
+// enough functionality to satisfy Google Test's needs.
+template <typename T>
+class scoped_ptr {
+ public:
+ typedef T element_type;
+
+ explicit scoped_ptr(T* p = NULL) : ptr_(p) {}
+ ~scoped_ptr() { reset(); }
+
+ T& operator*() const { return *ptr_; }
+ T* operator->() const { return ptr_; }
+ T* get() const { return ptr_; }
+
+ T* release() {
+ T* const ptr = ptr_;
+ ptr_ = NULL;
+ return ptr;
+ }
+
+ void reset(T* p = NULL) {
+ if (p != ptr_) {
+ if (IsTrue(sizeof(T) > 0)) { // Makes sure T is a complete type.
+ delete ptr_;
+ }
+ ptr_ = p;
+ }
+ }
+ private:
+ T* ptr_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(scoped_ptr);
+};
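+
+// A typical use (an illustrative sketch; Foo and DoSomething() are
+// hypothetical):
+//
+//   {
+//     scoped_ptr<Foo> foo(new Foo);
+//     foo->DoSomething();
+//   }  // *foo is deleted automatically here.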
+
+// Defines RE.
+
+// A simple C++ wrapper for <regex.h>. It uses the POSIX Extended
+// Regular Expression syntax.
+class GTEST_API_ RE {
+ public:
+ // A copy constructor is required by the Standard to initialize object
+ // references from r-values.
+ RE(const RE& other) { Init(other.pattern()); }
+
+ // Constructs an RE from a string.
+ RE(const ::std::string& regex) { Init(regex.c_str()); } // NOLINT
+
+#if GTEST_HAS_GLOBAL_STRING
+
+ RE(const ::string& regex) { Init(regex.c_str()); } // NOLINT
+
+#endif // GTEST_HAS_GLOBAL_STRING
+
+ RE(const char* regex) { Init(regex); } // NOLINT
+ ~RE();
+
+ // Returns the string representation of the regex.
+ const char* pattern() const { return pattern_; }
+
+ // FullMatch(str, re) returns true iff regular expression re matches
+ // the entire str.
+ // PartialMatch(str, re) returns true iff regular expression re
+ // matches a substring of str (including str itself).
+ //
+ // TODO(wan@google.com): make FullMatch() and PartialMatch() work
+ // when str contains NUL characters.
+ static bool FullMatch(const ::std::string& str, const RE& re) {
+ return FullMatch(str.c_str(), re);
+ }
+ static bool PartialMatch(const ::std::string& str, const RE& re) {
+ return PartialMatch(str.c_str(), re);
+ }
+
+#if GTEST_HAS_GLOBAL_STRING
+
+ static bool FullMatch(const ::string& str, const RE& re) {
+ return FullMatch(str.c_str(), re);
+ }
+ static bool PartialMatch(const ::string& str, const RE& re) {
+ return PartialMatch(str.c_str(), re);
+ }
+
+#endif // GTEST_HAS_GLOBAL_STRING
+
+ static bool FullMatch(const char* str, const RE& re);
+ static bool PartialMatch(const char* str, const RE& re);
+
+ private:
+ void Init(const char* regex);
+
+ // We use a const char* instead of a string, as Google Test may be used
+ // where string is not available. We also do not use Google Test's own
+ // String type here, in order to simplify dependencies between the
+ // files.
+ const char* pattern_;
+ bool is_valid_;
+
+#if GTEST_USES_POSIX_RE
+
+ regex_t full_regex_; // For FullMatch().
+ regex_t partial_regex_; // For PartialMatch().
+
+#else // GTEST_USES_SIMPLE_RE
+
+ const char* full_pattern_; // For FullMatch();
+
+#endif
+
+ GTEST_DISALLOW_ASSIGN_(RE);
+};
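+
+// For example (illustrative only):
+//
+//   const RE re("a.*z");
+//   RE::FullMatch("abcz", re);       // Returns true.
+//   RE::PartialMatch("xabczy", re);  // Returns true.
+//   RE::FullMatch("xabczy", re);     // Returns false.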
+
+// Formats a source file path and a line number as they would appear
+// in an error message from the compiler used to compile this code.
+GTEST_API_ ::std::string FormatFileLocation(const char* file, int line);
+
+// Formats a file location for compiler-independent XML output.
+// Although this function is not platform dependent, we put it next to
+// FormatFileLocation in order to contrast the two functions.
+GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(const char* file,
+ int line);
+
+// Defines logging utilities:
+// GTEST_LOG_(severity) - logs messages at the specified severity level. The
+// message itself is streamed into the macro.
+// LogToStderr() - directs all log messages to stderr.
+// FlushInfoLog() - flushes informational log messages.
+
+enum GTestLogSeverity {
+ GTEST_INFO,
+ GTEST_WARNING,
+ GTEST_ERROR,
+ GTEST_FATAL
+};
+
+// Formats log entry severity, provides a stream object for streaming the
+// log message, and terminates the message with a newline when going out of
+// scope.
+class GTEST_API_ GTestLog {
+ public:
+ GTestLog(GTestLogSeverity severity, const char* file, int line);
+
+ // Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
+ ~GTestLog();
+
+ ::std::ostream& GetStream() { return ::std::cerr; }
+
+ private:
+ const GTestLogSeverity severity_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestLog);
+};
+
+#define GTEST_LOG_(severity) \
+ ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \
+ __FILE__, __LINE__).GetStream()
+
+inline void LogToStderr() {}
+inline void FlushInfoLog() { fflush(NULL); }
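+
+// For example (illustrative only):
+//
+//   GTEST_LOG_(WARNING) << "Something unexpected happened.";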
+
+// INTERNAL IMPLEMENTATION - DO NOT USE.
+//
+// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition
+// is not satisfied.
+// Synopsis:
+// GTEST_CHECK_(boolean_condition);
+// or
+// GTEST_CHECK_(boolean_condition) << "Additional message";
+//
+// This checks the condition and, if the condition is not satisfied,
+// prints a message about the violation, including the condition
+// itself plus any additional message streamed into it, and then
+// aborts the program. It does so irrespective of whether the program
+// is built in debug mode or not.
+#define GTEST_CHECK_(condition) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::IsTrue(condition)) \
+ ; \
+ else \
+ GTEST_LOG_(FATAL) << "Condition " #condition " failed. "
+
+// An all-mode assert to verify that the given POSIX-style function
+// call returns 0 (indicating success). Known limitation: this
+// doesn't expand to a balanced 'if' statement, so enclose the macro
+// in {} if you need to use it as the only statement in an 'if'
+// branch.
+#define GTEST_CHECK_POSIX_SUCCESS_(posix_call) \
+ if (const int gtest_error = (posix_call)) \
+    GTEST_LOG_(FATAL) << #posix_call << " failed with error " \
+ << gtest_error
+
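+// For example (illustrative only; assumes a pthreads-based platform
+// and a pthread_t named thread):
+//
+//   GTEST_CHECK_POSIX_SUCCESS_(pthread_join(thread, NULL));
+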
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Use ImplicitCast_ as a safe version of static_cast for upcasting in
+// the type hierarchy (e.g. casting a Foo* to a SuperclassOfFoo* or a
+// const Foo*). When you use ImplicitCast_, the compiler checks that
+// the cast is safe. Such explicit ImplicitCast_s are necessary in
+// surprisingly many situations where C++ demands an exact type match
+// instead of an argument type convertible to a target type.
+//
+// The syntax for using ImplicitCast_ is the same as for static_cast:
+//
+// ImplicitCast_<ToType>(expr)
+//
+// ImplicitCast_ would have been part of the C++ standard library,
+// but the proposal was submitted too late. It will probably make
+// its way into the language in the future.
+//
+// This relatively ugly name is intentional. It prevents clashes with
+// similar functions users may have (e.g., implicit_cast). The internal
+// namespace alone is not enough because the function can be found by ADL.
+template<typename To>
+inline To ImplicitCast_(To x) { return x; }
+
+// When you upcast (that is, cast a pointer from type Foo to type
+// SuperclassOfFoo), it's fine to use ImplicitCast_<>, since upcasts
+// always succeed. When you downcast (that is, cast a pointer from
+// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because
+// how do you know the pointer is really of type SubclassOfFoo? It
+// could be a bare Foo, or of type DifferentSubclassOfFoo. Thus,
+// when you downcast, you should use this macro. In debug mode, we
+// use dynamic_cast<> to double-check the downcast is legal (we die
+// if it's not). In normal mode, we do the efficient static_cast<>
+// instead. Thus, it's important to test in debug mode to make sure
+// the cast is legal!
+// This is the only place in the code we should use dynamic_cast<>.
+// In particular, you SHOULDN'T be using dynamic_cast<> to do RTTI,
+// e.g. code like this:
+// if (dynamic_cast<Subclass1>(foo)) HandleASubclass1Object(foo);
+// if (dynamic_cast<Subclass2>(foo)) HandleASubclass2Object(foo);
+// You should design the code some other way not to need this.
+//
+// This relatively ugly name is intentional. It prevents clashes with
+// similar functions users may have (e.g., down_cast). The internal
+// namespace alone is not enough because the function can be found by ADL.
+template<typename To, typename From> // use like this: DownCast_<T*>(foo);
+inline To DownCast_(From* f) { // so we only accept pointers
+ // Ensures that To is a sub-type of From *. This test is here only
+ // for compile-time type checking, and has no overhead in an
+ // optimized build at run-time, as it will be optimized away
+ // completely.
+ if (false) {
+ const To to = NULL;
+ ::testing::internal::ImplicitCast_<From*>(to);
+ }
+
+#if GTEST_HAS_RTTI
+ // RTTI: debug mode only!
+ GTEST_CHECK_(f == NULL || dynamic_cast<To>(f) != NULL);
+#endif
+ return static_cast<To>(f);
+}
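+
+// For example (an illustrative sketch; Base and Derived are
+// hypothetical types, with Derived inheriting from Base):
+//
+//   Base* base = new Derived;
+//   Derived* derived = DownCast_<Derived*>(base);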
+
+// Downcasts the pointer of type Base to Derived.
+// Derived must be a subclass of Base. The parameter MUST
+// point to a class of type Derived, not any subclass of it.
+// When RTTI is available, the function performs a runtime
+// check to enforce this.
+template <class Derived, class Base>
+Derived* CheckedDowncastToActualType(Base* base) {
+#if GTEST_HAS_RTTI
+ GTEST_CHECK_(typeid(*base) == typeid(Derived));
+ return dynamic_cast<Derived*>(base); // NOLINT
+#else
+ return static_cast<Derived*>(base); // Poor man's downcast.
+#endif
+}
+
+#if GTEST_HAS_STREAM_REDIRECTION
+
+// Defines the stderr capturer:
+// CaptureStdout - starts capturing stdout.
+// GetCapturedStdout - stops capturing stdout and returns the captured string.
+// CaptureStderr - starts capturing stderr.
+// GetCapturedStderr - stops capturing stderr and returns the captured string.
+//
+GTEST_API_ void CaptureStdout();
+GTEST_API_ String GetCapturedStdout();
+GTEST_API_ void CaptureStderr();
+GTEST_API_ String GetCapturedStderr();
+
+#endif // GTEST_HAS_STREAM_REDIRECTION
+
+
+#if GTEST_HAS_DEATH_TEST
+
+// A copy of all command line arguments. Set by InitGoogleTest().
+extern ::std::vector<String> g_argvs;
+
+// GTEST_HAS_DEATH_TEST implies we have ::std::string.
+const ::std::vector<String>& GetArgvs();
+
+#endif // GTEST_HAS_DEATH_TEST
+
+// Defines synchronization primitives.
+
+#if GTEST_HAS_PTHREAD
+
+// Sleeps for (roughly) n milliseconds. This function is only for
+// testing Google Test's own constructs. Don't use it in user tests,
+// either directly or indirectly.
+inline void SleepMilliseconds(int n) {
+ const timespec time = {
+ 0, // 0 seconds.
+ n * 1000L * 1000L, // And n ms.
+ };
+ nanosleep(&time, NULL);
+}
+
+// Allows a controller thread to pause execution of newly created
+// threads until notified. Instances of this class must be created
+// and destroyed in the controller thread.
+//
+// This class is only for testing Google Test's own constructs. Do not
+// use it in user tests, either directly or indirectly.
+class Notification {
+ public:
+ Notification() : notified_(false) {}
+
+ // Notifies all threads created with this notification to start. Must
+ // be called from the controller thread.
+ void Notify() { notified_ = true; }
+
+ // Blocks until the controller thread notifies. Must be called from a test
+ // thread.
+ void WaitForNotification() {
+    while (!notified_) {
+ SleepMilliseconds(10);
+ }
+ }
+
+ private:
+ volatile bool notified_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification);
+};
+
+// As a C-function, ThreadFuncWithCLinkage cannot be templated itself.
+// Consequently, it cannot select a correct instantiation of ThreadWithParam
+// in order to call its Run(). Introducing ThreadWithParamBase as a
+// non-templated base class for ThreadWithParam allows us to bypass this
+// problem.
+class ThreadWithParamBase {
+ public:
+ virtual ~ThreadWithParamBase() {}
+ virtual void Run() = 0;
+};
+
+// pthread_create() accepts a pointer to a function type with the C linkage.
+// According to the Standard (7.5/1), function types with different linkages
+// are different even if they are otherwise identical. Some compilers (for
+// example, SunStudio) treat them as different types. Since class methods
+// cannot be defined with C-linkage we need to define a free C-function to
+// pass into pthread_create().
+extern "C" inline void* ThreadFuncWithCLinkage(void* thread) {
+ static_cast<ThreadWithParamBase*>(thread)->Run();
+ return NULL;
+}
+
+// Helper class for testing Google Test's multi-threading constructs.
+// To use it, write:
+//
+// void ThreadFunc(int param) { /* Do things with param */ }
+// Notification thread_can_start;
+// ...
+// // The thread_can_start parameter is optional; you can supply NULL.
+// ThreadWithParam<int> thread(&ThreadFunc, 5, &thread_can_start);
+// thread_can_start.Notify();
+//
+// These classes are only for testing Google Test's own constructs. Do
+// not use them in user tests, either directly or indirectly.
+template <typename T>
+class ThreadWithParam : public ThreadWithParamBase {
+ public:
+ typedef void (*UserThreadFunc)(T);
+
+ ThreadWithParam(
+ UserThreadFunc func, T param, Notification* thread_can_start)
+ : func_(func),
+ param_(param),
+ thread_can_start_(thread_can_start),
+ finished_(false) {
+ ThreadWithParamBase* const base = this;
+ // The thread can be created only after all fields except thread_
+ // have been initialized.
+ GTEST_CHECK_POSIX_SUCCESS_(
+ pthread_create(&thread_, 0, &ThreadFuncWithCLinkage, base));
+ }
+ ~ThreadWithParam() { Join(); }
+
+ void Join() {
+ if (!finished_) {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_join(thread_, 0));
+ finished_ = true;
+ }
+ }
+
+ virtual void Run() {
+ if (thread_can_start_ != NULL)
+ thread_can_start_->WaitForNotification();
+ func_(param_);
+ }
+
+ private:
+ const UserThreadFunc func_; // User-supplied thread function.
+ const T param_; // User-supplied parameter to the thread function.
+ // When non-NULL, used to block execution until the controller thread
+ // notifies.
+ Notification* const thread_can_start_;
+ bool finished_; // true iff we know that the thread function has finished.
+ pthread_t thread_; // The native thread object.
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam);
+};
+
+// MutexBase and Mutex implement mutex on pthreads-based platforms. They
+// are used in conjunction with class MutexLock:
+//
+// Mutex mutex;
+// ...
+// MutexLock lock(&mutex); // Acquires the mutex and releases it at the end
+// // of the current scope.
+//
+// MutexBase implements behavior for both statically and dynamically
+// allocated mutexes. Do not use MutexBase directly. Instead, write
+// the following to define a static mutex:
+//
+// GTEST_DEFINE_STATIC_MUTEX_(g_some_mutex);
+//
+// You can forward declare a static mutex like this:
+//
+// GTEST_DECLARE_STATIC_MUTEX_(g_some_mutex);
+//
+// To create a dynamic mutex, just define an object of type Mutex.
+class MutexBase {
+ public:
+ // Acquires this mutex.
+ void Lock() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_));
+ owner_ = pthread_self();
+ }
+
+ // Releases this mutex.
+ void Unlock() {
+ // We don't protect writing to owner_ here, as it's the caller's
+ // responsibility to ensure that the current thread holds the
+ // mutex when this is called.
+ owner_ = 0;
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&mutex_));
+ }
+
+ // Does nothing if the current thread holds the mutex. Otherwise, crashes
+ // with high probability.
+ void AssertHeld() const {
+ GTEST_CHECK_(owner_ == pthread_self())
+ << "The current thread is not holding the mutex @" << this;
+ }
+
+ // A static mutex may be used before main() is entered. It may even
+ // be used before the dynamic initialization stage. Therefore we
+ // must be able to initialize a static mutex object at link time.
+ // This means MutexBase has to be a POD and its member variables
+ // have to be public.
+ public:
+ pthread_mutex_t mutex_; // The underlying pthread mutex.
+ pthread_t owner_; // The thread holding the mutex; 0 means no one holds it.
+};
+
+// Forward-declares a static mutex.
+# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+ extern ::testing::internal::MutexBase mutex
+
+// Defines and statically (i.e. at link time) initializes a static mutex.
+# define GTEST_DEFINE_STATIC_MUTEX_(mutex) \
+ ::testing::internal::MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER, 0 }
+
+// The Mutex class can only be used for mutexes created at runtime. It
+// shares its API with MutexBase otherwise.
+class Mutex : public MutexBase {
+ public:
+ Mutex() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));
+ owner_ = 0;
+ }
+ ~Mutex() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_));
+ }
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex);
+};
+
+// We cannot name this class MutexLock as the ctor declaration would
+// conflict with a macro named MutexLock, which is defined on some
+// platforms. Hence the typedef trick below.
+class GTestMutexLock {
+ public:
+ explicit GTestMutexLock(MutexBase* mutex)
+ : mutex_(mutex) { mutex_->Lock(); }
+
+ ~GTestMutexLock() { mutex_->Unlock(); }
+
+ private:
+ MutexBase* const mutex_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock);
+};
+
+typedef GTestMutexLock MutexLock;
+
+// Helpers for ThreadLocal.
+
+// pthread_key_create() requires DeleteThreadLocalValue() to have
+// C-linkage. Therefore it cannot be templatized to access
+// ThreadLocal<T>. Hence the need for class
+// ThreadLocalValueHolderBase.
+class ThreadLocalValueHolderBase {
+ public:
+ virtual ~ThreadLocalValueHolderBase() {}
+};
+
+// Called by pthread to delete thread-local data stored by
+// pthread_setspecific().
+extern "C" inline void DeleteThreadLocalValue(void* value_holder) {
+ delete static_cast<ThreadLocalValueHolderBase*>(value_holder);
+}
+
+// Implements thread-local storage on pthreads-based systems.
+//
+// // Thread 1
+// ThreadLocal<int> tl(100); // 100 is the default value for each thread.
+//
+// // Thread 2
+// tl.set(150); // Changes the value for thread 2 only.
+// EXPECT_EQ(150, tl.get());
+//
+// // Thread 1
+// EXPECT_EQ(100, tl.get()); // In thread 1, tl has the original value.
+// tl.set(200);
+// EXPECT_EQ(200, tl.get());
+//
+// The template type argument T must have a public copy constructor.
+// In addition, the default ThreadLocal constructor requires T to have
+// a public default constructor.
+//
+// An object managed for a thread by a ThreadLocal instance is deleted
+// when that thread exits, or, if the ThreadLocal instance is destroyed
+// in that thread, when the ThreadLocal is destroyed. It's the user's
+// responsibility to ensure that all other threads using a ThreadLocal
+// have exited when it dies, or the per-thread objects for those
+// threads will not be deleted.
+//
+// Google Test only uses global ThreadLocal objects. That means they
+// will die after main() has returned. Therefore, no per-thread
+// object managed by Google Test will be leaked as long as all threads
+// using Google Test have exited when main() returns.
+template <typename T>
+class ThreadLocal {
+ public:
+ ThreadLocal() : key_(CreateKey()),
+ default_() {}
+ explicit ThreadLocal(const T& value) : key_(CreateKey()),
+ default_(value) {}
+
+ ~ThreadLocal() {
+ // Destroys the managed object for the current thread, if any.
+ DeleteThreadLocalValue(pthread_getspecific(key_));
+
+ // Releases resources associated with the key. This will *not*
+ // delete managed objects for other threads.
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_key_delete(key_));
+ }
+
+ T* pointer() { return GetOrCreateValue(); }
+ const T* pointer() const { return GetOrCreateValue(); }
+ const T& get() const { return *pointer(); }
+ void set(const T& value) { *pointer() = value; }
+
+ private:
+ // Holds a value of type T.
+ class ValueHolder : public ThreadLocalValueHolderBase {
+ public:
+ explicit ValueHolder(const T& value) : value_(value) {}
+
+ T* pointer() { return &value_; }
+
+ private:
+ T value_;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder);
+ };
+
+ static pthread_key_t CreateKey() {
+ pthread_key_t key;
+ // When a thread exits, DeleteThreadLocalValue() will be called on
+ // the object managed for that thread.
+ GTEST_CHECK_POSIX_SUCCESS_(
+ pthread_key_create(&key, &DeleteThreadLocalValue));
+ return key;
+ }
+
+ T* GetOrCreateValue() const {
+ ThreadLocalValueHolderBase* const holder =
+ static_cast<ThreadLocalValueHolderBase*>(pthread_getspecific(key_));
+ if (holder != NULL) {
+ return CheckedDowncastToActualType<ValueHolder>(holder)->pointer();
+ }
+
+ ValueHolder* const new_holder = new ValueHolder(default_);
+ ThreadLocalValueHolderBase* const holder_base = new_holder;
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_setspecific(key_, holder_base));
+ return new_holder->pointer();
+ }
+
+ // A key pthreads uses for looking up per-thread values.
+ const pthread_key_t key_;
+ const T default_; // The default value for each thread.
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal);
+};
+
+# define GTEST_IS_THREADSAFE 1
+
+#else // GTEST_HAS_PTHREAD
+
+// A dummy implementation of synchronization primitives (mutex, lock,
+// and thread-local variable). Necessary for compiling Google Test where
+// mutex is not supported - using Google Test in multiple threads is not
+// supported on such platforms.
+
+class Mutex {
+ public:
+ Mutex() {}
+ void AssertHeld() const {}
+};
+
+# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+ extern ::testing::internal::Mutex mutex
+
+# define GTEST_DEFINE_STATIC_MUTEX_(mutex) ::testing::internal::Mutex mutex
+
+class GTestMutexLock {
+ public:
+ explicit GTestMutexLock(Mutex*) {} // NOLINT
+};
+
+typedef GTestMutexLock MutexLock;
+
+template <typename T>
+class ThreadLocal {
+ public:
+ ThreadLocal() : value_() {}
+ explicit ThreadLocal(const T& value) : value_(value) {}
+ T* pointer() { return &value_; }
+ const T* pointer() const { return &value_; }
+ const T& get() const { return value_; }
+ void set(const T& value) { value_ = value; }
+ private:
+ T value_;
+};
+
+// The above synchronization primitives have dummy implementations.
+// Therefore Google Test is not thread-safe.
+# define GTEST_IS_THREADSAFE 0
+
+#endif // GTEST_HAS_PTHREAD
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+GTEST_API_ size_t GetThreadCount();
+
+// Passing non-POD classes through ellipsis (...) crashes the ARM
+// compiler and generates a warning in Sun Studio. The Nokia Symbian
+// and the IBM XL C/C++ compiler try to instantiate a copy constructor
+// for objects passed through ellipsis (...), failing for uncopyable
+// objects. We define this to ensure that only POD is passed through
+// ellipsis on these systems.
+#if defined(__SYMBIAN32__) || defined(__IBMCPP__) || defined(__SUNPRO_CC)
+// We lose support for NULL detection where the compiler doesn't like
+// passing non-POD classes through ellipsis (...).
+# define GTEST_ELLIPSIS_NEEDS_POD_ 1
+#else
+# define GTEST_CAN_COMPARE_NULL 1
+#endif
+
+// The Nokia Symbian and IBM XL C/C++ compilers cannot decide between
+// const T& and const T* in a function template. These compilers
+// _can_ decide between class template specializations for T and T*,
+// so a tr1::type_traits-like is_pointer works.
+#if defined(__SYMBIAN32__) || defined(__IBMCPP__)
+# define GTEST_NEEDS_IS_POINTER_ 1
+#endif
+
+template <bool bool_value>
+struct bool_constant {
+ typedef bool_constant<bool_value> type;
+ static const bool value = bool_value;
+};
+template <bool bool_value> const bool bool_constant<bool_value>::value;
+
+typedef bool_constant<false> false_type;
+typedef bool_constant<true> true_type;
+
+template <typename T>
+struct is_pointer : public false_type {};
+
+template <typename T>
+struct is_pointer<T*> : public true_type {};
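+
+// For example (illustrative only), is_pointer<int>::value is false,
+// while is_pointer<int*>::value is true.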
+
+template <typename Iterator>
+struct IteratorTraits {
+ typedef typename Iterator::value_type value_type;
+};
+
+template <typename T>
+struct IteratorTraits<T*> {
+ typedef T value_type;
+};
+
+template <typename T>
+struct IteratorTraits<const T*> {
+ typedef T value_type;
+};
+
+#if GTEST_OS_WINDOWS
+# define GTEST_PATH_SEP_ "\\"
+# define GTEST_HAS_ALT_PATH_SEP_ 1
+// The biggest signed integer type the compiler supports.
+typedef __int64 BiggestInt;
+#else
+# define GTEST_PATH_SEP_ "/"
+# define GTEST_HAS_ALT_PATH_SEP_ 0
+typedef long long BiggestInt; // NOLINT
+#endif // GTEST_OS_WINDOWS
+
+// Utilities for char.
+
+// isspace(int ch) and friends accept an unsigned char or EOF. char
+// may be signed, depending on the compiler (or compiler flags).
+// Therefore we need to cast a char to unsigned char before calling
+// isspace(), etc.
+
+inline bool IsAlpha(char ch) {
+ return isalpha(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsAlNum(char ch) {
+ return isalnum(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsDigit(char ch) {
+ return isdigit(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsLower(char ch) {
+ return islower(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsSpace(char ch) {
+ return isspace(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsUpper(char ch) {
+ return isupper(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsXDigit(char ch) {
+ return isxdigit(static_cast<unsigned char>(ch)) != 0;
+}
+
+inline char ToLower(char ch) {
+ return static_cast<char>(tolower(static_cast<unsigned char>(ch)));
+}
+inline char ToUpper(char ch) {
+ return static_cast<char>(toupper(static_cast<unsigned char>(ch)));
+}
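+
+// For example (illustrative only), IsDigit('7') returns true,
+// IsDigit('x') returns false, and ToUpper('a') returns 'A'.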
+
+// The testing::internal::posix namespace holds wrappers for common
+// POSIX functions. These wrappers hide the differences between
+// Windows/MSVC and POSIX systems. Since some compilers define these
+// standard functions as macros, the wrapper cannot have the same name
+// as the wrapped function.
+
+namespace posix {
+
+// Functions with a different name on Windows.
+
+#if GTEST_OS_WINDOWS
+
+typedef struct _stat StatStruct;
+
+# ifdef __BORLANDC__
+inline int IsATTY(int fd) { return isatty(fd); }
+inline int StrCaseCmp(const char* s1, const char* s2) {
+ return stricmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return strdup(src); }
+# else // !__BORLANDC__
+# if GTEST_OS_WINDOWS_MOBILE
+inline int IsATTY(int /* fd */) { return 0; }
+# else
+inline int IsATTY(int fd) { return _isatty(fd); }
+# endif // GTEST_OS_WINDOWS_MOBILE
+inline int StrCaseCmp(const char* s1, const char* s2) {
+ return _stricmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return _strdup(src); }
+# endif // __BORLANDC__
+
+# if GTEST_OS_WINDOWS_MOBILE
+inline int FileNo(FILE* file) { return reinterpret_cast<int>(_fileno(file)); }
+// Stat(), RmDir(), and IsDir() are not needed on Windows CE at this
+// time and thus not defined there.
+# else
+inline int FileNo(FILE* file) { return _fileno(file); }
+inline int Stat(const char* path, StatStruct* buf) { return _stat(path, buf); }
+inline int RmDir(const char* dir) { return _rmdir(dir); }
+inline bool IsDir(const StatStruct& st) {
+ return (_S_IFDIR & st.st_mode) != 0;
+}
+# endif // GTEST_OS_WINDOWS_MOBILE
+
+#else
+
+typedef struct stat StatStruct;
+
+inline int FileNo(FILE* file) { return fileno(file); }
+inline int IsATTY(int fd) { return isatty(fd); }
+inline int Stat(const char* path, StatStruct* buf) { return stat(path, buf); }
+inline int StrCaseCmp(const char* s1, const char* s2) {
+ return strcasecmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return strdup(src); }
+inline int RmDir(const char* dir) { return rmdir(dir); }
+inline bool IsDir(const StatStruct& st) { return S_ISDIR(st.st_mode); }
+
+#endif // GTEST_OS_WINDOWS
+
+// Functions deprecated by MSVC 8.0.
+
+#ifdef _MSC_VER
+// Temporarily disable warning 4996 (deprecated function).
+# pragma warning(push)
+# pragma warning(disable:4996)
+#endif
+
+inline const char* StrNCpy(char* dest, const char* src, size_t n) {
+ return strncpy(dest, src, n);
+}
+
+// ChDir(), FReopen(), FDOpen(), Read(), Write(), Close(), and
+// StrError() aren't needed on Windows CE at this time and thus not
+// defined there.
+
+#if !GTEST_OS_WINDOWS_MOBILE
+inline int ChDir(const char* dir) { return chdir(dir); }
+#endif
+inline FILE* FOpen(const char* path, const char* mode) {
+ return fopen(path, mode);
+}
+#if !GTEST_OS_WINDOWS_MOBILE
+inline FILE *FReopen(const char* path, const char* mode, FILE* stream) {
+ return freopen(path, mode, stream);
+}
+inline FILE* FDOpen(int fd, const char* mode) { return fdopen(fd, mode); }
+#endif
+inline int FClose(FILE* fp) { return fclose(fp); }
+#if !GTEST_OS_WINDOWS_MOBILE
+inline int Read(int fd, void* buf, unsigned int count) {
+ return static_cast<int>(read(fd, buf, count));
+}
+inline int Write(int fd, const void* buf, unsigned int count) {
+ return static_cast<int>(write(fd, buf, count));
+}
+inline int Close(int fd) { return close(fd); }
+inline const char* StrError(int errnum) { return strerror(errnum); }
+#endif
+inline const char* GetEnv(const char* name) {
+#if GTEST_OS_WINDOWS_MOBILE
+ // We are on Windows CE, which has no environment variables.
+ return NULL;
+#elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9)
+ // Environment variables which we programmatically clear will be set to the
+ // empty string rather than unset (NULL). Handle that case.
+ const char* const env = getenv(name);
+ return (env != NULL && env[0] != '\0') ? env : NULL;
+#else
+ return getenv(name);
+#endif
+}
+
+#ifdef _MSC_VER
+# pragma warning(pop) // Restores the warning state.
+#endif
+
+#if GTEST_OS_WINDOWS_MOBILE
+// Windows CE has no C library. The abort() function is used in
+// several places in Google Test. This implementation provides a reasonable
+// imitation of standard behaviour.
+void Abort();
+#else
+inline void Abort() { abort(); }
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+} // namespace posix
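+
+// Usage sketch (illustrative): calling the wrappers instead of the raw
+// functions keeps call sites free of platform #ifdefs:
+//
+//   FILE* f = posix::FOpen("out.txt", "w");   // fopen() everywhere
+//   int fd = posix::FileNo(f);                // _fileno() on MSVC, fileno() elsewhere
+//   posix::FClose(f);
+//   if (posix::StrCaseCmp("Foo", "FOO") == 0) { /* case-insensitive match */ }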
+
+// The maximum number a BiggestInt can represent. This definition
+// works no matter whether BiggestInt is represented in one's complement or
+// two's complement.
+//
+// We cannot rely on numeric_limits in STL, as __int64 and long long
+// are not part of standard C++ and numeric_limits doesn't need to be
+// defined for them.
+const BiggestInt kMaxBiggestInt =
+ ~(static_cast<BiggestInt>(1) << (8*sizeof(BiggestInt) - 1));
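+
+// For example, with a 64-bit BiggestInt the shift sets only the sign bit
+// (0x8000000000000000) and the bitwise NOT clears it while setting every
+// other bit, giving 0x7FFFFFFFFFFFFFFF, i.e. 2^63 - 1.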
+
+// This template class serves as a compile-time function from size to
+// type. It maps a size in bytes to a primitive type with that
+// size. e.g.
+//
+// TypeWithSize<4>::UInt
+//
+// is typedef-ed to be unsigned int (unsigned integer made up of 4
+// bytes).
+//
+// Such functionality should belong to STL, but I cannot find it
+// there.
+//
+// Google Test uses this class in the implementation of floating-point
+// comparison.
+//
+// For now it only handles UInt (unsigned int) as that's all Google Test
+// needs. Other types can be easily added in the future if need
+// arises.
+template <size_t size>
+class TypeWithSize {
+ public:
+ // This prevents the user from using TypeWithSize<N> with incorrect
+ // values of N.
+ typedef void UInt;
+};
+
+// The specialization for size 4.
+template <>
+class TypeWithSize<4> {
+ public:
+ // unsigned int has size 4 in both gcc and MSVC.
+ //
+ // As base/basictypes.h doesn't compile on Windows, we cannot use
+ // uint32, uint64, etc. here.
+ typedef int Int;
+ typedef unsigned int UInt;
+};
+
+// The specialization for size 8.
+template <>
+class TypeWithSize<8> {
+ public:
+
+#if GTEST_OS_WINDOWS
+ typedef __int64 Int;
+ typedef unsigned __int64 UInt;
+#else
+ typedef long long Int; // NOLINT
+ typedef unsigned long long UInt; // NOLINT
+#endif // GTEST_OS_WINDOWS
+};
+
+// Integer types of known sizes.
+typedef TypeWithSize<4>::Int Int32;
+typedef TypeWithSize<4>::UInt UInt32;
+typedef TypeWithSize<8>::Int Int64;
+typedef TypeWithSize<8>::UInt UInt64;
+typedef TypeWithSize<8>::Int TimeInMillis; // Represents time in milliseconds.
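+
+// Usage sketch (illustrative):
+//
+//   Int32 count = 0;             // exactly 4 bytes wide
+//   UInt64 mask = 0;             // exactly 8 bytes wide
+//   TimeInMillis elapsed = 1500; // 1.5 seconds as a 64-bit millisecond count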
+
+// Utilities for command line flags and environment variables.
+
+// Macro for referencing flags.
+#define GTEST_FLAG(name) FLAGS_gtest_##name
+
+// Macros for declaring flags.
+#define GTEST_DECLARE_bool_(name) GTEST_API_ extern bool GTEST_FLAG(name)
+#define GTEST_DECLARE_int32_(name) \
+ GTEST_API_ extern ::testing::internal::Int32 GTEST_FLAG(name)
+#define GTEST_DECLARE_string_(name) \
+ GTEST_API_ extern ::testing::internal::String GTEST_FLAG(name)
+
+// Macros for defining flags.
+#define GTEST_DEFINE_bool_(name, default_val, doc) \
+ GTEST_API_ bool GTEST_FLAG(name) = (default_val)
+#define GTEST_DEFINE_int32_(name, default_val, doc) \
+ GTEST_API_ ::testing::internal::Int32 GTEST_FLAG(name) = (default_val)
+#define GTEST_DEFINE_string_(name, default_val, doc) \
+ GTEST_API_ ::testing::internal::String GTEST_FLAG(name) = (default_val)
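+
+// For example, GTEST_DECLARE_int32_(repeat); expands to
+//
+//   GTEST_API_ extern ::testing::internal::Int32 FLAGS_gtest_repeat;
+//
+// and the flag is subsequently read and written as GTEST_FLAG(repeat).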
+
+// Parses 'str' for a 32-bit signed integer. If successful, writes the result
+// to *value and returns true; otherwise leaves *value unchanged and returns
+// false.
+// TODO(chandlerc): Find a better way to refactor flag and environment parsing
+// out of both gtest-port.cc and gtest.cc to avoid exporting this utility
+// function.
+bool ParseInt32(const Message& src_text, const char* str, Int32* value);
+
+// Parses a bool/Int32/string from the environment variable
+// corresponding to the given Google Test flag.
+bool BoolFromGTestEnv(const char* flag, bool default_val);
+GTEST_API_ Int32 Int32FromGTestEnv(const char* flag, Int32 default_val);
+const char* StringFromGTestEnv(const char* flag, const char* default_val);
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+
+#if GTEST_OS_LINUX
+# include <stdlib.h>
+# include <sys/types.h>
+# include <sys/wait.h>
+# include <unistd.h>
+#endif // GTEST_OS_LINUX
+
+#include <ctype.h>
+#include <string.h>
+#include <iomanip>
+#include <limits>
+#include <set>
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file declares the String class and functions used internally by
+// Google Test. They are subject to change without notice. They should not
+// be used by code external to Google Test.
+//
+// This header file is #included by <gtest/internal/gtest-internal.h>.
+// It should not be #included by other files.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+
+#ifdef __BORLANDC__
+// string.h is not guaranteed to provide strcpy on C++ Builder.
+# include <mem.h>
+#endif
+
+#include <string.h>
+
+#include <string>
+
+namespace testing {
+namespace internal {
+
+// String - a UTF-8 string class.
+//
+// For historic reasons, we don't use std::string.
+//
+// TODO(wan@google.com): replace this class with std::string or
+// implement it in terms of the latter.
+//
+// Note that String can represent both NULL and the empty string,
+// while std::string cannot represent NULL.
+//
+// NULL and the empty string are considered different. NULL is less
+// than anything (including the empty string) except itself.
+//
+// This class only provides minimum functionality necessary for
+// implementing Google Test. We do not intend to implement a full-fledged
+// string class here.
+//
+// Since the purpose of this class is to provide a substitute for
+// std::string on platforms where it cannot be used, we define a copy
+// constructor and assignment operators such that we don't need
+// conditional compilation in a lot of places.
+//
+// In order to make the representation efficient, the d'tor of String
+// is not virtual. Therefore DO NOT INHERIT FROM String.
+class GTEST_API_ String {
+ public:
+ // Static utility methods
+
+ // Returns the input enclosed in double quotes if it's not NULL;
+ // otherwise returns "(null)". For example, "\"Hello\"" is returned
+ // for input "Hello".
+ //
+ // This is useful for printing a C string in the syntax of a literal.
+ //
+ // Known issue: escape sequences are not handled yet.
+ static String ShowCStringQuoted(const char* c_str);
+
+ // Clones a 0-terminated C string, allocating memory using new. The
+ // caller is responsible for deleting the return value using
+ // delete[]. Returns the cloned string, or NULL if the input is
+ // NULL.
+ //
+ // This is different from strdup() in string.h, which allocates
+ // memory using malloc().
+ static const char* CloneCString(const char* c_str);
+
+#if GTEST_OS_WINDOWS_MOBILE
+ // Windows CE does not have the 'ANSI' versions of Win32 APIs. To be
+ // able to pass strings to Win32 APIs on CE we need to convert them
+ // to 'Unicode', UTF-16.
+
+ // Creates a UTF-16 wide string from the given ANSI string, allocating
+ // memory using new. The caller is responsible for deleting the return
+ // value using delete[]. Returns the wide string, or NULL if the
+ // input is NULL.
+ //
+ // The wide string is created using the ANSI codepage (CP_ACP) to
+ // match the behaviour of the ANSI versions of Win32 calls and the
+ // C runtime.
+ static LPCWSTR AnsiToUtf16(const char* c_str);
+
+ // Creates an ANSI string from the given wide string, allocating
+ // memory using new. The caller is responsible for deleting the return
+ // value using delete[]. Returns the ANSI string, or NULL if the
+ // input is NULL.
+ //
+ // The returned string is created using the ANSI codepage (CP_ACP) to
+ // match the behaviour of the ANSI versions of Win32 calls and the
+ // C runtime.
+ static const char* Utf16ToAnsi(LPCWSTR utf16_str);
+#endif
+
+ // Compares two C strings. Returns true iff they have the same content.
+ //
+ // Unlike strcmp(), this function can handle NULL argument(s). A
+ // NULL C string is considered different to any non-NULL C string,
+ // including the empty string.
+ static bool CStringEquals(const char* lhs, const char* rhs);
+
+ // Converts a wide C string to a String using the UTF-8 encoding.
+ // NULL will be converted to "(null)". If an error occurred during
+ // the conversion, "(failed to convert from wide string)" is
+ // returned.
+ static String ShowWideCString(const wchar_t* wide_c_str);
+
+ // Similar to ShowWideCString(), except that this function encloses
+ // the converted string in double quotes.
+ static String ShowWideCStringQuoted(const wchar_t* wide_c_str);
+
+ // Compares two wide C strings. Returns true iff they have the same
+ // content.
+ //
+ // Unlike wcscmp(), this function can handle NULL argument(s). A
+ // NULL C string is considered different to any non-NULL C string,
+ // including the empty string.
+ static bool WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs);
+
+ // Compares two C strings, ignoring case. Returns true iff they
+ // have the same content.
+ //
+ // Unlike strcasecmp(), this function can handle NULL argument(s).
+ // A NULL C string is considered different to any non-NULL C string,
+ // including the empty string.
+ static bool CaseInsensitiveCStringEquals(const char* lhs,
+ const char* rhs);
+
+ // Compares two wide C strings, ignoring case. Returns true iff they
+ // have the same content.
+ //
+ // Unlike wcscasecmp(), this function can handle NULL argument(s).
+ // A NULL C string is considered different to any non-NULL wide C string,
+ // including the empty string.
+ // NB: The implementations on different platforms slightly differ.
+ // On windows, this method uses _wcsicmp which compares according to LC_CTYPE
+ // environment variable. On GNU platform this method uses wcscasecmp
+ // which compares according to LC_CTYPE category of the current locale.
+ // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the
+ // current locale.
+ static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
+ const wchar_t* rhs);
+
+ // Formats a list of arguments to a String, using the same format
+ // spec string as for printf.
+ //
+ // We do not use the StringPrintf class as it is not universally
+ // available.
+ //
+ // The result is limited to 4096 characters (including the trailing
+ // 0). If 4096 characters are not enough to format the input,
+ // "<buffer exceeded>" is returned.
+ static String Format(const char* format, ...);
+
+ // C'tors
+
+ // The default c'tor constructs a NULL string.
+ String() : c_str_(NULL), length_(0) {}
+
+ // Constructs a String by cloning a 0-terminated C string.
+ String(const char* a_c_str) { // NOLINT
+ if (a_c_str == NULL) {
+ c_str_ = NULL;
+ length_ = 0;
+ } else {
+ ConstructNonNull(a_c_str, strlen(a_c_str));
+ }
+ }
+
+ // Constructs a String by copying a given number of chars from a
+ // buffer. E.g. String("hello", 3) creates the string "hel",
+ // String("a\0bcd", 4) creates "a\0bc", String(NULL, 0) creates "",
+ // and String(NULL, 1) results in access violation.
+ String(const char* buffer, size_t a_length) {
+ ConstructNonNull(buffer, a_length);
+ }
+
+ // The copy c'tor creates a new copy of the string. The two
+ // String objects do not share content.
+ String(const String& str) : c_str_(NULL), length_(0) { *this = str; }
+
+ // D'tor. String is intended to be a final class, so the d'tor
+ // doesn't need to be virtual.
+ ~String() { delete[] c_str_; }
+
+ // Allows a String to be implicitly converted to an ::std::string or
+ // ::string, and vice versa. Converting a String containing a NULL
+ // pointer to ::std::string or ::string is undefined behavior.
+ // Converting a ::std::string or ::string containing an embedded NUL
+ // character to a String will result in the prefix up to the first
+ // NUL character.
+ String(const ::std::string& str) {
+ ConstructNonNull(str.c_str(), str.length());
+ }
+
+ operator ::std::string() const { return ::std::string(c_str(), length()); }
+
+#if GTEST_HAS_GLOBAL_STRING
+ String(const ::string& str) {
+ ConstructNonNull(str.c_str(), str.length());
+ }
+
+ operator ::string() const { return ::string(c_str(), length()); }
+#endif // GTEST_HAS_GLOBAL_STRING
+
+ // Returns true iff this is an empty string (i.e. "").
+ bool empty() const { return (c_str() != NULL) && (length() == 0); }
+
+ // Compares this with another String.
+ // Returns < 0 if this is less than rhs, 0 if this is equal to rhs, or > 0
+ // if this is greater than rhs.
+ int Compare(const String& rhs) const;
+
+ // Returns true iff this String equals the given C string. A NULL
+ // string and a non-NULL string are considered not equal.
+ bool operator==(const char* a_c_str) const { return Compare(a_c_str) == 0; }
+
+ // Returns true iff this String is less than the given String. A
+ // NULL string is considered less than "".
+ bool operator<(const String& rhs) const { return Compare(rhs) < 0; }
+
+ // Returns true iff this String doesn't equal the given C string. A NULL
+ // string and a non-NULL string are considered not equal.
+ bool operator!=(const char* a_c_str) const { return !(*this == a_c_str); }
+
+ // Returns true iff this String ends with the given suffix. *Any*
+ // String is considered to end with a NULL or empty suffix.
+ bool EndsWith(const char* suffix) const;
+
+ // Returns true iff this String ends with the given suffix, not considering
+ // case. Any String is considered to end with a NULL or empty suffix.
+ bool EndsWithCaseInsensitive(const char* suffix) const;
+
+ // Returns the length of the encapsulated string, or 0 if the
+ // string is NULL.
+ size_t length() const { return length_; }
+
+ // Gets the 0-terminated C string this String object represents.
+ // The String object still owns the string. Therefore the caller
+ // should NOT delete the return value.
+ const char* c_str() const { return c_str_; }
+
+ // Assigns a C string to this object. Self-assignment works.
+ const String& operator=(const char* a_c_str) {
+ return *this = String(a_c_str);
+ }
+
+ // Assigns a String object to this object. Self-assignment works.
+ const String& operator=(const String& rhs) {
+ if (this != &rhs) {
+ delete[] c_str_;
+ if (rhs.c_str() == NULL) {
+ c_str_ = NULL;
+ length_ = 0;
+ } else {
+ ConstructNonNull(rhs.c_str(), rhs.length());
+ }
+ }
+
+ return *this;
+ }
+
+ private:
+ // Constructs a non-NULL String from the given content. This
+ // function can only be called when c_str_ has not been allocated.
+ // ConstructNonNull(NULL, 0) results in an empty string ("").
+ // ConstructNonNull(NULL, non_zero) is undefined behavior.
+ void ConstructNonNull(const char* buffer, size_t a_length) {
+ char* const str = new char[a_length + 1];
+ memcpy(str, buffer, a_length);
+ str[a_length] = '\0';
+ c_str_ = str;
+ length_ = a_length;
+ }
+
+ const char* c_str_;
+ size_t length_;
+}; // class String
+
+// Streams a String to an ostream. Each '\0' character in the String
+// is replaced with "\\0".
+inline ::std::ostream& operator<<(::std::ostream& os, const String& str) {
+ if (str.c_str() == NULL) {
+ os << "(null)";
+ } else {
+ const char* const c_str = str.c_str();
+ for (size_t i = 0; i != str.length(); i++) {
+ if (c_str[i] == '\0') {
+ os << "\\0";
+ } else {
+ os << c_str[i];
+ }
+ }
+ }
+ return os;
+}
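+
+// Usage sketch (illustrative) of the NULL/empty distinction documented above:
+//
+//   String null_str;              // c_str() == NULL, empty() returns false
+//   String empty_str("");         // c_str() != NULL, empty() returns true
+//   String bytes("a\0bc", 4);     // length() == 4, embedded NUL preserved
+//   std::cout << bytes;           // the embedded NUL is streamed as the two
+//                                 // characters '\' and '0'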
+
+// Gets the content of the stringstream's buffer as a String. Each '\0'
+// character in the buffer is replaced with "\\0".
+GTEST_API_ String StringStreamToString(::std::stringstream* stream);
+
+// Converts a streamable value to a String. A NULL pointer is
+// converted to "(null)". When the input value is a ::string,
+// ::std::string, ::wstring, or ::std::wstring object, each NUL
+// character in it is replaced with "\\0".
+
+// Declared here but defined in gtest.h, so that it has access
+// to the definition of the Message class, required by the ARM
+// compiler.
+template <typename T>
+String StreamableToString(const T& streamable);
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keith.ray@gmail.com (Keith Ray)
+//
+// Google Test filepath utilities
+//
+// This header file declares classes and functions used internally by
+// Google Test. They are subject to change without notice.
+//
+// This file is #included in <gtest/internal/gtest-internal.h>.
+// Do not include this header file separately!
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+
+
+namespace testing {
+namespace internal {
+
+// FilePath - a class for file and directory pathname manipulation which
+// handles platform-specific conventions (like the pathname separator).
+// Used for helper functions for naming files in a directory for xml output.
+// Except for Set methods, all methods are const or static, which provides an
+// "immutable value object" -- useful for peace of mind.
+// A FilePath with a value ending in a path separator ("like/this/") represents
+// a directory, otherwise it is assumed to represent a file. In either case,
+// it may or may not represent an actual file or directory in the file system.
+// Names are NOT checked for syntax correctness -- no checking for illegal
+// characters, malformed paths, etc.
+
+class GTEST_API_ FilePath {
+ public:
+ FilePath() : pathname_("") { }
+ FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { }
+
+ explicit FilePath(const char* pathname) : pathname_(pathname) {
+ Normalize();
+ }
+
+ explicit FilePath(const String& pathname) : pathname_(pathname) {
+ Normalize();
+ }
+
+ FilePath& operator=(const FilePath& rhs) {
+ Set(rhs);
+ return *this;
+ }
+
+ void Set(const FilePath& rhs) {
+ pathname_ = rhs.pathname_;
+ }
+
+ String ToString() const { return pathname_; }
+ const char* c_str() const { return pathname_.c_str(); }
+
+ // Returns the current working directory, or "" if unsuccessful.
+ static FilePath GetCurrentDir();
+
+ // Given directory = "dir", base_name = "test", number = 0,
+ // extension = "xml", returns "dir/test.xml". If number is greater
+ // than zero (e.g., 12), returns "dir/test_12.xml".
+ // On Windows platform, uses \ as the separator rather than /.
+ static FilePath MakeFileName(const FilePath& directory,
+ const FilePath& base_name,
+ int number,
+ const char* extension);
+
+ // Given directory = "dir", relative_path = "test.xml",
+ // returns "dir/test.xml".
+ // On Windows, uses \ as the separator rather than /.
+ static FilePath ConcatPaths(const FilePath& directory,
+ const FilePath& relative_path);
+
+ // Returns a pathname for a file that does not currently exist. The pathname
+ // will be directory/base_name.extension or
+ // directory/base_name_<number>.extension if directory/base_name.extension
+ // already exists. The number will be incremented until a pathname is found
+ // that does not already exist.
+ // Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.
+ // There could be a race condition if two or more processes are calling this
+ // function at the same time -- they could both pick the same filename.
+ static FilePath GenerateUniqueFileName(const FilePath& directory,
+ const FilePath& base_name,
+ const char* extension);
+
+ // Returns true iff the path is NULL or "".
+ bool IsEmpty() const { return c_str() == NULL || *c_str() == '\0'; }
+
+ // If input name has a trailing separator character, removes it and returns
+ // the name, otherwise returns the name string unmodified.
+ // On Windows platform, uses \ as the separator, other platforms use /.
+ FilePath RemoveTrailingPathSeparator() const;
+
+ // Returns a copy of the FilePath with the directory part removed.
+ // Example: FilePath("path/to/file").RemoveDirectoryName() returns
+ // FilePath("file"). If there is no directory part ("just_a_file"), it returns
+ // the FilePath unmodified. If there is no file part ("just_a_dir/") it
+ // returns an empty FilePath ("").
+ // On Windows platform, '\' is the path separator, otherwise it is '/'.
+ FilePath RemoveDirectoryName() const;
+
+ // RemoveFileName returns the directory path with the filename removed.
+ // Example: FilePath("path/to/file").RemoveFileName() returns "path/to/".
+ // If the FilePath is "a_file" or "/a_file", RemoveFileName returns
+ // FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does
+ // not have a file, like "just/a/dir/", it returns the FilePath unmodified.
+ // On Windows platform, '\' is the path separator, otherwise it is '/'.
+ FilePath RemoveFileName() const;
+
+ // Returns a copy of the FilePath with the case-insensitive extension removed.
+ // Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
+ // FilePath("dir/file"). If a case-insensitive extension is not
+ // found, returns a copy of the original FilePath.
+ FilePath RemoveExtension(const char* extension) const;
+
+ // Creates directories so that path exists. Returns true if successful or if
+ // the directories already exist; returns false if unable to create
+ // directories for any reason. Will also return false if the FilePath does
+ // not represent a directory (that is, it doesn't end with a path separator).
+ bool CreateDirectoriesRecursively() const;
+
+ // Create the directory so that path exists. Returns true if successful or
+ // if the directory already exists; returns false if unable to create the
+ // directory for any reason, including if the parent directory does not
+ // exist. Not named "CreateDirectory" because that's a macro on Windows.
+ bool CreateFolder() const;
+
+ // Returns true if FilePath describes something in the file-system,
+ // either a file, directory, or whatever, and that something exists.
+ bool FileOrDirectoryExists() const;
+
+ // Returns true if pathname describes a directory in the file-system
+ // that exists.
+ bool DirectoryExists() const;
+
+ // Returns true if FilePath ends with a path separator, which indicates that
+ // it is intended to represent a directory. Returns false otherwise.
+ // This does NOT check that a directory (or file) actually exists.
+ bool IsDirectory() const;
+
+ // Returns true if pathname describes a root directory. (Windows has one
+ // root directory per disk drive.)
+ bool IsRootDirectory() const;
+
+ // Returns true if pathname describes an absolute path.
+ bool IsAbsolutePath() const;
+
+ private:
+ // Replaces multiple consecutive separators with a single separator.
+ // For example, "bar///foo" becomes "bar/foo". Does not eliminate other
+ // redundancies that might be in a pathname involving "." or "..".
+ //
+ // A pathname with multiple consecutive separators may occur either through
+ // user error or as a result of some scripts or APIs that generate a pathname
+ // with a trailing separator. On other platforms the same API or script
+ // may NOT generate a pathname with a trailing "/". Then elsewhere that
+ // pathname may have another "/" and pathname components added to it,
+ // without checking for the separator already being there.
+ // The script language and operating system may allow paths like "foo//bar"
+ // but some of the functions in FilePath will not handle that correctly. In
+ // particular, RemoveTrailingPathSeparator() only removes one separator, and
+ // it is called in CreateDirectoriesRecursively() assuming that it will change
+ // a pathname from directory syntax (trailing separator) to filename syntax.
+ //
+ // On Windows this method also replaces the alternate path separator '/' with
+ // the primary path separator '\\', so that for example "bar\\/\\foo" becomes
+ // "bar\\foo".
+
+ void Normalize();
+
+ // Returns a pointer to the last occurrence of a valid path separator in
+ // the FilePath. On Windows, for example, both '/' and '\' are valid path
+ // separators. Returns NULL if no path separator was found.
+ const char* FindLastPathSeparator() const;
+
+ String pathname_;
+}; // class FilePath
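+
+// Usage sketch (illustrative), following the method comments above:
+//
+//   FilePath dir("out//dir");      // Normalize() collapses this to "out/dir"
+//   FilePath xml = FilePath::MakeFileName(dir, FilePath("test"), 0, "xml");
+//                                  // "out/dir/test.xml"
+//   xml.RemoveDirectoryName();     // returns FilePath("test.xml")
+//   xml.RemoveExtension("xml");    // returns FilePath("out/dir/test")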
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+// This file was GENERATED by command:
+// pump.py gtest-type-util.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Type utilities needed for implementing typed and type-parameterized
+// tests. This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+// Currently we support at most 50 types in a list, and at most 50
+// type-parameterized tests in one type-parameterized test case.
+// Please contact googletestframework@googlegroups.com if you need
+// more.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+
+
+// #ifdef __GNUC__ is too general here. It is possible to use gcc without using
+// libstdc++ (which is where cxxabi.h comes from).
+# ifdef __GLIBCXX__
+# include <cxxabi.h>
+# elif defined(__HP_aCC)
+# include <acxx_demangle.h>
+# endif // __GLIBCXX__
+
+namespace testing {
+namespace internal {
+
+// GetTypeName<T>() returns a human-readable name of type T.
+// NB: This function is also used in Google Mock, so don't move it inside of
+// the typed-test-only section below.
+template <typename T>
+String GetTypeName() {
+# if GTEST_HAS_RTTI
+
+ const char* const name = typeid(T).name();
+# if defined(__GLIBCXX__) || defined(__HP_aCC)
+ int status = 0;
+ // gcc's implementation of typeid(T).name() mangles the type name,
+ // so we have to demangle it.
+# ifdef __GLIBCXX__
+ using abi::__cxa_demangle;
+# endif // __GLIBCXX__
+ char* const readable_name = __cxa_demangle(name, 0, 0, &status);
+ const String name_str(status == 0 ? readable_name : name);
+ free(readable_name);
+ return name_str;
+# else
+ return name;
+# endif // __GLIBCXX__ || __HP_aCC
+
+# else
+
+ return "<type>";
+
+# endif // GTEST_HAS_RTTI
+}
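+
+// For example, GetTypeName<int>() yields a readable name such as "int" when
+// RTTI is available (demangled with __cxa_demangle on libstdc++), and the
+// placeholder "<type>" when RTTI is disabled.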
+
+#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+// AssertTypeEq<T1, T2>::type is defined iff T1 and T2 are the same
+// type. This can be used as a compile-time assertion to ensure that
+// two types are equal.
+
+template <typename T1, typename T2>
+struct AssertTypeEq;
+
+template <typename T>
+struct AssertTypeEq<T, T> {
+ typedef bool type;
+};
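+
+// For example, the following typedef compiles only because both arguments
+// are the same type:
+//
+//   typedef AssertTypeEq<int, int>::type IntsMatch;    // OK
+//   // typedef AssertTypeEq<int, char>::type Mismatch; // would not compile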
+
+// A unique type used as the default value for the arguments of class
+// template Types. This allows us to simulate variadic templates
+// (e.g. Types<int>, Types<int, double>, etc.), which C++ doesn't
+// support directly.
+struct None {};
+
+// The following family of struct and struct templates are used to
+// represent type lists. In particular, TypesN<T1, T2, ..., TN>
+// represents a type list with N types (T1, T2, ..., and TN) in it.
+// Except for Types0, every struct in the family has two member types:
+// Head for the first type in the list, and Tail for the rest of the
+// list.
+
+// The empty type list.
+struct Types0 {};
+
+// Type lists of length 1, 2, 3, and so on.
+
+template <typename T1>
+struct Types1 {
+ typedef T1 Head;
+ typedef Types0 Tail;
+};
+template <typename T1, typename T2>
+struct Types2 {
+ typedef T1 Head;
+ typedef Types1<T2> Tail;
+};
+
+template <typename T1, typename T2, typename T3>
+struct Types3 {
+ typedef T1 Head;
+ typedef Types2<T2, T3> Tail;
+};
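+
+// Usage sketch (illustrative) of the Head/Tail decomposition:
+//
+//   Types3<int, char, bool>::Head               // int
+//   Types3<int, char, bool>::Tail::Head         // char
+//   Types3<int, char, bool>::Tail::Tail::Head   // bool
+//   Types3<int, char, bool>::Tail::Tail::Tail   // Types0, the empty list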
+
+template <typename T1, typename T2, typename T3, typename T4>
+struct Types4 {
+ typedef T1 Head;
+ typedef Types3<T2, T3, T4> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+struct Types5 {
+ typedef T1 Head;
+ typedef Types4<T2, T3, T4, T5> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+struct Types6 {
+ typedef T1 Head;
+ typedef Types5<T2, T3, T4, T5, T6> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+struct Types7 {
+ typedef T1 Head;
+ typedef Types6<T2, T3, T4, T5, T6, T7> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+struct Types8 {
+ typedef T1 Head;
+ typedef Types7<T2, T3, T4, T5, T6, T7, T8> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+struct Types9 {
+ typedef T1 Head;
+ typedef Types8<T2, T3, T4, T5, T6, T7, T8, T9> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+struct Types10 {
+ typedef T1 Head;
+ typedef Types9<T2, T3, T4, T5, T6, T7, T8, T9, T10> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+struct Types11 {
+ typedef T1 Head;
+ typedef Types10<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12>
+struct Types12 {
+ typedef T1 Head;
+ typedef Types11<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13>
+struct Types13 {
+ typedef T1 Head;
+ typedef Types12<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14>
+struct Types14 {
+ typedef T1 Head;
+ typedef Types13<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15>
+struct Types15 {
+ typedef T1 Head;
+ typedef Types14<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16>
+struct Types16 {
+ typedef T1 Head;
+ typedef Types15<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17>
+struct Types17 {
+ typedef T1 Head;
+ typedef Types16<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18>
+struct Types18 {
+ typedef T1 Head;
+ typedef Types17<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19>
+struct Types19 {
+ typedef T1 Head;
+ typedef Types18<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20>
+struct Types20 {
+ typedef T1 Head;
+ typedef Types19<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21>
+struct Types21 {
+ typedef T1 Head;
+ typedef Types20<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22>
+struct Types22 {
+ typedef T1 Head;
+ typedef Types21<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23>
+struct Types23 {
+ typedef T1 Head;
+ typedef Types22<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24>
+struct Types24 {
+ typedef T1 Head;
+ typedef Types23<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25>
+struct Types25 {
+ typedef T1 Head;
+ typedef Types24<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26>
+struct Types26 {
+ typedef T1 Head;
+ typedef Types25<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27>
+struct Types27 {
+ typedef T1 Head;
+ typedef Types26<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28>
+struct Types28 {
+ typedef T1 Head;
+ typedef Types27<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29>
+struct Types29 {
+ typedef T1 Head;
+ typedef Types28<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30>
+struct Types30 {
+ typedef T1 Head;
+ typedef Types29<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31>
+struct Types31 {
+ typedef T1 Head;
+ typedef Types30<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32>
+struct Types32 {
+ typedef T1 Head;
+ typedef Types31<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33>
+struct Types33 {
+ typedef T1 Head;
+ typedef Types32<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34>
+struct Types34 {
+ typedef T1 Head;
+ typedef Types33<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35>
+struct Types35 {
+ typedef T1 Head;
+ typedef Types34<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36>
+struct Types36 {
+ typedef T1 Head;
+ typedef Types35<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37>
+struct Types37 {
+ typedef T1 Head;
+ typedef Types36<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38>
+struct Types38 {
+ typedef T1 Head;
+ typedef Types37<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39>
+struct Types39 {
+ typedef T1 Head;
+ typedef Types38<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40>
+struct Types40 {
+ typedef T1 Head;
+ typedef Types39<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41>
+struct Types41 {
+ typedef T1 Head;
+ typedef Types40<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42>
+struct Types42 {
+ typedef T1 Head;
+ typedef Types41<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43>
+struct Types43 {
+ typedef T1 Head;
+ typedef Types42<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44>
+struct Types44 {
+ typedef T1 Head;
+ typedef Types43<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45>
+struct Types45 {
+ typedef T1 Head;
+ typedef Types44<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46>
+struct Types46 {
+ typedef T1 Head;
+ typedef Types45<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47>
+struct Types47 {
+ typedef T1 Head;
+ typedef Types46<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48>
+struct Types48 {
+ typedef T1 Head;
+ typedef Types47<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49>
+struct Types49 {
+ typedef T1 Head;
+ typedef Types48<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49, typename T50>
+struct Types50 {
+ typedef T1 Head;
+ typedef Types49<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49, T50> Tail;
+};
+
+
+} // namespace internal
+
+// We don't want to require users to write TypesN<...> directly, as
+// that would require them to count the length. Types<...> is much
+// easier to write, but it generates horrible messages when there is a
+// compiler error, as gcc insists on printing out each template
+// argument, even if it has the default value (this means Types<int>
+// will appear as Types<int, None, None, ..., None> in the compiler
+// errors).
+//
+// Our solution combines the best of both approaches: a user writes
+// Types<T1, ..., TN>, and Google Test translates that to
+// TypesN<T1, ..., TN> internally to make the error messages readable.
+// The translation is done by the 'type' member of the Types template.
+template <typename T1 = internal::None, typename T2 = internal::None,
+ typename T3 = internal::None, typename T4 = internal::None,
+ typename T5 = internal::None, typename T6 = internal::None,
+ typename T7 = internal::None, typename T8 = internal::None,
+ typename T9 = internal::None, typename T10 = internal::None,
+ typename T11 = internal::None, typename T12 = internal::None,
+ typename T13 = internal::None, typename T14 = internal::None,
+ typename T15 = internal::None, typename T16 = internal::None,
+ typename T17 = internal::None, typename T18 = internal::None,
+ typename T19 = internal::None, typename T20 = internal::None,
+ typename T21 = internal::None, typename T22 = internal::None,
+ typename T23 = internal::None, typename T24 = internal::None,
+ typename T25 = internal::None, typename T26 = internal::None,
+ typename T27 = internal::None, typename T28 = internal::None,
+ typename T29 = internal::None, typename T30 = internal::None,
+ typename T31 = internal::None, typename T32 = internal::None,
+ typename T33 = internal::None, typename T34 = internal::None,
+ typename T35 = internal::None, typename T36 = internal::None,
+ typename T37 = internal::None, typename T38 = internal::None,
+ typename T39 = internal::None, typename T40 = internal::None,
+ typename T41 = internal::None, typename T42 = internal::None,
+ typename T43 = internal::None, typename T44 = internal::None,
+ typename T45 = internal::None, typename T46 = internal::None,
+ typename T47 = internal::None, typename T48 = internal::None,
+ typename T49 = internal::None, typename T50 = internal::None>
+struct Types {
+ typedef internal::Types50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47, T48, T49, T50> type;
+};
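+
+// Illustrative example (added for clarity; the hypothetical MyTypes typedef
+// is not part of this header): a user-level typedef such as
+//
+//   typedef Types<char, int, double> MyTypes;
+//
+// leaves the remaining 47 type parameters at their default internal::None,
+// so the matching partial specialization below strips the padding and
+// MyTypes::type is internal::Types3<char, int, double>.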
+
+template <>
+struct Types<internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types0 type;
+};
+template <typename T1>
+struct Types<T1, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types1<T1> type;
+};
+template <typename T1, typename T2>
+struct Types<T1, T2, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types2<T1, T2> type;
+};
+template <typename T1, typename T2, typename T3>
+struct Types<T1, T2, T3, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types3<T1, T2, T3> type;
+};
+template <typename T1, typename T2, typename T3, typename T4>
+struct Types<T1, T2, T3, T4, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types4<T1, T2, T3, T4> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+struct Types<T1, T2, T3, T4, T5, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types5<T1, T2, T3, T4, T5> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+struct Types<T1, T2, T3, T4, T5, T6, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types6<T1, T2, T3, T4, T5, T6> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+struct Types<T1, T2, T3, T4, T5, T6, T7, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types7<T1, T2, T3, T4, T5, T6, T7> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types8<T1, T2, T3, T4, T5, T6, T7, T8> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types9<T1, T2, T3, T4, T5, T6, T7, T8, T9> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ T46, internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ T46, T47, internal::None, internal::None, internal::None> {
+ typedef internal::Types47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ T46, T47, T48, internal::None, internal::None> {
+ typedef internal::Types48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47, T48> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ T46, T47, T48, T49, internal::None> {
+ typedef internal::Types49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47, T48, T49> type;
+};
+
+namespace internal {
+
+# define GTEST_TEMPLATE_ template <typename T> class
+
+// The template "selector" struct TemplateSel<Tmpl> is used to
+// represent Tmpl, which must be a class template with one type
+// parameter, as a type. TemplateSel<Tmpl>::Bind<T>::type is defined
+// as the type Tmpl<T>. This allows us to actually instantiate the
+// template "selected" by TemplateSel<Tmpl>.
+//
+// This trick is necessary for simulating typedef for class templates,
+// which C++ doesn't support directly.
+template <GTEST_TEMPLATE_ Tmpl>
+struct TemplateSel {
+ template <typename T>
+ struct Bind {
+ typedef Tmpl<T> type;
+ };
+};
+
+# define GTEST_BIND_(TmplSel, T) \
+ TmplSel::template Bind<T>::type
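+
+// Usage sketch (illustration only; MyFixture is a hypothetical class
+// template with one type parameter, not defined in this header):
+//
+//   template <typename T> class MyFixture { /* ... */ };
+//
+//   typedef TemplateSel<MyFixture> Sel;   // MyFixture carried as a type.
+//   typedef GTEST_BIND_(Sel, int) Bound;  // Sel::template Bind<int>::type,
+//                                         // i.e. MyFixture<int>.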
+
+// A unique struct template used as the default value for the
+// arguments of class template Templates. This allows us to simulate
+// variadic templates (e.g. Templates<Tmpl1>, Templates<Tmpl1, Tmpl2>,
+// and so on, where each TmplN is a class template), which C++ doesn't
+// support directly.
+template <typename T>
+struct NoneT {};
+
+// The following family of structs and struct templates is used to
+// represent template lists. In particular, TemplatesN<T1, T2, ...,
+// TN> represents a list of N templates (T1, T2, ..., and TN). Except
+// for Templates0, every struct in the family has two member types:
+// Head for the selector of the first template in the list, and Tail
+// for the rest of the list.
+
+// The empty template list.
+struct Templates0 {};
+
+// Template lists of length 1, 2, 3, and so on.
+
+template <GTEST_TEMPLATE_ T1>
+struct Templates1 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates0 Tail;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2>
+struct Templates2 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates1<T2> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3>
+struct Templates3 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates2<T2, T3> Tail;
+};
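+
+// Illustration (A, B, and C stand for arbitrary class templates with one
+// type parameter): Templates3<A, B, C> represents the list (A, B, C).
+// Its Head is TemplateSel<A> and its Tail is Templates2<B, C>, whose Head
+// in turn is TemplateSel<B>, and so on down to Templates0, the empty
+// list.  Code that consumes these lists can walk them recursively via
+// Head and Tail.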
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4>
+struct Templates4 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates3<T2, T3, T4> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5>
+struct Templates5 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates4<T2, T3, T4, T5> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6>
+struct Templates6 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates5<T2, T3, T4, T5, T6> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7>
+struct Templates7 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates6<T2, T3, T4, T5, T6, T7> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8>
+struct Templates8 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates7<T2, T3, T4, T5, T6, T7, T8> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9>
+struct Templates9 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates8<T2, T3, T4, T5, T6, T7, T8, T9> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10>
+struct Templates10 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates9<T2, T3, T4, T5, T6, T7, T8, T9, T10> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11>
+struct Templates11 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates10<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12>
+struct Templates12 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates11<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13>
+struct Templates13 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates12<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14>
+struct Templates14 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates13<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15>
+struct Templates15 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates14<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16>
+struct Templates16 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates15<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17>
+struct Templates17 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates16<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18>
+struct Templates18 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates17<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19>
+struct Templates19 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates18<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20>
+struct Templates20 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates19<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21>
+struct Templates21 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates20<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22>
+struct Templates22 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates21<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23>
+struct Templates23 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates22<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24>
+struct Templates24 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates23<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25>
+struct Templates25 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates24<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26>
+struct Templates26 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates25<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27>
+struct Templates27 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates26<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28>
+struct Templates28 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates27<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29>
+struct Templates29 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates28<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30>
+struct Templates30 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates29<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31>
+struct Templates31 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates30<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32>
+struct Templates32 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates31<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33>
+struct Templates33 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates32<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34>
+struct Templates34 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates33<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35>
+struct Templates35 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates34<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36>
+struct Templates36 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates35<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37>
+struct Templates37 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates36<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38>
+struct Templates38 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates37<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39>
+struct Templates39 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates38<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40>
+struct Templates40 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates39<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41>
+struct Templates41 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates40<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42>
+struct Templates42 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates41<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43>
+struct Templates43 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates42<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44>
+struct Templates44 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates43<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45>
+struct Templates45 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates44<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46>
+struct Templates46 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates45<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47>
+struct Templates47 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates46<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46, T47> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48>
+struct Templates48 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates47<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46, T47, T48> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
+ GTEST_TEMPLATE_ T49>
+struct Templates49 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates48<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46, T47, T48, T49> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
+ GTEST_TEMPLATE_ T49, GTEST_TEMPLATE_ T50>
+struct Templates50 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates49<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46, T47, T48, T49, T50> Tail;
+};
+
+
+// We don't want to require the users to write TemplatesN<...> directly,
+// as that would require them to count the length. Templates<...> is much
+// easier to write, but generates horrible messages when there is a
+// compiler error, as gcc insists on printing out each template
+// argument, even if it has the default value (this means Templates<list>
+// will appear as Templates<list, NoneT, NoneT, ..., NoneT> in the compiler
+// errors).
+//
+// Our solution is to combine the best part of the two approaches: a
+// user would write Templates<T1, ..., TN>, and Google Test will translate
+// that to TemplatesN<T1, ..., TN> internally to make error messages
+// readable. The translation is done by the 'type' member of the
+// Templates template.
+template <GTEST_TEMPLATE_ T1 = NoneT, GTEST_TEMPLATE_ T2 = NoneT,
+ GTEST_TEMPLATE_ T3 = NoneT, GTEST_TEMPLATE_ T4 = NoneT,
+ GTEST_TEMPLATE_ T5 = NoneT, GTEST_TEMPLATE_ T6 = NoneT,
+ GTEST_TEMPLATE_ T7 = NoneT, GTEST_TEMPLATE_ T8 = NoneT,
+ GTEST_TEMPLATE_ T9 = NoneT, GTEST_TEMPLATE_ T10 = NoneT,
+ GTEST_TEMPLATE_ T11 = NoneT, GTEST_TEMPLATE_ T12 = NoneT,
+ GTEST_TEMPLATE_ T13 = NoneT, GTEST_TEMPLATE_ T14 = NoneT,
+ GTEST_TEMPLATE_ T15 = NoneT, GTEST_TEMPLATE_ T16 = NoneT,
+ GTEST_TEMPLATE_ T17 = NoneT, GTEST_TEMPLATE_ T18 = NoneT,
+ GTEST_TEMPLATE_ T19 = NoneT, GTEST_TEMPLATE_ T20 = NoneT,
+ GTEST_TEMPLATE_ T21 = NoneT, GTEST_TEMPLATE_ T22 = NoneT,
+ GTEST_TEMPLATE_ T23 = NoneT, GTEST_TEMPLATE_ T24 = NoneT,
+ GTEST_TEMPLATE_ T25 = NoneT, GTEST_TEMPLATE_ T26 = NoneT,
+ GTEST_TEMPLATE_ T27 = NoneT, GTEST_TEMPLATE_ T28 = NoneT,
+ GTEST_TEMPLATE_ T29 = NoneT, GTEST_TEMPLATE_ T30 = NoneT,
+ GTEST_TEMPLATE_ T31 = NoneT, GTEST_TEMPLATE_ T32 = NoneT,
+ GTEST_TEMPLATE_ T33 = NoneT, GTEST_TEMPLATE_ T34 = NoneT,
+ GTEST_TEMPLATE_ T35 = NoneT, GTEST_TEMPLATE_ T36 = NoneT,
+ GTEST_TEMPLATE_ T37 = NoneT, GTEST_TEMPLATE_ T38 = NoneT,
+ GTEST_TEMPLATE_ T39 = NoneT, GTEST_TEMPLATE_ T40 = NoneT,
+ GTEST_TEMPLATE_ T41 = NoneT, GTEST_TEMPLATE_ T42 = NoneT,
+ GTEST_TEMPLATE_ T43 = NoneT, GTEST_TEMPLATE_ T44 = NoneT,
+ GTEST_TEMPLATE_ T45 = NoneT, GTEST_TEMPLATE_ T46 = NoneT,
+ GTEST_TEMPLATE_ T47 = NoneT, GTEST_TEMPLATE_ T48 = NoneT,
+ GTEST_TEMPLATE_ T49 = NoneT, GTEST_TEMPLATE_ T50 = NoneT>
+struct Templates {
+ typedef Templates50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46, T47, T48, T49, T50> type;
+};
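+
+// Illustration (A and B stand for arbitrary class templates with one type
+// parameter): in Templates<A, B> the remaining 48 arguments default to
+// NoneT, so it matches the partial specialization below whose 'type'
+// member is Templates2<A, B>.  Likewise, Templates<>::type is Templates0.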
+
+template <>
+struct Templates<NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT> {
+ typedef Templates0 type;
+};
+template <GTEST_TEMPLATE_ T1>
+struct Templates<T1, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT> {
+ typedef Templates1<T1> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2>
+struct Templates<T1, T2, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT> {
+ typedef Templates2<T1, T2> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3>
+struct Templates<T1, T2, T3, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates3<T1, T2, T3> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4>
+struct Templates<T1, T2, T3, T4, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates4<T1, T2, T3, T4> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5>
+struct Templates<T1, T2, T3, T4, T5, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates5<T1, T2, T3, T4, T5> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6>
+struct Templates<T1, T2, T3, T4, T5, T6, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates6<T1, T2, T3, T4, T5, T6> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates7<T1, T2, T3, T4, T5, T6, T7> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates8<T1, T2, T3, T4, T5, T6, T7, T8> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates9<T1, T2, T3, T4, T5, T6, T7, T8, T9> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT> {
+ typedef Templates22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT> {
+ typedef Templates23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT> {
+ typedef Templates24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT> {
+ typedef Templates25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT> {
+ typedef Templates26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT> {
+ typedef Templates27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT> {
+ typedef Templates28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT> {
+ typedef Templates29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, T46, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, T46, T47, NoneT, NoneT, NoneT> {
+ typedef Templates47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46, T47> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, T46, T47, T48, NoneT, NoneT> {
+ typedef Templates48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46, T47, T48> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
+ GTEST_TEMPLATE_ T49>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, T46, T47, T48, T49, NoneT> {
+ typedef Templates49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46, T47, T48, T49> type;
+};
+
+// The TypeList template makes it possible to use either a single type
+// or a Types<...> list in TYPED_TEST_CASE() and
+// INSTANTIATE_TYPED_TEST_CASE_P().
+
+template <typename T>
+struct TypeList { typedef Types1<T> type; };
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49, typename T50>
+struct TypeList<Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49, T50> > {
+ typedef typename Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47, T48, T49, T50>::type type;
+};
+
+#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+
+// Due to C++ preprocessor weirdness, we need double indirection to
+// concatenate two tokens when one of them is __LINE__. Writing
+//
+// foo ## __LINE__
+//
+// will result in the token foo__LINE__, instead of foo followed by
+// the current line number. For more details, see
+// http://www.parashift.com/c++-faq-lite/misc-technical-issues.html#faq-39.6
+#define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar)
+#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar
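+
+// A minimal sketch of the two-step expansion (MAKE_UNIQUE_NAME below is a
+// hypothetical helper, shown for illustration only):
+//
+//   #define MAKE_UNIQUE_NAME(prefix) GTEST_CONCAT_TOKEN_(prefix, __LINE__)
+//   int MAKE_UNIQUE_NAME(counter_);  // on line 42 this declares: int counter_42;
+//
+// With single-level pasting (prefix ## __LINE__) the result would be the
+// literal token counter___LINE__ instead of counter_42.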
+
+// Google Test defines the testing::Message class to allow construction of
+// test messages via the << operator. The idea is that anything
+// streamable to std::ostream can be streamed to a testing::Message.
+// This allows a user to use his own types in Google Test assertions by
+// overloading the << operator.
+//
+// util/gtl/stl_logging-inl.h overloads << for STL containers. These
+// overloads cannot be defined in the std namespace, as that will be
+// undefined behavior. Therefore, they are defined in the global
+// namespace instead.
+//
+// C++'s symbol lookup rule (i.e. Koenig lookup) says that these
+// overloads are visible in either the std namespace or the global
+// namespace, but not other namespaces, including the testing
+// namespace which Google Test's Message class is in.
+//
+// To allow STL containers (and other types that have a << operator
+// defined in the global namespace) to be used in Google Test assertions,
+// testing::Message must access the custom << operator from the global
+// namespace. Hence this helper function.
+//
+// Note: Jeffrey Yasskin suggested an alternative fix by "using
+// ::operator<<;" in the definition of Message's operator<<. That fix
+// doesn't require a helper function, but unfortunately doesn't
+// compile with MSVC.
+template <typename T>
+inline void GTestStreamToHelper(std::ostream* os, const T& val) {
+ *os << val;
+}
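+
+// A minimal sketch of the helper in action. Point, its operator<<, and
+// some_ostream are hypothetical and only illustrate the mechanism:
+//
+//   struct Point { int x, y; };
+//   std::ostream& operator<<(std::ostream& os, const Point& p) {
+//     return os << p.x << "," << p.y;
+//   }
+//   ...
+//   const Point p = {1, 2};
+//   GTestStreamToHelper(&some_ostream, p);  // writes "1,2" via the global <<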
+
+class ProtocolMessage;
+namespace proto2 { class Message; }
+
+namespace testing {
+
+// Forward declarations.
+
+class AssertionResult; // Result of an assertion.
+class Message; // Represents a failure message.
+class Test; // Represents a test.
+class TestInfo; // Information about a test.
+class TestPartResult; // Result of a test part.
+class UnitTest; // A collection of test cases.
+
+template <typename T>
+::std::string PrintToString(const T& value);
+
+namespace internal {
+
+struct TraceInfo; // Information about a trace point.
+class ScopedTrace; // Implements scoped trace.
+class TestInfoImpl; // Opaque implementation of TestInfo
+class UnitTestImpl; // Opaque implementation of UnitTest
+
+// How many times InitGoogleTest() has been called.
+extern int g_init_gtest_count;
+
+// The text used in failure messages to indicate the start of the
+// stack trace.
+GTEST_API_ extern const char kStackTraceMarker[];
+
+// A secret type that Google Test users don't know about. It has no
+// definition on purpose. Therefore it's impossible to create a
+// Secret object, which is what we want.
+class Secret;
+
+// Two overloaded helpers for checking at compile time whether an
+// expression is a null pointer literal (i.e. NULL or any 0-valued
+// compile-time integral constant). Their return values have
+// different sizes, so we can use sizeof() to test which version is
+// picked by the compiler. These helpers have no implementations, as
+// we only need their signatures.
+//
+// Given IsNullLiteralHelper(x), the compiler will pick the first
+// version if x can be implicitly converted to Secret*, and pick the
+// second version otherwise. Since Secret is a secret and incomplete
+// type, the only expression a user can write that has type Secret* is
+// a null pointer literal. Therefore, we know that x is a null
+// pointer literal if and only if the first version is picked by the
+// compiler.
+char IsNullLiteralHelper(Secret* p);
+char (&IsNullLiteralHelper(...))[2]; // NOLINT
+
+// A compile-time bool constant that is true if and only if x is a
+// null pointer literal (i.e. NULL or any 0-valued compile-time
+// integral constant).
+#ifdef GTEST_ELLIPSIS_NEEDS_POD_
+// We lose support for NULL detection where the compiler doesn't like
+// passing non-POD classes through ellipsis (...).
+# define GTEST_IS_NULL_LITERAL_(x) false
+#else
+# define GTEST_IS_NULL_LITERAL_(x) \
+ (sizeof(::testing::internal::IsNullLiteralHelper(x)) == 1)
+#endif // GTEST_ELLIPSIS_NEEDS_POD_
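+
+// A minimal sketch of how the overload resolution plays out, assuming the
+// sizeof-based definition above rather than the POD work-around (some_int
+// stands for any ordinary, non-constant int variable):
+//
+//   GTEST_IS_NULL_LITERAL_(NULL)      // 0-valued literal converts to Secret* -> true
+//   GTEST_IS_NULL_LITERAL_(0)         // likewise -> true
+//   GTEST_IS_NULL_LITERAL_("hello")   // const char* doesn't convert to Secret* -> false
+//   GTEST_IS_NULL_LITERAL_(some_int)  // not a compile-time null literal -> false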
+
+// Appends the user-supplied message to the Google-Test-generated message.
+GTEST_API_ String AppendUserMessage(const String& gtest_msg,
+ const Message& user_msg);
+
+// A helper class for creating scoped traces in user programs.
+class GTEST_API_ ScopedTrace {
+ public:
+ // The c'tor pushes the given source file location and message onto
+ // a trace stack maintained by Google Test.
+ ScopedTrace(const char* file, int line, const Message& message);
+
+ // The d'tor pops the info pushed by the c'tor.
+ //
+ // Note that the d'tor is not virtual in order to be efficient.
+ // Don't inherit from ScopedTrace!
+ ~ScopedTrace();
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedTrace);
+} GTEST_ATTRIBUTE_UNUSED_; // A ScopedTrace object does its job in its
+ // c'tor and d'tor. Therefore it doesn't
+ // need to be used otherwise.
+
+// Converts a streamable value to a String. A NULL pointer is
+// converted to "(null)". When the input value is a ::string,
+// ::std::string, ::wstring, or ::std::wstring object, each NUL
+// character in it is replaced with "\\0".
+// Declared here but defined in gtest.h, so that it has access
+// to the definition of the Message class, required by the ARM
+// compiler.
+template <typename T>
+String StreamableToString(const T& streamable);
+
+// The Symbian compiler has a bug that prevents it from selecting the
+// correct overload of FormatForComparisonFailureMessage (see below)
+// unless we pass the first argument by reference. If we do that,
+// however, Visual Age C++ 10.1 generates a compiler error. Therefore
+// we only apply the work-around for Symbian.
+#if defined(__SYMBIAN32__)
+# define GTEST_CREF_WORKAROUND_ const&
+#else
+# define GTEST_CREF_WORKAROUND_
+#endif
+
+// When this operand is a const char* or char*, if the other operand
+// is a ::std::string or ::string, we print this operand as a C string
+// rather than a pointer (we do the same for wide strings); otherwise
+// we print it as a pointer to be safe.
+
+// This internal macro is used to avoid duplicated code.
+#define GTEST_FORMAT_IMPL_(operand2_type, operand1_printer)\
+inline String FormatForComparisonFailureMessage(\
+ operand2_type::value_type* GTEST_CREF_WORKAROUND_ str, \
+ const operand2_type& /*operand2*/) {\
+ return operand1_printer(str);\
+}\
+inline String FormatForComparisonFailureMessage(\
+ const operand2_type::value_type* GTEST_CREF_WORKAROUND_ str, \
+ const operand2_type& /*operand2*/) {\
+ return operand1_printer(str);\
+}
+
+GTEST_FORMAT_IMPL_(::std::string, String::ShowCStringQuoted)
+#if GTEST_HAS_STD_WSTRING
+GTEST_FORMAT_IMPL_(::std::wstring, String::ShowWideCStringQuoted)
+#endif // GTEST_HAS_STD_WSTRING
+
+#if GTEST_HAS_GLOBAL_STRING
+GTEST_FORMAT_IMPL_(::string, String::ShowCStringQuoted)
+#endif // GTEST_HAS_GLOBAL_STRING
+#if GTEST_HAS_GLOBAL_WSTRING
+GTEST_FORMAT_IMPL_(::wstring, String::ShowWideCStringQuoted)
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+#undef GTEST_FORMAT_IMPL_
+
+// The next four overloads handle the case where the operand being
+// printed is a char/wchar_t pointer and the other operand is not a
+// string/wstring object. In such cases, we just print the operand as
+// a pointer to be safe.
+#define GTEST_FORMAT_CHAR_PTR_IMPL_(CharType) \
+ template <typename T> \
+ String FormatForComparisonFailureMessage(CharType* GTEST_CREF_WORKAROUND_ p, \
+ const T&) { \
+ return PrintToString(static_cast<const void*>(p)); \
+ }
+
+GTEST_FORMAT_CHAR_PTR_IMPL_(char)
+GTEST_FORMAT_CHAR_PTR_IMPL_(const char)
+GTEST_FORMAT_CHAR_PTR_IMPL_(wchar_t)
+GTEST_FORMAT_CHAR_PTR_IMPL_(const wchar_t)
+
+#undef GTEST_FORMAT_CHAR_PTR_IMPL_
+
+// Constructs and returns the message for an equality assertion
+// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.
+//
+// The first four parameters are the expressions used in the assertion
+// and their values, as strings. For example, for ASSERT_EQ(foo, bar)
+// where foo is 5 and bar is 6, we have:
+//
+// expected_expression: "foo"
+// actual_expression: "bar"
+// expected_value: "5"
+// actual_value: "6"
+//
+// The ignoring_case parameter is true iff the assertion is a
+// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will
+// be inserted into the message.
+GTEST_API_ AssertionResult EqFailure(const char* expected_expression,
+ const char* actual_expression,
+ const String& expected_value,
+ const String& actual_value,
+ bool ignoring_case);
+
+// Constructs a failure message for Boolean assertions such as EXPECT_TRUE.
+GTEST_API_ String GetBoolAssertionFailureMessage(
+ const AssertionResult& assertion_result,
+ const char* expression_text,
+ const char* actual_predicate_value,
+ const char* expected_predicate_value);
+
+// This template class represents an IEEE floating-point number
+// (either single-precision or double-precision, depending on the
+// template parameters).
+//
+// The purpose of this class is to do more sophisticated number
+// comparison. (Due to round-off error, etc, it's very unlikely that
+// two floating-points will be equal exactly. Hence a naive
+// comparison by the == operation often doesn't work.)
+//
+// Format of IEEE floating-point:
+//
+// The most-significant bit being the leftmost, an IEEE
+// floating-point looks like
+//
+// sign_bit exponent_bits fraction_bits
+//
+// Here, sign_bit is a single bit that designates the sign of the
+// number.
+//
+// For float, there are 8 exponent bits and 23 fraction bits.
+//
+// For double, there are 11 exponent bits and 52 fraction bits.
+//
+// More details can be found at
+// http://en.wikipedia.org/wiki/IEEE_floating-point_standard.
+//
+// Template parameter:
+//
+// RawType: the raw floating-point type (either float or double)
+template <typename RawType>
+class FloatingPoint {
+ public:
+ // Defines the unsigned integer type that has the same size as the
+ // floating point number.
+ typedef typename TypeWithSize<sizeof(RawType)>::UInt Bits;
+
+ // Constants.
+
+ // # of bits in a number.
+ static const size_t kBitCount = 8*sizeof(RawType);
+
+ // # of fraction bits in a number.
+ static const size_t kFractionBitCount =
+ std::numeric_limits<RawType>::digits - 1;
+
+ // # of exponent bits in a number.
+ static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount;
+
+ // The mask for the sign bit.
+ static const Bits kSignBitMask = static_cast<Bits>(1) << (kBitCount - 1);
+
+ // The mask for the fraction bits.
+ static const Bits kFractionBitMask =
+ ~static_cast<Bits>(0) >> (kExponentBitCount + 1);
+
+ // The mask for the exponent bits.
+ static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask);
+
+ // How many ULP's (Units in the Last Place) we want to tolerate when
+ // comparing two numbers. The larger the value, the more error we
+ // allow. A 0 value means that two numbers must be exactly the same
+ // to be considered equal.
+ //
+ // The maximum error of a single floating-point operation is 0.5
+ // units in the last place. On Intel CPU's, all floating-point
+ // calculations are done with 80-bit precision, while double has 64
+ // bits. Therefore, 4 should be enough for ordinary use.
+ //
+ // See the following article for more details on ULP:
+ // http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm.
+ static const size_t kMaxUlps = 4;
+
+ // Constructs a FloatingPoint from a raw floating-point number.
+ //
+ // On an Intel CPU, passing a non-normalized NAN (Not a Number)
+ // around may change its bits, although the new value is guaranteed
+ // to be also a NAN. Therefore, don't expect this constructor to
+ // preserve the bits in x when x is a NAN.
+ explicit FloatingPoint(const RawType& x) { u_.value_ = x; }
+
+ // Static methods
+
+ // Reinterprets a bit pattern as a floating-point number.
+ //
+ // This function is needed to test the AlmostEquals() method.
+ static RawType ReinterpretBits(const Bits bits) {
+ FloatingPoint fp(0);
+ fp.u_.bits_ = bits;
+ return fp.u_.value_;
+ }
+
+ // Returns the floating-point number that represents positive infinity.
+ static RawType Infinity() {
+ return ReinterpretBits(kExponentBitMask);
+ }
+
+ // Non-static methods
+
+ // Returns the bits that represent this number.
+ const Bits &bits() const { return u_.bits_; }
+
+ // Returns the exponent bits of this number.
+ Bits exponent_bits() const { return kExponentBitMask & u_.bits_; }
+
+ // Returns the fraction bits of this number.
+ Bits fraction_bits() const { return kFractionBitMask & u_.bits_; }
+
+ // Returns the sign bit of this number.
+ Bits sign_bit() const { return kSignBitMask & u_.bits_; }
+
+ // Returns true iff this is NAN (not a number).
+ bool is_nan() const {
+ // It's a NAN if the exponent bits are all ones and the fraction
+ // bits are not entirely zeros.
+ return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0);
+ }
+
+ // Returns true iff this number is at most kMaxUlps ULP's away from
+ // rhs. In particular, this function:
+ //
+ // - returns false if either number is (or both are) NAN.
+ // - treats really large numbers as almost equal to infinity.
+ // - thinks +0.0 and -0.0 are 0 ULP's apart.
+ bool AlmostEquals(const FloatingPoint& rhs) const {
+ // The IEEE standard says that any comparison operation involving
+ // a NAN must return false.
+ if (is_nan() || rhs.is_nan()) return false;
+
+ return DistanceBetweenSignAndMagnitudeNumbers(u_.bits_, rhs.u_.bits_)
+ <= kMaxUlps;
+ }
+
+ private:
+ // The data type used to store the actual floating-point number.
+ union FloatingPointUnion {
+ RawType value_; // The raw floating-point number.
+ Bits bits_; // The bits that represent the number.
+ };
+
+ // Converts an integer from the sign-and-magnitude representation to
+ // the biased representation. More precisely, let N be 2 to the
+ // power of (kBitCount - 1); then an integer x is represented by the
+ // unsigned number x + N.
+ //
+ // For instance,
+ //
+ // -N + 1 (the most negative number representable using
+ // sign-and-magnitude) is represented by 1;
+ // 0 is represented by N; and
+ // N - 1 (the biggest number representable using
+ // sign-and-magnitude) is represented by 2N - 1.
+ //
+ // Read http://en.wikipedia.org/wiki/Signed_number_representations
+ // for more details on signed number representations.
+ static Bits SignAndMagnitudeToBiased(const Bits &sam) {
+ if (kSignBitMask & sam) {
+ // sam represents a negative number.
+ return ~sam + 1;
+ } else {
+ // sam represents a positive number.
+ return kSignBitMask | sam;
+ }
+ }
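+
+ // A small worked example, assuming an 8-bit Bits type purely for
+ // illustration (so kSignBitMask == 0x80 and N == 128):
+ //
+ //   sam 0xFF (-127, most negative) -> ~0xFF + 1 == 0x01 == 1
+ //   sam 0x81 (-1)                  -> ~0x81 + 1 == 0x7F == 127
+ //   sam 0x00 (+0)                  -> 0x80 | 0x00 == 0x80 == 128 == N
+ //   sam 0x01 (+1)                  -> 0x80 | 0x01 == 0x81 == 129
+ //   sam 0x7F (+127, most positive) -> 0x80 | 0x7F == 0xFF == 255 == 2N - 1
+ //
+ // That is, each x maps to x + N, so ordering is preserved across zero.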
+
+ // Given two numbers in the sign-and-magnitude representation,
+ // returns the distance between them as an unsigned number.
+ static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1,
+ const Bits &sam2) {
+ const Bits biased1 = SignAndMagnitudeToBiased(sam1);
+ const Bits biased2 = SignAndMagnitudeToBiased(sam2);
+ return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1);
+ }
+
+ FloatingPointUnion u_;
+};
+
+// Typedefs the instances of the FloatingPoint template class that we
+// care to use.
+typedef FloatingPoint<float> Float;
+typedef FloatingPoint<double> Double;
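+
+// A minimal usage sketch of the ULP comparison above (not part of the
+// original header; the bit arithmetic is purely illustrative):
+//
+//   const Float lhs(1.0f);
+//   const Float close(Float::ReinterpretBits(lhs.bits() + 3));    // 3 ULPs up
+//   const Float distant(Float::ReinterpretBits(lhs.bits() + 8));  // 8 ULPs up
+//   lhs.AlmostEquals(close);    // true:  3 <= kMaxUlps (== 4)
+//   lhs.AlmostEquals(distant);  // false: 8 >  kMaxUlps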
+
+// In order to catch the mistake of putting tests that use different
+// test fixture classes in the same test case, we need to assign
+// unique IDs to fixture classes and compare them. The TypeId type is
+// used to hold such IDs. The user should treat TypeId as an opaque
+// type: the only operation allowed on TypeId values is to compare
+// them for equality using the == operator.
+typedef const void* TypeId;
+
+template <typename T>
+class TypeIdHelper {
+ public:
+ // dummy_ must not have a const type. Otherwise an overly eager
+ // compiler (e.g. MSVC 7.1 & 8.0) may try to merge
+ // TypeIdHelper<T>::dummy_ for different Ts as an "optimization".
+ static bool dummy_;
+};
+
+template <typename T>
+bool TypeIdHelper<T>::dummy_ = false;
+
+// GetTypeId<T>() returns the ID of type T. Different values will be
+// returned for different types. Calling the function twice with the
+// same type argument is guaranteed to return the same ID.
+template <typename T>
+TypeId GetTypeId() {
+ // The compiler is required to allocate a different
+ // TypeIdHelper<T>::dummy_ variable for each T used to instantiate
+ // the template. Therefore, the address of dummy_ is guaranteed to
+ // be unique.
+ return &(TypeIdHelper<T>::dummy_);
+}
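+
+// A minimal usage sketch: distinct types get distinct IDs, and repeated
+// calls with the same type agree.
+//
+//   const TypeId int_id = GetTypeId<int>();
+//   const TypeId double_id = GetTypeId<double>();
+//   // int_id == GetTypeId<int>() always holds, while int_id != double_id,
+//   // since &TypeIdHelper<int>::dummy_ != &TypeIdHelper<double>::dummy_.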
+
+// Returns the type ID of ::testing::Test. Always call this instead
+// of GetTypeId< ::testing::Test>() to get the type ID of
+// ::testing::Test, as the latter may give the wrong result due to a
+// suspected linker bug when compiling Google Test as a Mac OS X
+// framework.
+GTEST_API_ TypeId GetTestTypeId();
+
+// Defines the abstract factory interface that creates instances
+// of a Test object.
+class TestFactoryBase {
+ public:
+ virtual ~TestFactoryBase() {}
+
+ // Creates a test instance to run. The instance is both created and destroyed
+ // within TestInfoImpl::Run()
+ virtual Test* CreateTest() = 0;
+
+ protected:
+ TestFactoryBase() {}
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestFactoryBase);
+};
+
+// This class provides an implementation of the TestFactoryBase interface.
+// It is used in TEST and TEST_F macros.
+template <class TestClass>
+class TestFactoryImpl : public TestFactoryBase {
+ public:
+ virtual Test* CreateTest() { return new TestClass; }
+};
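+
+// Editor's note (illustrative sketch, not part of the original header):
+// MyTest below is a hypothetical ::testing::Test subclass.
+//
+//   TestFactoryBase* factory = new TestFactoryImpl<MyTest>;
+//   Test* test = factory->CreateTest();   // A fresh MyTest per run; the
+//                                         // framework owns and deletes it.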
+
+#if GTEST_OS_WINDOWS
+
+// Predicate-formatters for implementing the HRESULT checking macros
+// {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}
+// We pass a long instead of HRESULT to avoid causing an
+// include dependency for the HRESULT type.
+GTEST_API_ AssertionResult IsHRESULTSuccess(const char* expr,
+ long hr); // NOLINT
+GTEST_API_ AssertionResult IsHRESULTFailure(const char* expr,
+ long hr); // NOLINT
+
+#endif // GTEST_OS_WINDOWS
+
+// Types of SetUpTestCase() and TearDownTestCase() functions.
+typedef void (*SetUpTestCaseFunc)();
+typedef void (*TearDownTestCaseFunc)();
+
+// Creates a new TestInfo object and registers it with Google Test;
+// returns the created object.
+//
+// Arguments:
+//
+// test_case_name: name of the test case
+// name: name of the test
+//   type_param:       the name of the test's type parameter, or NULL if
+//                     this is not a typed or a type-parameterized test.
+//   value_param:      text representation of the test's value parameter,
+//                     or NULL if this is not a value-parameterized test.
+// fixture_class_id: ID of the test fixture class
+// set_up_tc: pointer to the function that sets up the test case
+// tear_down_tc: pointer to the function that tears down the test case
+// factory: pointer to the factory that creates a test object.
+// The newly created TestInfo instance will assume
+// ownership of the factory object.
+GTEST_API_ TestInfo* MakeAndRegisterTestInfo(
+ const char* test_case_name, const char* name,
+ const char* type_param,
+ const char* value_param,
+ TypeId fixture_class_id,
+ SetUpTestCaseFunc set_up_tc,
+ TearDownTestCaseFunc tear_down_tc,
+ TestFactoryBase* factory);
+
+// If *pstr starts with the given prefix, modifies *pstr to be right
+// past the prefix and returns true; otherwise leaves *pstr unchanged
+// and returns false. None of pstr, *pstr, and prefix can be NULL.
+GTEST_API_ bool SkipPrefix(const char* prefix, const char** pstr);
+
+#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+// State of the definition of a type-parameterized test case.
+class GTEST_API_ TypedTestCasePState {
+ public:
+ TypedTestCasePState() : registered_(false) {}
+
+  // Adds the given test name to defined_test_names_ and returns true
+ // if the test case hasn't been registered; otherwise aborts the
+ // program.
+ bool AddTestName(const char* file, int line, const char* case_name,
+ const char* test_name) {
+ if (registered_) {
+ fprintf(stderr, "%s Test %s must be defined before "
+ "REGISTER_TYPED_TEST_CASE_P(%s, ...).\n",
+ FormatFileLocation(file, line).c_str(), test_name, case_name);
+ fflush(stderr);
+ posix::Abort();
+ }
+ defined_test_names_.insert(test_name);
+ return true;
+ }
+
+ // Verifies that registered_tests match the test names in
+ // defined_test_names_; returns registered_tests if successful, or
+ // aborts the program otherwise.
+ const char* VerifyRegisteredTestNames(
+ const char* file, int line, const char* registered_tests);
+
+ private:
+ bool registered_;
+ ::std::set<const char*> defined_test_names_;
+};
+
+// Skips to the first non-space char after the first comma in 'str';
+// returns NULL if no comma is found in 'str'.
+inline const char* SkipComma(const char* str) {
+ const char* comma = strchr(str, ',');
+ if (comma == NULL) {
+ return NULL;
+ }
+ while (IsSpace(*(++comma))) {}
+ return comma;
+}
+
+// Returns the prefix of 'str' before the first comma in it; returns
+// the entire string if it contains no comma.
+inline String GetPrefixUntilComma(const char* str) {
+ const char* comma = strchr(str, ',');
+ return comma == NULL ? String(str) : String(str, comma - str);
+}
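+
+// Editor's note (illustrative sketch, not part of the original header):
+// how the two helpers above split a comma-separated list of test names.
+//
+//   const char* names = "DoesFoo, DoesBar";
+//   GetPrefixUntilComma(names);   // String("DoesFoo")
+//   SkipComma(names);             // points at "DoesBar"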
+
+// TypeParameterizedTest<Fixture, TestSel, Types>::Register()
+// registers a list of type-parameterized tests with Google Test. The
+// return value is insignificant - we just need to return something
+// such that we can call this function in a namespace scope.
+//
+// Implementation note: The GTEST_TEMPLATE_ macro declares a template
+// template parameter. It's defined in gtest-type-util.h.
+template <GTEST_TEMPLATE_ Fixture, class TestSel, typename Types>
+class TypeParameterizedTest {
+ public:
+ // 'index' is the index of the test in the type list 'Types'
+ // specified in INSTANTIATE_TYPED_TEST_CASE_P(Prefix, TestCase,
+ // Types). Valid values for 'index' are [0, N - 1] where N is the
+ // length of Types.
+ static bool Register(const char* prefix, const char* case_name,
+ const char* test_names, int index) {
+ typedef typename Types::Head Type;
+ typedef Fixture<Type> FixtureClass;
+ typedef typename GTEST_BIND_(TestSel, Type) TestClass;
+
+ // First, registers the first type-parameterized test in the type
+ // list.
+ MakeAndRegisterTestInfo(
+ String::Format("%s%s%s/%d", prefix, prefix[0] == '\0' ? "" : "/",
+ case_name, index).c_str(),
+ GetPrefixUntilComma(test_names).c_str(),
+ GetTypeName<Type>().c_str(),
+ NULL, // No value parameter.
+ GetTypeId<FixtureClass>(),
+ TestClass::SetUpTestCase,
+ TestClass::TearDownTestCase,
+ new TestFactoryImpl<TestClass>);
+
+ // Next, recurses (at compile time) with the tail of the type list.
+ return TypeParameterizedTest<Fixture, TestSel, typename Types::Tail>
+ ::Register(prefix, case_name, test_names, index + 1);
+ }
+};
+
+// The base case for the compile time recursion.
+template <GTEST_TEMPLATE_ Fixture, class TestSel>
+class TypeParameterizedTest<Fixture, TestSel, Types0> {
+ public:
+ static bool Register(const char* /*prefix*/, const char* /*case_name*/,
+ const char* /*test_names*/, int /*index*/) {
+ return true;
+ }
+};
+
+// TypeParameterizedTestCase<Fixture, Tests, Types>::Register()
+// registers *all combinations* of 'Tests' and 'Types' with Google
+// Test. The return value is insignificant - we just need to return
+// something such that we can call this function in a namespace scope.
+template <GTEST_TEMPLATE_ Fixture, typename Tests, typename Types>
+class TypeParameterizedTestCase {
+ public:
+ static bool Register(const char* prefix, const char* case_name,
+ const char* test_names) {
+ typedef typename Tests::Head Head;
+
+    // First, register the first test in 'Tests' for each type in 'Types'.
+ TypeParameterizedTest<Fixture, Head, Types>::Register(
+ prefix, case_name, test_names, 0);
+
+ // Next, recurses (at compile time) with the tail of the test list.
+ return TypeParameterizedTestCase<Fixture, typename Tests::Tail, Types>
+ ::Register(prefix, case_name, SkipComma(test_names));
+ }
+};
+
+// The base case for the compile time recursion.
+template <GTEST_TEMPLATE_ Fixture, typename Types>
+class TypeParameterizedTestCase<Fixture, Templates0, Types> {
+ public:
+ static bool Register(const char* /*prefix*/, const char* /*case_name*/,
+ const char* /*test_names*/) {
+ return true;
+ }
+};
+
+#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
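+
+// Editor's note (illustrative sketch, not part of the original header):
+// the registration classes above back the type-parameterized test macros
+// declared elsewhere in this header bundle, used roughly like this:
+//
+//   template <typename T> class FooTest : public ::testing::Test {};
+//   TYPED_TEST_CASE_P(FooTest);
+//   TYPED_TEST_P(FooTest, DefaultConstructs) {
+//     TypeParam value = TypeParam();
+//     (void)value;
+//   }
+//   REGISTER_TYPED_TEST_CASE_P(FooTest, DefaultConstructs);
+//   typedef ::testing::Types<int, double> MyTypes;
+//   INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes);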
+
+// Returns the current OS stack trace as a String.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag. The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
+// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
+GTEST_API_ String GetCurrentOsStackTraceExceptTop(UnitTest* unit_test,
+ int skip_count);
+
+// Helpers for suppressing warnings on unreachable code or constant
+// condition.
+
+// Always returns true.
+GTEST_API_ bool AlwaysTrue();
+
+// Always returns false.
+inline bool AlwaysFalse() { return !AlwaysTrue(); }
+
+// Helper for suppressing false warning from Clang on a const char*
+// variable declared in a conditional expression always being NULL in
+// the else branch.
+struct GTEST_API_ ConstCharPtr {
+ ConstCharPtr(const char* str) : value(str) {}
+ operator bool() const { return true; }
+ const char* value;
+};
+
+// A simple Linear Congruential Generator for generating random
+// numbers with a uniform distribution. Unlike rand() and srand(), it
+// doesn't use global state (and therefore can't interfere with user
+// code). Unlike rand_r(), it's portable. An LCG isn't very random,
+// but it's good enough for our purposes.
+class GTEST_API_ Random {
+ public:
+ static const UInt32 kMaxRange = 1u << 31;
+
+ explicit Random(UInt32 seed) : state_(seed) {}
+
+ void Reseed(UInt32 seed) { state_ = seed; }
+
+ // Generates a random number from [0, range). Crashes if 'range' is
+ // 0 or greater than kMaxRange.
+ UInt32 Generate(UInt32 range);
+
+ private:
+ UInt32 state_;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Random);
+};
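+
+// Editor's note (illustrative sketch, not part of the original header):
+//
+//   Random rng(12345);                     // Seeded; no global state involved.
+//   const UInt32 roll = rng.Generate(6);   // Some value in [0, 6).
+//   rng.Reseed(12345);
+//   // rng.Generate(6) now returns 'roll' again: same seed, same sequence.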
+
+// Defining a variable of type CompileAssertTypesEqual<T1, T2> will cause a
+// compiler error iff T1 and T2 are different types.
+template <typename T1, typename T2>
+struct CompileAssertTypesEqual;
+
+template <typename T>
+struct CompileAssertTypesEqual<T, T> {
+};
+
+// Removes the reference from a type if it is a reference type,
+// otherwise leaves it unchanged. This is the same as
+// tr1::remove_reference, which is not widely available yet.
+template <typename T>
+struct RemoveReference { typedef T type; }; // NOLINT
+template <typename T>
+struct RemoveReference<T&> { typedef T type; }; // NOLINT
+
+// A handy wrapper around RemoveReference that works when the argument
+// T depends on template parameters.
+#define GTEST_REMOVE_REFERENCE_(T) \
+ typename ::testing::internal::RemoveReference<T>::type
+
+// Removes const from a type if it is a const type, otherwise leaves
+// it unchanged. This is the same as tr1::remove_const, which is not
+// widely available yet.
+template <typename T>
+struct RemoveConst { typedef T type; }; // NOLINT
+template <typename T>
+struct RemoveConst<const T> { typedef T type; }; // NOLINT
+
+// MSVC 8.0, Sun C++, and IBM XL C++ have a bug which causes the above
+// definition to fail to remove the const in 'const int[3]' and 'const
+// char[3][4]'. The following specialization works around the bug.
+// However, it causes trouble with GCC and thus needs to be
+// conditionally compiled.
+#if defined(_MSC_VER) || defined(__SUNPRO_CC) || defined(__IBMCPP__)
+template <typename T, size_t N>
+struct RemoveConst<const T[N]> {
+ typedef typename RemoveConst<T>::type type[N];
+};
+#endif
+
+// A handy wrapper around RemoveConst that works when the argument
+// T depends on template parameters.
+#define GTEST_REMOVE_CONST_(T) \
+ typename ::testing::internal::RemoveConst<T>::type
+
+// Turns const U&, U&, const U, and U all into U.
+#define GTEST_REMOVE_REFERENCE_AND_CONST_(T) \
+ GTEST_REMOVE_CONST_(GTEST_REMOVE_REFERENCE_(T))
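+
+// Editor's note (illustrative sketch, not part of the original header):
+// the traits above verified with CompileAssertTypesEqual from earlier.
+//
+//   template <typename T>
+//   void CheckDecaysToInt() {
+//     CompileAssertTypesEqual<GTEST_REMOVE_REFERENCE_AND_CONST_(T), int>();
+//   }
+//   // CheckDecaysToInt<int>(), <const int>(), <int&>(), and <const int&>()
+//   // all compile; CheckDecaysToInt<double>() does not.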
+
+// Adds reference to a type if it is not a reference type,
+// otherwise leaves it unchanged. This is the same as
+// tr1::add_reference, which is not widely available yet.
+template <typename T>
+struct AddReference { typedef T& type; }; // NOLINT
+template <typename T>
+struct AddReference<T&> { typedef T& type; }; // NOLINT
+
+// A handy wrapper around AddReference that works when the argument T
+// depends on template parameters.
+#define GTEST_ADD_REFERENCE_(T) \
+ typename ::testing::internal::AddReference<T>::type
+
+// Adds a reference to const on top of T as necessary. For example,
+// it transforms
+//
+// char ==> const char&
+// const char ==> const char&
+// char& ==> const char&
+// const char& ==> const char&
+//
+// The argument T must depend on some template parameters.
+#define GTEST_REFERENCE_TO_CONST_(T) \
+ GTEST_ADD_REFERENCE_(const GTEST_REMOVE_REFERENCE_(T))
+
+// ImplicitlyConvertible<From, To>::value is a compile-time bool
+// constant that's true iff type From can be implicitly converted to
+// type To.
+template <typename From, typename To>
+class ImplicitlyConvertible {
+ private:
+ // We need the following helper functions only for their types.
+ // They have no implementations.
+
+ // MakeFrom() is an expression whose type is From. We cannot simply
+ // use From(), as the type From may not have a public default
+ // constructor.
+ static From MakeFrom();
+
+ // These two functions are overloaded. Given an expression
+ // Helper(x), the compiler will pick the first version if x can be
+ // implicitly converted to type To; otherwise it will pick the
+ // second version.
+ //
+ // The first version returns a value of size 1, and the second
+ // version returns a value of size 2. Therefore, by checking the
+ // size of Helper(x), which can be done at compile time, we can tell
+ // which version of Helper() is used, and hence whether x can be
+ // implicitly converted to type To.
+ static char Helper(To);
+ static char (&Helper(...))[2]; // NOLINT
+
+ // We have to put the 'public' section after the 'private' section,
+ // or MSVC refuses to compile the code.
+ public:
+ // MSVC warns about implicitly converting from double to int for
+ // possible loss of data, so we need to temporarily disable the
+ // warning.
+#ifdef _MSC_VER
+# pragma warning(push) // Saves the current warning state.
+# pragma warning(disable:4244) // Temporarily disables warning 4244.
+
+ static const bool value =
+ sizeof(Helper(ImplicitlyConvertible::MakeFrom())) == 1;
+# pragma warning(pop) // Restores the warning state.
+#elif defined(__BORLANDC__)
+ // C++Builder cannot use member overload resolution during template
+ // instantiation. The simplest workaround is to use its C++0x type traits
+ // functions (C++Builder 2009 and above only).
+ static const bool value = __is_convertible(From, To);
+#else
+ static const bool value =
+ sizeof(Helper(ImplicitlyConvertible::MakeFrom())) == 1;
+#endif  // _MSC_VER
+};
+template <typename From, typename To>
+const bool ImplicitlyConvertible<From, To>::value;
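+
+// Editor's note (illustrative sketch, not part of the original header):
+//
+//   ImplicitlyConvertible<int, double>::value      // true
+//   ImplicitlyConvertible<double*, void*>::value   // true
+//   ImplicitlyConvertible<void*, double*>::value   // false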
+
+// IsAProtocolMessage<T>::value is a compile-time bool constant that's
+// true iff T is type ProtocolMessage, proto2::Message, or a subclass
+// of those.
+template <typename T>
+struct IsAProtocolMessage
+ : public bool_constant<
+ ImplicitlyConvertible<const T*, const ::ProtocolMessage*>::value ||
+ ImplicitlyConvertible<const T*, const ::proto2::Message*>::value> {
+};
+
+// When the compiler sees expression IsContainerTest<C>(0), if C is an
+// STL-style container class, the first overload of IsContainerTest
+// will be viable (since both C::iterator* and C::const_iterator* are
+// valid types and NULL can be implicitly converted to them). It will
+// be picked over the second overload as 'int' is a perfect match for
+// the type of argument 0. If C::iterator or C::const_iterator is not
+// a valid type, the first overload is not viable, and the second
+// overload will be picked. Therefore, we can determine whether C is
+// a container class by checking the type of IsContainerTest<C>(0).
+// The value of the expression is insignificant.
+//
+// Note that we look for both C::iterator and C::const_iterator. The
+// reason is that C++ injects the name of a class as a member of the
+// class itself (e.g. you can refer to class iterator as either
+// 'iterator' or 'iterator::iterator'). If we look for C::iterator
+// only, for example, we would mistakenly think that a class named
+// iterator is an STL container.
+//
+// Also note that the simpler approach of overloading
+// IsContainerTest(typename C::const_iterator*) and
+// IsContainerTest(...) doesn't work with Visual Age C++ and Sun C++.
+typedef int IsContainer;
+template <class C>
+IsContainer IsContainerTest(int /* dummy */,
+ typename C::iterator* /* it */ = NULL,
+ typename C::const_iterator* /* const_it */ = NULL) {
+ return 0;
+}
+
+typedef char IsNotContainer;
+template <class C>
+IsNotContainer IsContainerTest(long /* dummy */) { return '\0'; }
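+
+// Editor's note (illustrative sketch, not part of the original header):
+// the check is purely a compile-time sizeof comparison.
+//
+//   sizeof(IsContainerTest<std::vector<int> >(0)) == sizeof(IsContainer)
+//       // true: vector<int> provides iterator and const_iterator.
+//   sizeof(IsContainerTest<int>(0)) == sizeof(IsContainer)
+//       // false: only the 'long' overload is viable, yielding IsNotContainer.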
+
+// EnableIf<condition>::type is void when 'Cond' is true, and
+// undefined when 'Cond' is false. To use SFINAE to make a function
+// overload only apply when a particular expression is true, add
+// "typename EnableIf<expression>::type* = 0" as the last parameter.
+template<bool> struct EnableIf;
+template<> struct EnableIf<true> { typedef void type; }; // NOLINT
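+
+// Editor's note (illustrative sketch, not part of the original header):
+// a hypothetical overload restricted to container types via EnableIf and
+// IsContainerTest above.
+//
+//   template <typename C>
+//   void PrintElementCount(
+//       const C& container,
+//       typename EnableIf<
+//           sizeof(IsContainerTest<C>(0)) == sizeof(IsContainer)>::type* = 0);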
+
+// Utilities for native arrays.
+
+// ArrayEq() compares two k-dimensional native arrays using the
+// elements' operator==, where k can be any integer >= 0. When k is
+// 0, ArrayEq() degenerates into comparing a single pair of values.
+
+template <typename T, typename U>
+bool ArrayEq(const T* lhs, size_t size, const U* rhs);
+
+// This generic version is used when k is 0.
+template <typename T, typename U>
+inline bool ArrayEq(const T& lhs, const U& rhs) { return lhs == rhs; }
+
+// This overload is used when k >= 1.
+template <typename T, typename U, size_t N>
+inline bool ArrayEq(const T(&lhs)[N], const U(&rhs)[N]) {
+ return internal::ArrayEq(lhs, N, rhs);
+}
+
+// This helper reduces code bloat. If we instead put its logic inside
+// the previous ArrayEq() function, arrays with different sizes would
+// lead to different copies of the template code.
+template <typename T, typename U>
+bool ArrayEq(const T* lhs, size_t size, const U* rhs) {
+ for (size_t i = 0; i != size; i++) {
+ if (!internal::ArrayEq(lhs[i], rhs[i]))
+ return false;
+ }
+ return true;
+}
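+
+// Editor's note (illustrative sketch, not part of the original header):
+// ArrayEq() recurses into nested native arrays.
+//
+//   const int a[2][3] = { { 1, 2, 3 }, { 4, 5, 6 } };
+//   const int b[2][3] = { { 1, 2, 3 }, { 4, 5, 6 } };
+//   ArrayEq(a, b);   // true: compares the six elements with operator==.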
+
+// Finds the first element in the iterator range [begin, end) that
+// equals elem. Element may be a native array type itself.
+template <typename Iter, typename Element>
+Iter ArrayAwareFind(Iter begin, Iter end, const Element& elem) {
+ for (Iter it = begin; it != end; ++it) {
+ if (internal::ArrayEq(*it, elem))
+ return it;
+ }
+ return end;
+}
+
+// CopyArray() copies a k-dimensional native array using the elements'
+// operator=, where k can be any integer >= 0. When k is 0,
+// CopyArray() degenerates into copying a single value.
+
+template <typename T, typename U>
+void CopyArray(const T* from, size_t size, U* to);
+
+// This generic version is used when k is 0.
+template <typename T, typename U>
+inline void CopyArray(const T& from, U* to) { *to = from; }
+
+// This overload is used when k >= 1.
+template <typename T, typename U, size_t N>
+inline void CopyArray(const T(&from)[N], U(*to)[N]) {
+ internal::CopyArray(from, N, *to);
+}
+
+// This helper reduces code bloat. If we instead put its logic inside
+// the previous CopyArray() function, arrays with different sizes
+// would lead to different copies of the template code.
+template <typename T, typename U>
+void CopyArray(const T* from, size_t size, U* to) {
+ for (size_t i = 0; i != size; i++) {
+ internal::CopyArray(from[i], to + i);
+ }
+}
+
+// The relation between a NativeArray object (see below) and the
+// native array it represents.
+enum RelationToSource {
+ kReference, // The NativeArray references the native array.
+ kCopy // The NativeArray makes a copy of the native array and
+ // owns the copy.
+};
+
+// Adapts a native array to a read-only STL-style container. Instead
+// of the complete STL container concept, this adaptor only implements
+// members useful for Google Mock's container matchers. New members
+// should be added as needed. To simplify the implementation, we only
+// support Element being a raw type (i.e. having no top-level const or
+// reference modifier). It's the client's responsibility to satisfy
+// this requirement. Element can be an array type itself (hence
+// multi-dimensional arrays are supported).
+template <typename Element>
+class NativeArray {
+ public:
+ // STL-style container typedefs.
+ typedef Element value_type;
+ typedef Element* iterator;
+ typedef const Element* const_iterator;
+
+ // Constructs from a native array.
+ NativeArray(const Element* array, size_t count, RelationToSource relation) {
+ Init(array, count, relation);
+ }
+
+ // Copy constructor.
+ NativeArray(const NativeArray& rhs) {
+ Init(rhs.array_, rhs.size_, rhs.relation_to_source_);
+ }
+
+ ~NativeArray() {
+ // Ensures that the user doesn't instantiate NativeArray with a
+ // const or reference type.
+ static_cast<void>(StaticAssertTypeEqHelper<Element,
+ GTEST_REMOVE_REFERENCE_AND_CONST_(Element)>());
+ if (relation_to_source_ == kCopy)
+ delete[] array_;
+ }
+
+ // STL-style container methods.
+ size_t size() const { return size_; }
+ const_iterator begin() const { return array_; }
+ const_iterator end() const { return array_ + size_; }
+ bool operator==(const NativeArray& rhs) const {
+ return size() == rhs.size() &&
+ ArrayEq(begin(), size(), rhs.begin());
+ }
+
+ private:
+ // Initializes this object; makes a copy of the input array if
+ // 'relation' is kCopy.
+ void Init(const Element* array, size_t a_size, RelationToSource relation) {
+ if (relation == kReference) {
+ array_ = array;
+ } else {
+ Element* const copy = new Element[a_size];
+ CopyArray(array, a_size, copy);
+ array_ = copy;
+ }
+ size_ = a_size;
+ relation_to_source_ = relation;
+ }
+
+ const Element* array_;
+ size_t size_;
+ RelationToSource relation_to_source_;
+
+ GTEST_DISALLOW_ASSIGN_(NativeArray);
+};
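+
+// Editor's note (illustrative sketch, not part of the original header):
+//
+//   const int values[] = { 1, 2, 3 };
+//   NativeArray<int> view(values, 3, kReference);   // No copy is made.
+//   view.size();      // 3
+//   *view.begin();    // 1
+//   // With kCopy instead, the NativeArray would own a heap copy and
+//   // delete it in its destructor.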
+
+} // namespace internal
+} // namespace testing
+
+#define GTEST_MESSAGE_AT_(file, line, message, result_type) \
+ ::testing::internal::AssertHelper(result_type, file, line, message) \
+ = ::testing::Message()
+
+#define GTEST_MESSAGE_(message, result_type) \
+ GTEST_MESSAGE_AT_(__FILE__, __LINE__, message, result_type)
+
+#define GTEST_FATAL_FAILURE_(message) \
+ return GTEST_MESSAGE_(message, ::testing::TestPartResult::kFatalFailure)
+
+#define GTEST_NONFATAL_FAILURE_(message) \
+ GTEST_MESSAGE_(message, ::testing::TestPartResult::kNonFatalFailure)
+
+#define GTEST_SUCCESS_(message) \
+ GTEST_MESSAGE_(message, ::testing::TestPartResult::kSuccess)
+
+// Suppresses MSVC warning 4702 (unreachable code) for the code following
+// statement if it returns or throws (or doesn't return or throw in some
+// situations).
+#define GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) \
+ if (::testing::internal::AlwaysTrue()) { statement; }
+
+#define GTEST_TEST_THROW_(statement, expected_exception, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::ConstCharPtr gtest_msg = "") { \
+ bool gtest_caught_expected = false; \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } \
+ catch (expected_exception const&) { \
+ gtest_caught_expected = true; \
+ } \
+ catch (...) { \
+ gtest_msg.value = \
+ "Expected: " #statement " throws an exception of type " \
+ #expected_exception ".\n Actual: it throws a different type."; \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
+ } \
+ if (!gtest_caught_expected) { \
+ gtest_msg.value = \
+ "Expected: " #statement " throws an exception of type " \
+ #expected_exception ".\n Actual: it throws nothing."; \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__): \
+ fail(gtest_msg.value)
+
+#define GTEST_TEST_NO_THROW_(statement, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } \
+ catch (...) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__): \
+ fail("Expected: " #statement " doesn't throw an exception.\n" \
+ " Actual: it throws.")
+
+#define GTEST_TEST_ANY_THROW_(statement, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ bool gtest_caught_any = false; \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } \
+ catch (...) { \
+ gtest_caught_any = true; \
+ } \
+ if (!gtest_caught_any) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__): \
+ fail("Expected: " #statement " throws an exception.\n" \
+ " Actual: it doesn't.")
+
+
+// Implements Boolean test assertions such as EXPECT_TRUE. expression can be
+// either a boolean expression or an AssertionResult. text is a textual
+// representation of expression as it was passed into the EXPECT_TRUE.
+#define GTEST_TEST_BOOLEAN_(expression, text, actual, expected, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (const ::testing::AssertionResult gtest_ar_ = \
+ ::testing::AssertionResult(expression)) \
+ ; \
+ else \
+ fail(::testing::internal::GetBoolAssertionFailureMessage(\
+ gtest_ar_, text, #actual, #expected).c_str())
+
+#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ ::testing::internal::HasNewFatalFailureHelper gtest_fatal_failure_checker; \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__): \
+ fail("Expected: " #statement " doesn't generate new fatal " \
+ "failures in the current thread.\n" \
+ " Actual: it does.")
+
+// Expands to the name of the class that implements the given test.
+#define GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
+ test_case_name##_##test_name##_Test
+
+// Helper macro for defining tests.
+#define GTEST_TEST_(test_case_name, test_name, parent_class, parent_id)\
+class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) : public parent_class {\
+ public:\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {}\
+ private:\
+ virtual void TestBody();\
+ static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_;\
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name));\
+};\
+\
+::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_case_name, test_name)\
+ ::test_info_ =\
+ ::testing::internal::MakeAndRegisterTestInfo(\
+ #test_case_name, #test_name, NULL, NULL, \
+ (parent_id), \
+ parent_class::SetUpTestCase, \
+ parent_class::TearDownTestCase, \
+ new ::testing::internal::TestFactoryImpl<\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>);\
+void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
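+
+// Editor's note (sketch, not part of the original header): gtest.h defines
+// TEST(FooTest, Bar) in terms of the macro above, roughly as
+//
+//   GTEST_TEST_(FooTest, Bar, ::testing::Test,
+//               ::testing::internal::GetTestTypeId())
+//
+// which declares class FooTest_Bar_Test, registers it through
+// MakeAndRegisterTestInfo(), and leaves FooTest_Bar_Test::TestBody() for
+// the test author's code block.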
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the public API for death tests. It is
+// #included by gtest.h so a user doesn't need to include this
+// directly.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines internal utilities needed for implementing
+// death tests. They are subject to change without notice.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+
+
+#include <stdio.h>
+
+namespace testing {
+namespace internal {
+
+GTEST_DECLARE_string_(internal_run_death_test);
+
+// Names of the flags (needed for parsing Google Test flags).
+const char kDeathTestStyleFlag[] = "death_test_style";
+const char kDeathTestUseFork[] = "death_test_use_fork";
+const char kInternalRunDeathTestFlag[] = "internal_run_death_test";
+
+#if GTEST_HAS_DEATH_TEST
+
+// DeathTest is a class that hides much of the complexity of the
+// GTEST_DEATH_TEST_ macro. It is abstract; its static Create method
+// returns a concrete class that depends on the prevailing death test
+// style, as defined by the --gtest_death_test_style and/or
+// --gtest_internal_run_death_test flags.
+
+// In describing the results of death tests, these terms are used with
+// the corresponding definitions:
+//
+// exit status: The integer exit information in the format specified
+// by wait(2)
+// exit code: The integer code passed to exit(3), _exit(2), or
+// returned from main()
+class GTEST_API_ DeathTest {
+ public:
+ // Create returns false if there was an error determining the
+ // appropriate action to take for the current death test; for example,
+ // if the gtest_death_test_style flag is set to an invalid value.
+ // The LastMessage method will return a more detailed message in that
+ // case. Otherwise, the DeathTest pointer pointed to by the "test"
+ // argument is set. If the death test should be skipped, the pointer
+ // is set to NULL; otherwise, it is set to the address of a new concrete
+ // DeathTest object that controls the execution of the current test.
+ static bool Create(const char* statement, const RE* regex,
+ const char* file, int line, DeathTest** test);
+ DeathTest();
+ virtual ~DeathTest() { }
+
+ // A helper class that aborts a death test when it's deleted.
+ class ReturnSentinel {
+ public:
+ explicit ReturnSentinel(DeathTest* test) : test_(test) { }
+ ~ReturnSentinel() { test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT); }
+ private:
+ DeathTest* const test_;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel);
+ } GTEST_ATTRIBUTE_UNUSED_;
+
+ // An enumeration of possible roles that may be taken when a death
+ // test is encountered. EXECUTE means that the death test logic should
+ // be executed immediately. OVERSEE means that the program should prepare
+ // the appropriate environment for a child process to execute the death
+ // test, then wait for it to complete.
+ enum TestRole { OVERSEE_TEST, EXECUTE_TEST };
+
+ // An enumeration of the three reasons that a test might be aborted.
+ enum AbortReason {
+ TEST_ENCOUNTERED_RETURN_STATEMENT,
+ TEST_THREW_EXCEPTION,
+ TEST_DID_NOT_DIE
+ };
+
+ // Assumes one of the above roles.
+ virtual TestRole AssumeRole() = 0;
+
+ // Waits for the death test to finish and returns its status.
+ virtual int Wait() = 0;
+
+ // Returns true if the death test passed; that is, the test process
+ // exited during the test, its exit status matches a user-supplied
+ // predicate, and its stderr output matches a user-supplied regular
+ // expression.
+ // The user-supplied predicate may be a macro expression rather
+ // than a function pointer or functor, or else Wait and Passed could
+ // be combined.
+ virtual bool Passed(bool exit_status_ok) = 0;
+
+ // Signals that the death test did not die as expected.
+ virtual void Abort(AbortReason reason) = 0;
+
+  // Returns a human-readable message describing the outcome of the
+  // last death test.
+ static const char* LastMessage();
+
+ static void set_last_death_test_message(const String& message);
+
+ private:
+ // A string containing a description of the outcome of the last death test.
+ static String last_death_test_message_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest);
+};
+
+// Factory interface for death tests. May be mocked out for testing.
+class DeathTestFactory {
+ public:
+ virtual ~DeathTestFactory() { }
+ virtual bool Create(const char* statement, const RE* regex,
+ const char* file, int line, DeathTest** test) = 0;
+};
+
+// A concrete DeathTestFactory implementation for normal use.
+class DefaultDeathTestFactory : public DeathTestFactory {
+ public:
+ virtual bool Create(const char* statement, const RE* regex,
+ const char* file, int line, DeathTest** test);
+};
+
+// Returns true if exit_status describes a process that was terminated
+// by a signal, or exited normally with a nonzero exit code.
+GTEST_API_ bool ExitedUnsuccessfully(int exit_status);
+
+// Traps C++ exceptions escaping statement and reports them as test
+// failures. Note that trapping SEH exceptions is not implemented here.
+# if GTEST_HAS_EXCEPTIONS
+# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } catch (const ::std::exception& gtest_exception) { \
+ fprintf(\
+ stderr, \
+ "\n%s: Caught std::exception-derived exception escaping the " \
+ "death test statement. Exception message: %s\n", \
+ ::testing::internal::FormatFileLocation(__FILE__, __LINE__).c_str(), \
+ gtest_exception.what()); \
+ fflush(stderr); \
+ death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
+ } catch (...) { \
+ death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
+ }
+
+# else
+# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
+
+# endif
+
+// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*,
+// ASSERT_EXIT*, and EXPECT_EXIT*.
+# define GTEST_DEATH_TEST_(statement, predicate, regex, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ const ::testing::internal::RE& gtest_regex = (regex); \
+ ::testing::internal::DeathTest* gtest_dt; \
+ if (!::testing::internal::DeathTest::Create(#statement, &gtest_regex, \
+ __FILE__, __LINE__, &gtest_dt)) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
+ } \
+ if (gtest_dt != NULL) { \
+ ::testing::internal::scoped_ptr< ::testing::internal::DeathTest> \
+ gtest_dt_ptr(gtest_dt); \
+ switch (gtest_dt->AssumeRole()) { \
+ case ::testing::internal::DeathTest::OVERSEE_TEST: \
+ if (!gtest_dt->Passed(predicate(gtest_dt->Wait()))) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
+ } \
+ break; \
+ case ::testing::internal::DeathTest::EXECUTE_TEST: { \
+ ::testing::internal::DeathTest::ReturnSentinel \
+ gtest_sentinel(gtest_dt); \
+ GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, gtest_dt); \
+ gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \
+ break; \
+ } \
+ default: \
+ break; \
+ } \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__): \
+ fail(::testing::internal::DeathTest::LastMessage())
+// The symbol "fail" here expands to something into which a message
+// can be streamed.
+
+// A class representing the parsed contents of the
+// --gtest_internal_run_death_test flag, as it existed when
+// RUN_ALL_TESTS was called.
+class InternalRunDeathTestFlag {
+ public:
+ InternalRunDeathTestFlag(const String& a_file,
+ int a_line,
+ int an_index,
+ int a_write_fd)
+ : file_(a_file), line_(a_line), index_(an_index),
+ write_fd_(a_write_fd) {}
+
+ ~InternalRunDeathTestFlag() {
+ if (write_fd_ >= 0)
+ posix::Close(write_fd_);
+ }
+
+ String file() const { return file_; }
+ int line() const { return line_; }
+ int index() const { return index_; }
+ int write_fd() const { return write_fd_; }
+
+ private:
+ String file_;
+ int line_;
+ int index_;
+ int write_fd_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(InternalRunDeathTestFlag);
+};
+
+// Returns a newly created InternalRunDeathTestFlag object with fields
+// initialized from the GTEST_FLAG(internal_run_death_test) flag if
+// the flag is specified; otherwise returns NULL.
+InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag();
+
+#else // GTEST_HAS_DEATH_TEST
+
+// This macro is used for implementing macros such as
+// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where
+// death tests are not supported. Those macros must compile on such systems
+// iff EXPECT_DEATH and ASSERT_DEATH compile with the same parameters on
+// systems that support death tests. This allows one to write such a macro
+// on a system that does not support death tests and be sure that it will
+// compile on a death-test supporting system.
+//
+// Parameters:
+// statement - A statement that a macro such as EXPECT_DEATH would test
+// for program termination. This macro has to make sure this
+// statement is compiled but not executed, to ensure that
+// EXPECT_DEATH_IF_SUPPORTED compiles with a certain
+// parameter iff EXPECT_DEATH compiles with it.
+// regex - A regex that a macro such as EXPECT_DEATH would use to test
+// the output of statement. This parameter has to be
+// compiled but not evaluated by this macro, to ensure that
+// this macro only accepts expressions that a macro such as
+// EXPECT_DEATH would accept.
+// terminator - Must be an empty statement for EXPECT_DEATH_IF_SUPPORTED
+// and a return statement for ASSERT_DEATH_IF_SUPPORTED.
+// This ensures that ASSERT_DEATH_IF_SUPPORTED will not
+// compile inside functions where ASSERT_DEATH doesn't
+// compile.
+//
+// The branch that has an always false condition is used to ensure that
+// statement and regex are compiled (and thus syntactically correct) but
+// never executed. The unreachable code macro protects the terminator
+// statement from generating an 'unreachable code' warning in case
+// statement unconditionally returns or throws. The Message constructor at
+// the end allows the syntax of streaming additional messages into the
+// macro, for compile-time compatibility with EXPECT_DEATH/ASSERT_DEATH.
+# define GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, terminator) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ GTEST_LOG_(WARNING) \
+ << "Death tests are not supported on this platform.\n" \
+ << "Statement '" #statement "' cannot be verified."; \
+ } else if (::testing::internal::AlwaysFalse()) { \
+ ::testing::internal::RE::PartialMatch(".*", (regex)); \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ terminator; \
+ } else \
+ ::testing::Message()
+
+#endif // GTEST_HAS_DEATH_TEST
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+
+namespace testing {
+
+// This flag controls the style of death tests. Valid values are "threadsafe",
+// meaning that the death test child process will re-execute the test binary
+// from the start, running only a single death test, or "fast",
+// meaning that the child process will execute the test logic immediately
+// after forking.
+GTEST_DECLARE_string_(death_test_style);
+
+#if GTEST_HAS_DEATH_TEST
+
+// The following macros are useful for writing death tests.
+
+// Here's what happens when an ASSERT_DEATH* or EXPECT_DEATH* is
+// executed:
+//
+// 1. It generates a warning if there is more than one active
+// thread. This is because it's safe to fork() or clone() only
+// when there is a single thread.
+//
+// 2. The parent process clone()s a sub-process and runs the death
+// test in it; the sub-process exits with code 0 at the end of the
+// death test, if it hasn't exited already.
+//
+// 3. The parent process waits for the sub-process to terminate.
+//
+// 4. The parent process checks the exit code and error message of
+// the sub-process.
+//
+// Examples:
+//
+// ASSERT_DEATH(server.SendMessage(56, "Hello"), "Invalid port number");
+// for (int i = 0; i < 5; i++) {
+// EXPECT_DEATH(server.ProcessRequest(i),
+// "Invalid request .* in ProcessRequest()")
+//         << "Failed to die on request " << i;
+// }
+//
+// ASSERT_EXIT(server.ExitNow(), ::testing::ExitedWithCode(0), "Exiting");
+//
+// bool KilledBySIGHUP(int exit_code) {
+// return WIFSIGNALED(exit_code) && WTERMSIG(exit_code) == SIGHUP;
+// }
+//
+// ASSERT_EXIT(client.HangUpServer(), KilledBySIGHUP, "Hanging up!");
+//
+// On the regular expressions used in death tests:
+//
+// On POSIX-compliant systems (*nix), we use the <regex.h> library,
+// which uses the POSIX extended regex syntax.
+//
+// On other platforms (e.g. Windows), we only support a simple regex
+// syntax implemented as part of Google Test. This limited
+// implementation should be enough most of the time when writing
+// death tests; though it lacks many features you can find in PCRE
+// or POSIX extended regex syntax. For example, we don't support
+// union ("x|y"), grouping ("(xy)"), brackets ("[xy]"), and
+// repetition count ("x{5,7}"), among others.
+//
+// Below is the syntax that we do support. We chose it to be a
+// subset of both PCRE and POSIX extended regex, so it's easy to
+// learn wherever you come from. In the following: 'A' denotes a
+// literal character, period (.), or a single \\ escape sequence;
+// 'x' and 'y' denote regular expressions; 'm' and 'n' are for
+// natural numbers.
+//
+// c matches any literal character c
+// \\d matches any decimal digit
+// \\D matches any character that's not a decimal digit
+// \\f matches \f
+// \\n matches \n
+// \\r matches \r
+// \\s matches any ASCII whitespace, including \n
+// \\S matches any character that's not a whitespace
+// \\t matches \t
+// \\v matches \v
+// \\w matches any letter, _, or decimal digit
+// \\W matches any character that \\w doesn't match
+// \\c matches any literal character c, which must be a punctuation
+// . matches any single character except \n
+// A? matches 0 or 1 occurrences of A
+// A* matches 0 or many occurrences of A
+// A+ matches 1 or many occurrences of A
+// ^ matches the beginning of a string (not that of each line)
+// $ matches the end of a string (not that of each line)
+// xy matches x followed by y
+//
+// If you accidentally use PCRE or POSIX extended regex features
+// not implemented by us, you will get a run-time failure. In that
+// case, please try to rewrite your regular expression within the
+// above syntax.
+//
+// This implementation is *not* meant to be as highly tuned or robust
+// as a compiled regex library, but should perform well enough for a
+// death test, which already incurs significant overhead by launching
+// a child process.
+//
+// Known caveats:
+//
+// A "threadsafe" style death test obtains the path to the test
+// program from argv[0] and re-executes it in the sub-process. For
+// simplicity, the current implementation doesn't search the PATH
+// when launching the sub-process. This means that the user must
+// invoke the test program via a path that contains at least one
+// path separator (e.g. path/to/foo_test and
+// /absolute/path/to/bar_test are fine, but foo_test is not). This
+// is rarely a problem as people usually don't put the test binary
+// directory in PATH.
+//
+// TODO(wan@google.com): make thread-safe death tests search the PATH.
+
+// Asserts that a given statement causes the program to exit, with an
+// integer exit status that satisfies predicate, and emitting error output
+// that matches regex.
+# define ASSERT_EXIT(statement, predicate, regex) \
+ GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_)
+
+// Like ASSERT_EXIT, but continues on to successive tests in the
+// test case, if any:
+# define EXPECT_EXIT(statement, predicate, regex) \
+ GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_)
+
+// Asserts that a given statement causes the program to exit, either by
+// explicitly exiting with a nonzero exit code or being killed by a
+// signal, and emitting error output that matches regex.
+# define ASSERT_DEATH(statement, regex) \
+ ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
+
+// Like ASSERT_DEATH, but continues on to successive tests in the
+// test case, if any:
+# define EXPECT_DEATH(statement, regex) \
+ EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
+
+// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*:
+
+// Tests that an exit code describes a normal exit with a given exit code.
+class GTEST_API_ ExitedWithCode {
+ public:
+ explicit ExitedWithCode(int exit_code);
+ bool operator()(int exit_status) const;
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ExitedWithCode& other);
+
+ const int exit_code_;
+};
+
+# if !GTEST_OS_WINDOWS
+// Tests that an exit code describes an exit due to termination by a
+// given signal.
+class GTEST_API_ KilledBySignal {
+ public:
+ explicit KilledBySignal(int signum);
+ bool operator()(int exit_status) const;
+ private:
+ const int signum_;
+};
+# endif // !GTEST_OS_WINDOWS
+
+// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode.
+// The death testing framework causes this to have interesting semantics,
+// since the sideeffects of the call are only visible in opt mode, and not
+// in debug mode.
+//
+// In practice, this can be used to test functions that utilize the
+// LOG(DFATAL) macro using the following style:
+//
+// int DieInDebugOr12(int* sideeffect) {
+// if (sideeffect) {
+// *sideeffect = 12;
+// }
+// LOG(DFATAL) << "death";
+// return 12;
+// }
+//
+// TEST(TestCase, TestDieOr12WorksInDgbAndOpt) {
+// int sideeffect = 0;
+// // Only asserts in dbg.
+// EXPECT_DEBUG_DEATH(DieInDebugOr12(&sideeffect), "death");
+//
+// #ifdef NDEBUG
+// // opt-mode has sideeffect visible.
+// EXPECT_EQ(12, sideeffect);
+// #else
+// // dbg-mode no visible sideeffect.
+// EXPECT_EQ(0, sideeffect);
+// #endif
+// }
+//
+// This will assert that DieInDebugOr12() crashes in debug
+// mode, usually due to a DCHECK or LOG(DFATAL), but returns the
+// appropriate fallback value (12 in this case) in opt mode. If you
+// need to test that a function has appropriate side-effects in opt
+// mode, include assertions against the side-effects. A general
+// pattern for this is:
+//
+// EXPECT_DEBUG_DEATH({
+// // Side-effects here will have an effect after this statement in
+// // opt mode, but none in debug mode.
+// EXPECT_EQ(12, DieInDebugOr12(&sideeffect));
+// }, "death");
+//
+# ifdef NDEBUG
+
+# define EXPECT_DEBUG_DEATH(statement, regex) \
+ do { statement; } while (::testing::internal::AlwaysFalse())
+
+# define ASSERT_DEBUG_DEATH(statement, regex) \
+ do { statement; } while (::testing::internal::AlwaysFalse())
+
+# else
+
+# define EXPECT_DEBUG_DEATH(statement, regex) \
+ EXPECT_DEATH(statement, regex)
+
+# define ASSERT_DEBUG_DEATH(statement, regex) \
+ ASSERT_DEATH(statement, regex)
+
+# endif // NDEBUG for EXPECT_DEBUG_DEATH
+#endif // GTEST_HAS_DEATH_TEST
+
+// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and
+// ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if
+// death tests are supported; otherwise they just issue a warning. This is
+// useful when you are combining death test assertions with normal test
+// assertions in one test.
+#if GTEST_HAS_DEATH_TEST
+# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
+ EXPECT_DEATH(statement, regex)
+# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
+ ASSERT_DEATH(statement, regex)
+#else
+# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
+ GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, )
+# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
+ GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, return)
+#endif
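+
+// Editor's note (illustrative sketch, not part of the original header);
+// DoSomethingFatal() is hypothetical.
+//
+//   TEST(MyDeathTest, DiesWhereSupported) {
+//     // A real death test where supported; elsewhere it only logs a warning.
+//     EXPECT_DEATH_IF_SUPPORTED(DoSomethingFatal(), "fatal error");
+//   }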
+
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the Message class.
+//
+// IMPORTANT NOTE: Due to limitation of the C++ language, we have to
+// leave some internal implementation details in this header file.
+// They are clearly marked by comments like this:
+//
+// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+//
+// Such code is NOT meant to be used by a user directly, and is subject
+// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user
+// program!
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+#define GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+
+#include <limits>
+
+
+namespace testing {
+
+// The Message class works like an ostream repeater.
+//
+// Typical usage:
+//
+// 1. You stream a bunch of values to a Message object.
+// It will remember the text in a stringstream.
+// 2. Then you stream the Message object to an ostream.
+// This causes the text in the Message to be streamed
+// to the ostream.
+//
+// For example,
+//
+// testing::Message foo;
+// foo << 1 << " != " << 2;
+// std::cout << foo;
+//
+// will print "1 != 2".
+//
+// Message is not intended to be inherited from. In particular, its
+// destructor is not virtual.
+//
+// Note that stringstream behaves differently in gcc and in MSVC. You
+// can stream a NULL char pointer to it in the former, but not in the
+// latter (it causes an access violation if you do). The Message
+// class hides this difference by treating a NULL char pointer as
+// "(null)".
+class GTEST_API_ Message {
+ private:
+ // The type of basic IO manipulators (endl, ends, and flush) for
+ // narrow streams.
+ typedef std::ostream& (*BasicNarrowIoManip)(std::ostream&);
+
+ public:
+ // Constructs an empty Message.
+ // We allocate the stringstream separately because otherwise each use of
+ // ASSERT/EXPECT in a procedure adds over 200 bytes to the procedure's
+ // stack frame leading to huge stack frames in some cases; gcc does not reuse
+ // the stack space.
+ Message() : ss_(new ::std::stringstream) {
+ // By default, we want there to be enough precision when printing
+ // a double to a Message.
+ *ss_ << std::setprecision(std::numeric_limits<double>::digits10 + 2);
+ }
+
+ // Copy constructor.
+ Message(const Message& msg) : ss_(new ::std::stringstream) { // NOLINT
+ *ss_ << msg.GetString();
+ }
+
+ // Constructs a Message from a C-string.
+ explicit Message(const char* str) : ss_(new ::std::stringstream) {
+ *ss_ << str;
+ }
+
+#if GTEST_OS_SYMBIAN
+ // Streams a value (either a pointer or not) to this object.
+ template <typename T>
+ inline Message& operator <<(const T& value) {
+ StreamHelper(typename internal::is_pointer<T>::type(), value);
+ return *this;
+ }
+#else
+ // Streams a non-pointer value to this object.
+ template <typename T>
+ inline Message& operator <<(const T& val) {
+ ::GTestStreamToHelper(ss_.get(), val);
+ return *this;
+ }
+
+ // Streams a pointer value to this object.
+ //
+ // This function is an overload of the previous one. When you
+ // stream a pointer to a Message, this definition will be used as it
+ // is more specialized. (The C++ Standard, section
+ // [temp.func.order].) If you stream a non-pointer, then the
+ // previous definition will be used.
+ //
+ // The reason for this overload is that streaming a NULL pointer to
+ // ostream is undefined behavior. Depending on the compiler, you
+ // may get "0", "(nil)", "(null)", or an access violation. To
+ // ensure consistent result across compilers, we always treat NULL
+ // as "(null)".
+ template <typename T>
+ inline Message& operator <<(T* const& pointer) { // NOLINT
+ if (pointer == NULL) {
+ *ss_ << "(null)";
+ } else {
+ ::GTestStreamToHelper(ss_.get(), pointer);
+ }
+ return *this;
+ }
+#endif // GTEST_OS_SYMBIAN
+
+ // Since the basic IO manipulators are overloaded for both narrow
+ // and wide streams, we have to provide this specialized definition
+ // of operator <<, even though its body is the same as the
+ // templatized version above. Without this definition, streaming
+ // endl or other basic IO manipulators to Message will confuse the
+ // compiler.
+ Message& operator <<(BasicNarrowIoManip val) {
+ *ss_ << val;
+ return *this;
+ }
+
+ // Instead of 1/0, we want to see true/false for bool values.
+ Message& operator <<(bool b) {
+ return *this << (b ? "true" : "false");
+ }
+
+ // These two overloads allow streaming a wide C string to a Message
+ // using the UTF-8 encoding.
+ Message& operator <<(const wchar_t* wide_c_str) {
+ return *this << internal::String::ShowWideCString(wide_c_str);
+ }
+ Message& operator <<(wchar_t* wide_c_str) {
+ return *this << internal::String::ShowWideCString(wide_c_str);
+ }
+
+#if GTEST_HAS_STD_WSTRING
+ // Converts the given wide string to a narrow string using the UTF-8
+ // encoding, and streams the result to this Message object.
+ Message& operator <<(const ::std::wstring& wstr);
+#endif // GTEST_HAS_STD_WSTRING
+
+#if GTEST_HAS_GLOBAL_WSTRING
+ // Converts the given wide string to a narrow string using the UTF-8
+ // encoding, and streams the result to this Message object.
+ Message& operator <<(const ::wstring& wstr);
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+ // Gets the text streamed to this object so far as a String.
+ // Each '\0' character in the buffer is replaced with "\\0".
+ //
+ // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+ internal::String GetString() const {
+ return internal::StringStreamToString(ss_.get());
+ }
+
+ private:
+
+#if GTEST_OS_SYMBIAN
+ // These are needed as the Nokia Symbian Compiler cannot decide between
+ // const T& and const T* in a function template. The Nokia compiler _can_
+ // decide between class template specializations for T and T*, so a
+ // tr1::type_traits-like is_pointer works, and we can overload on that.
+ template <typename T>
+ inline void StreamHelper(internal::true_type /*dummy*/, T* pointer) {
+ if (pointer == NULL) {
+ *ss_ << "(null)";
+ } else {
+ ::GTestStreamToHelper(ss_.get(), pointer);
+ }
+ }
+ template <typename T>
+ inline void StreamHelper(internal::false_type /*dummy*/, const T& value) {
+ ::GTestStreamToHelper(ss_.get(), value);
+ }
+#endif // GTEST_OS_SYMBIAN
+
+ // We'll hold the text streamed to this object here.
+ const internal::scoped_ptr< ::std::stringstream> ss_;
+
+ // We declare (but don't implement) this to prevent the compiler
+ // from implementing the assignment operator.
+ void operator=(const Message&);
+};
+
+// Streams a Message to an ostream.
+inline std::ostream& operator <<(std::ostream& os, const Message& sb) {
+ return os << sb.GetString();
+}
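+
+// For illustration, a minimal sketch of how a Message might be used in user
+// code (the variable names and values here are made up):
+//
+//   testing::Message msg;
+//   msg << "x = " << 2.5 << ", ok = " << true;  // "x = 2.5, ok = true"
+//   std::cout << msg << std::endl;              // uses the operator<< above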
+
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+// This file was GENERATED by command:
+// pump.py gtest-param-test.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: vladl@google.com (Vlad Losev)
+//
+// Macros and functions for implementing parameterized tests
+// in Google C++ Testing Framework (Google Test)
+//
+// This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+
+
+// Value-parameterized tests allow you to test your code with different
+// parameters without writing multiple copies of the same test.
+//
+// Here is how you use value-parameterized tests:
+
+#if 0
+
+// To write value-parameterized tests, first you should define a fixture
+// class. It is usually derived from testing::TestWithParam<T> (see below for
+// another inheritance scheme that's sometimes useful in more complicated
+// class hierarchies), where T is the type of your parameter values.
+// TestWithParam<T> is itself derived from testing::Test. T can be any
+// copyable type. If it's a raw pointer, you are responsible for managing the
+// lifespan of the pointed values.
+
+class FooTest : public ::testing::TestWithParam<const char*> {
+ // You can implement all the usual class fixture members here.
+};
+
+// Then, use the TEST_P macro to define as many parameterized tests
+// for this fixture as you want. The _P suffix is for "parameterized"
+// or "pattern", whichever you prefer to think.
+
+TEST_P(FooTest, DoesBlah) {
+ // Inside a test, access the test parameter with the GetParam() method
+ // of the TestWithParam<T> class:
+ EXPECT_TRUE(foo.Blah(GetParam()));
+ ...
+}
+
+TEST_P(FooTest, HasBlahBlah) {
+ ...
+}
+
+// Finally, you can use INSTANTIATE_TEST_CASE_P to instantiate the test
+// case with any set of parameters you want. Google Test defines a number
+// of functions for generating test parameters. They return what we call
+// (surprise!) parameter generators. Here is a summary of them, which
+// are all in the testing namespace:
+//
+//
+//   Range(begin, end [, step]) - Yields values {begin, begin+step,
+//                                begin+step+step, ...}. The values do not
+//                                include end. step defaults to 1.
+//   Values(v1, v2, ..., vN)    - Yields values {v1, v2, ..., vN}.
+//   ValuesIn(container)        - Yields values from a C-style array, an STL
+//   ValuesIn(begin,end)          container, or an iterator range
+//                                [begin, end).
+//   Bool()                     - Yields sequence {false, true}.
+//   Combine(g1, g2, ..., gN)   - Yields all combinations (the Cartesian
+//                                product for the math savvy) of the values
+//                                generated by the N generators.
+//
+// For more details, see comments at the definitions of these functions below
+// in this file.
+//
+// The following statement will instantiate tests from the FooTest test case
+// each with parameter values "meeny", "miny", and "moe".
+
+INSTANTIATE_TEST_CASE_P(InstantiationName,
+ FooTest,
+ Values("meeny", "miny", "moe"));
+
+// To distinguish different instances of the pattern (yes, you
+// can instantiate it more than once), the first argument to the
+// INSTANTIATE_TEST_CASE_P macro is a prefix that will be added to the
+// actual test case name. Remember to pick unique prefixes for different
+// instantiations. The tests from the instantiation above will have
+// these names:
+//
+// * InstantiationName/FooTest.DoesBlah/0 for "meeny"
+// * InstantiationName/FooTest.DoesBlah/1 for "miny"
+// * InstantiationName/FooTest.DoesBlah/2 for "moe"
+// * InstantiationName/FooTest.HasBlahBlah/0 for "meeny"
+// * InstantiationName/FooTest.HasBlahBlah/1 for "miny"
+// * InstantiationName/FooTest.HasBlahBlah/2 for "moe"
+//
+// You can use these names in --gtest_filter.
+//
+// This statement will instantiate all tests from FooTest again, each
+// with parameter values "cat" and "dog":
+
+const char* pets[] = {"cat", "dog"};
+INSTANTIATE_TEST_CASE_P(AnotherInstantiationName, FooTest, ValuesIn(pets));
+
+// The tests from the instantiation above will have these names:
+//
+// * AnotherInstantiationName/FooTest.DoesBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.DoesBlah/1 for "dog"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/1 for "dog"
+//
+// Please note that INSTANTIATE_TEST_CASE_P will instantiate all tests
+// in the given test case, whether their definitions come before or
+// AFTER the INSTANTIATE_TEST_CASE_P statement.
+//
+// Please also note that generator expressions (including parameters to the
+// generators) are evaluated in InitGoogleTest(), after main() has started.
+// This allows the user, on one hand, to adjust generator parameters in order
+// to dynamically determine a set of tests to run and, on the other hand, to
+// inspect the generated tests with the Google Test reflection API before
+// RUN_ALL_TESTS() is executed.
+//
+// You can see samples/sample7_unittest.cc and samples/sample8_unittest.cc
+// for more examples.
+//
+// In the future, we plan to publish the API for defining new parameter
+// generators. But for now this interface remains part of the internal
+// implementation and is subject to change.
+//
+//
+// A parameterized test fixture must be derived from testing::Test and from
+// testing::WithParamInterface<T>, where T is the type of the parameter
+// values. Inheriting from TestWithParam<T> satisfies that requirement because
+// TestWithParam<T> inherits from both Test and WithParamInterface. In more
+// complicated hierarchies, however, it is occasionally useful to inherit
+// separately from Test and WithParamInterface. For example:
+
+class BaseTest : public ::testing::Test {
+ // You can inherit all the usual members for a non-parameterized test
+ // fixture here.
+};
+
+class DerivedTest : public BaseTest, public ::testing::WithParamInterface<int> {
+ // The usual test fixture members go here too.
+};
+
+TEST_F(BaseTest, HasFoo) {
+ // This is an ordinary non-parameterized test.
+}
+
+TEST_P(DerivedTest, DoesBlah) {
+ // GetParam works just the same here as if you inherit from TestWithParam.
+ EXPECT_TRUE(foo.Blah(GetParam()));
+}
+
+#endif // 0
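+
+// As one more illustrative sketch (not taken from the guide above): assuming
+// a fixture BarTest derived from
+// testing::TestWithParam< ::std::tr1::tuple<bool, int> >, the Bool() and
+// Combine() generators summarized earlier could be used together like this:
+//
+//   INSTANTIATE_TEST_CASE_P(BoolTimesRange, BarTest,
+//                           Combine(Bool(), Range(1, 4)));
+//
+// which instantiates the BarTest pattern with the six parameter tuples
+// formed from {false, true} x {1, 2, 3}.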
+
+
+#if !GTEST_OS_SYMBIAN
+# include <utility>
+#endif
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+
+// Type and function utilities for implementing parameterized tests.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+
+#include <iterator>
+#include <utility>
+#include <vector>
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+// Copyright 2003 Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: Dan Egnor (egnor@google.com)
+//
+// A "smart" pointer type with reference tracking. Every pointer to a
+// particular object is kept on a circular linked list. When the last pointer
+// to an object is destroyed or reassigned, the object is deleted.
+//
+// Used properly, this deletes the object when the last reference goes away.
+// There are several caveats:
+// - Like all reference counting schemes, cycles lead to leaks.
+// - Each smart pointer is actually two pointers (8 bytes instead of 4).
+// - Every time a pointer is assigned, the entire list of pointers to that
+// object is traversed. This class is therefore NOT SUITABLE when there
+// will often be more than two or three pointers to a particular object.
+// - References are only tracked as long as linked_ptr<> objects are copied.
+// If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS
+// will happen (double deletion).
+//
+// A good use of this class is storing object references in STL containers.
+// You can safely put linked_ptr<> in a vector<>.
+// Other uses may not be as good.
+//
+// Note: If you use an incomplete type with linked_ptr<>, the class
+// *containing* linked_ptr<> must have a constructor and destructor (even
+// if they do nothing!).
+//
+// Bill Gibbons suggested we use something like this.
+//
+// Thread Safety:
+// Unlike other linked_ptr implementations, in this implementation
+// a linked_ptr object is thread-safe in the sense that:
+// - it's safe to copy linked_ptr objects concurrently,
+// - it's safe to copy *from* a linked_ptr and read its underlying
+// raw pointer (e.g. via get()) concurrently, and
+// - it's safe to write to two linked_ptrs that point to the same
+// shared object concurrently.
+// TODO(wan@google.com): rename this to safe_linked_ptr to avoid
+// confusion with normal linked_ptr.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+
+#include <stdlib.h>
+#include <assert.h>
+
+
+namespace testing {
+namespace internal {
+
+// Protects copying of all linked_ptr objects.
+GTEST_API_ GTEST_DECLARE_STATIC_MUTEX_(g_linked_ptr_mutex);
+
+// This is used internally by all instances of linked_ptr<>. It needs to be
+// a non-template class because different types of linked_ptr<> can refer to
+// the same object (linked_ptr<Superclass>(obj) vs linked_ptr<Subclass>(obj)).
+// It therefore needs to be possible for different types of linked_ptr to
+// participate in the same circular linked list, so we need a single class
+// type here.
+//
+// DO NOT USE THIS CLASS DIRECTLY YOURSELF. Use linked_ptr<T>.
+class linked_ptr_internal {
+ public:
+ // Create a new circle that includes only this instance.
+ void join_new() {
+ next_ = this;
+ }
+
+ // Many linked_ptr operations may change p.link_ for some linked_ptr
+ // variable p in the same circle as this object. Therefore we need
+ // to prevent two such operations from occurring concurrently.
+ //
+ // Note that different types of linked_ptr objects can coexist in a
+ // circle (e.g. linked_ptr<Base>, linked_ptr<Derived1>, and
+ // linked_ptr<Derived2>). Therefore we must use a single mutex to
+ // protect all linked_ptr objects. This can create serious
+ // contention in production code, but is acceptable in a testing
+ // framework.
+
+ // Join an existing circle.
+ // L < g_linked_ptr_mutex
+ void join(linked_ptr_internal const* ptr) {
+ MutexLock lock(&g_linked_ptr_mutex);
+
+ linked_ptr_internal const* p = ptr;
+ while (p->next_ != ptr) p = p->next_;
+ p->next_ = this;
+ next_ = ptr;
+ }
+
+ // Leave whatever circle we're part of. Returns true if we were the
+ // last member of the circle. Once this is done, you can join() another.
+ // L < g_linked_ptr_mutex
+ bool depart() {
+ MutexLock lock(&g_linked_ptr_mutex);
+
+ if (next_ == this) return true;
+ linked_ptr_internal const* p = next_;
+ while (p->next_ != this) p = p->next_;
+ p->next_ = next_;
+ return false;
+ }
+
+ private:
+ mutable linked_ptr_internal const* next_;
+};
+
+template <typename T>
+class linked_ptr {
+ public:
+ typedef T element_type;
+
+ // Take over ownership of a raw pointer. This should happen as soon as
+ // possible after the object is created.
+ explicit linked_ptr(T* ptr = NULL) { capture(ptr); }
+ ~linked_ptr() { depart(); }
+
+ // Copy an existing linked_ptr<>, adding ourselves to the list of references.
+ template <typename U> linked_ptr(linked_ptr<U> const& ptr) { copy(&ptr); }
+ linked_ptr(linked_ptr const& ptr) { // NOLINT
+ assert(&ptr != this);
+ copy(&ptr);
+ }
+
+ // Assignment releases the old value and acquires the new.
+ template <typename U> linked_ptr& operator=(linked_ptr<U> const& ptr) {
+ depart();
+ copy(&ptr);
+ return *this;
+ }
+
+ linked_ptr& operator=(linked_ptr const& ptr) {
+ if (&ptr != this) {
+ depart();
+ copy(&ptr);
+ }
+ return *this;
+ }
+
+ // Smart pointer members.
+ void reset(T* ptr = NULL) {
+ depart();
+ capture(ptr);
+ }
+ T* get() const { return value_; }
+ T* operator->() const { return value_; }
+ T& operator*() const { return *value_; }
+
+ bool operator==(T* p) const { return value_ == p; }
+ bool operator!=(T* p) const { return value_ != p; }
+ template <typename U>
+ bool operator==(linked_ptr<U> const& ptr) const {
+ return value_ == ptr.get();
+ }
+ template <typename U>
+ bool operator!=(linked_ptr<U> const& ptr) const {
+ return value_ != ptr.get();
+ }
+
+ private:
+ template <typename U>
+ friend class linked_ptr;
+
+ T* value_;
+ linked_ptr_internal link_;
+
+ void depart() {
+ if (link_.depart()) delete value_;
+ }
+
+ void capture(T* ptr) {
+ value_ = ptr;
+ link_.join_new();
+ }
+
+ template <typename U> void copy(linked_ptr<U> const* ptr) {
+ value_ = ptr->get();
+ if (value_)
+ link_.join(&ptr->link_);
+ else
+ link_.join_new();
+ }
+};
+
+template<typename T> inline
+bool operator==(T* ptr, const linked_ptr<T>& x) {
+ return ptr == x.get();
+}
+
+template<typename T> inline
+bool operator!=(T* ptr, const linked_ptr<T>& x) {
+ return ptr != x.get();
+}
+
+// A function to convert T* into linked_ptr<T>
+// Doing e.g. make_linked_ptr(new FooBarBaz<type>(arg)) is a shorter notation
+// for linked_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))
+template <typename T>
+linked_ptr<T> make_linked_ptr(T* ptr) {
+ return linked_ptr<T>(ptr);
+}
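+
+// Illustrative sketch of the container use case mentioned at the top of this
+// header ("Foo" is a placeholder type, not part of this header):
+//
+//   std::vector<linked_ptr<Foo> > foos;
+//   foos.push_back(make_linked_ptr(new Foo));
+//   linked_ptr<Foo> alias = foos[0];  // both copies now track the same Foo
+//   // The Foo is deleted once the last linked_ptr referring to it departs.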
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Test - The Google C++ Testing Framework
+//
+// This file implements a universal value printer that can print a
+// value of any type T:
+//
+// void ::testing::internal::UniversalPrinter<T>::Print(value, ostream_ptr);
+//
+// A user can teach this function how to print a class type T by
+// defining either operator<<() or PrintTo() in the namespace that
+// defines T. More specifically, the FIRST defined function in the
+// following list will be used (assuming T is defined in namespace
+// foo):
+//
+// 1. foo::PrintTo(const T&, ostream*)
+// 2. operator<<(ostream&, const T&) defined in either foo or the
+// global namespace.
+//
+// If none of the above is defined, it will print the debug string of
+// the value if it is a protocol buffer, or print the raw bytes in the
+// value otherwise.
+//
+// To aid debugging: when T is a reference type, the address of the
+// value is also printed; when T is a (const) char pointer, both the
+// pointer value and the NUL-terminated string it points to are
+// printed.
+//
+// We also provide some convenient wrappers:
+//
+// // Prints a value to a string. For a (const or not) char
+// // pointer, the NUL-terminated string (but not the pointer) is
+// // printed.
+// std::string ::testing::PrintToString(const T& value);
+//
+// // Prints a value tersely: for a reference type, the referenced
+// // value (but not the address) is printed; for a (const or not) char
+// // pointer, the NUL-terminated string (but not the pointer) is
+// // printed.
+// void ::testing::internal::UniversalTersePrint(const T& value, ostream*);
+//
+// // Prints value using the type inferred by the compiler. The difference
+// // from UniversalTersePrint() is that this function prints both the
+// // pointer and the NUL-terminated string for a (const or not) char pointer.
+// void ::testing::internal::UniversalPrint(const T& value, ostream*);
+//
+// // Prints the fields of a tuple tersely to a string vector, one
+// // element for each field. Tuple support must be enabled in
+// // gtest-port.h.
+// std::vector<string> UniversalTersePrintTupleFieldsToStrings(
+// const Tuple& value);
+//
+// Known limitation:
+//
+// The print primitives print the elements of an STL-style container
+// using the compiler-inferred type of *iter where iter is a
+// const_iterator of the container. When const_iterator is an input
+// iterator but not a forward iterator, this inferred type may not
+// match value_type, and the print output may be incorrect. In
+// practice, this is rarely a problem as for most containers
+// const_iterator is a forward iterator. We'll fix this if there's an
+// actual need for it. Note that this fix cannot rely on value_type
+// being defined as many user-defined container types don't have
+// value_type.
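+
+// A minimal sketch of option 1 above, using a hypothetical user type
+// foo::Point (nothing below is part of this header's API):
+//
+//   namespace foo {
+//   struct Point { int x, y; };
+//   void PrintTo(const Point& p, ::std::ostream* os) {
+//     *os << "(" << p.x << ", " << p.y << ")";
+//   }
+//   }  // namespace foo
+//
+// With this in place, test failure messages and ::testing::PrintToString()
+// show a Point as "(x, y)" instead of its raw bytes.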
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+
+#include <ostream> // NOLINT
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace testing {
+
+// Definitions in the 'internal' and 'internal2' name spaces are
+// subject to change without notice. DO NOT USE THEM IN USER CODE!
+namespace internal2 {
+
+// Prints the given number of bytes in the given object to the given
+// ostream.
+GTEST_API_ void PrintBytesInObjectTo(const unsigned char* obj_bytes,
+ size_t count,
+ ::std::ostream* os);
+
+// For selecting which printer to use when a given type has neither <<
+// nor PrintTo().
+enum TypeKind {
+ kProtobuf, // a protobuf type
+ kConvertibleToInteger, // a type implicitly convertible to BiggestInt
+ // (e.g. a named or unnamed enum type)
+ kOtherType // anything else
+};
+
+// TypeWithoutFormatter<T, kTypeKind>::PrintValue(value, os) is called
+// by the universal printer to print a value of type T when neither
+// operator<< nor PrintTo() is defined for T, where kTypeKind is the
+// "kind" of T as defined by enum TypeKind.
+template <typename T, TypeKind kTypeKind>
+class TypeWithoutFormatter {
+ public:
+ // This default version is called when kTypeKind is kOtherType.
+ static void PrintValue(const T& value, ::std::ostream* os) {
+ PrintBytesInObjectTo(reinterpret_cast<const unsigned char*>(&value),
+ sizeof(value), os);
+ }
+};
+
+// We print a protobuf using its ShortDebugString() when the string
+// doesn't exceed this many characters; otherwise we print it using
+// DebugString() for better readability.
+const size_t kProtobufOneLinerMaxLength = 50;
+
+template <typename T>
+class TypeWithoutFormatter<T, kProtobuf> {
+ public:
+ static void PrintValue(const T& value, ::std::ostream* os) {
+ const ::testing::internal::string short_str = value.ShortDebugString();
+ const ::testing::internal::string pretty_str =
+ short_str.length() <= kProtobufOneLinerMaxLength ?
+ short_str : ("\n" + value.DebugString());
+ *os << ("<" + pretty_str + ">");
+ }
+};
+
+template <typename T>
+class TypeWithoutFormatter<T, kConvertibleToInteger> {
+ public:
+ // Since T has no << operator or PrintTo() but can be implicitly
+ // converted to BiggestInt, we print it as a BiggestInt.
+ //
+ // Most likely T is an enum type (either named or unnamed), in which
+ // case printing it as an integer is the desired behavior. In case
+ // T is not an enum, printing it as an integer is the best we can do
+ // given that it has no user-defined printer.
+ static void PrintValue(const T& value, ::std::ostream* os) {
+ const internal::BiggestInt kBigInt = value;
+ *os << kBigInt;
+ }
+};
+
+// Prints the given value to the given ostream. If the value is a
+// protocol message, its debug string is printed; if it's an enum or
+// of a type implicitly convertible to BiggestInt, it's printed as an
+// integer; otherwise the bytes in the value are printed. This is
+// what UniversalPrinter<T>::Print() does when it knows nothing about
+// type T and T has neither << operator nor PrintTo().
+//
+// A user can override this behavior for a class type Foo by defining
+// a << operator in the namespace where Foo is defined.
+//
+// We put this operator in namespace 'internal2' instead of 'internal'
+// to simplify the implementation, as much code in 'internal' needs to
+// use << in STL, which would conflict with our own << were it defined
+// in 'internal'.
+//
+// Note that this operator<< takes a generic std::basic_ostream<Char,
+// CharTraits> type instead of the more restricted std::ostream. If
+// we define it to take an std::ostream instead, we'll get an
+// "ambiguous overloads" compiler error when trying to print a type
+// Foo that supports streaming to std::basic_ostream<Char,
+// CharTraits>, as the compiler cannot tell whether
+// operator<<(std::ostream&, const T&) or
+// operator<<(std::basic_stream<Char, CharTraits>, const Foo&) is more
+// specific.
+template <typename Char, typename CharTraits, typename T>
+::std::basic_ostream<Char, CharTraits>& operator<<(
+ ::std::basic_ostream<Char, CharTraits>& os, const T& x) {
+ TypeWithoutFormatter<T,
+ (internal::IsAProtocolMessage<T>::value ? kProtobuf :
+ internal::ImplicitlyConvertible<const T&, internal::BiggestInt>::value ?
+ kConvertibleToInteger : kOtherType)>::PrintValue(x, &os);
+ return os;
+}
+
+} // namespace internal2
+} // namespace testing
+
+// This namespace MUST NOT BE NESTED IN ::testing, or the name look-up
+// magic needed for implementing UniversalPrinter won't work.
+namespace testing_internal {
+
+// Used to print a value that is not an STL-style container when the
+// user doesn't define PrintTo() for it.
+template <typename T>
+void DefaultPrintNonContainerTo(const T& value, ::std::ostream* os) {
+ // With the following statement, during unqualified name lookup,
+ // testing::internal2::operator<< appears as if it was declared in
+ // the nearest enclosing namespace that contains both
+ // ::testing_internal and ::testing::internal2, i.e. the global
+ // namespace. For more details, refer to the C++ Standard section
+ // 7.3.4-1 [namespace.udir]. This allows us to fall back onto
+ // testing::internal2::operator<< in case T doesn't come with a <<
+ // operator.
+ //
+ // We cannot write 'using ::testing::internal2::operator<<;', which
+ // gcc 3.3 fails to compile due to a compiler bug.
+ using namespace ::testing::internal2; // NOLINT
+
+ // Assuming T is defined in namespace foo, in the next statement,
+ // the compiler will consider all of:
+ //
+ // 1. foo::operator<< (thanks to Koenig look-up),
+ // 2. ::operator<< (as the current namespace is enclosed in ::),
+ // 3. testing::internal2::operator<< (thanks to the using statement above).
+ //
+ // The operator<< whose type matches T best will be picked.
+ //
+ // We deliberately allow #2 to be a candidate, as sometimes it's
+ // impossible to define #1 (e.g. when foo is ::std, defining
+ // anything in it is undefined behavior unless you are a compiler
+ // vendor).
+ *os << value;
+}
+
+} // namespace testing_internal
+
+namespace testing {
+namespace internal {
+
+// UniversalPrinter<T>::Print(value, ostream_ptr) prints the given
+// value to the given ostream. The caller must ensure that
+// 'ostream_ptr' is not NULL, or the behavior is undefined.
+//
+// We define UniversalPrinter as a class template (as opposed to a
+// function template), as we need to partially specialize it for
+// reference types, which cannot be done with function templates.
+template <typename T>
+class UniversalPrinter;
+
+template <typename T>
+void UniversalPrint(const T& value, ::std::ostream* os);
+
+// Used to print an STL-style container when the user doesn't define
+// a PrintTo() for it.
+template <typename C>
+void DefaultPrintTo(IsContainer /* dummy */,
+ false_type /* is not a pointer */,
+ const C& container, ::std::ostream* os) {
+ const size_t kMaxCount = 32; // The maximum number of elements to print.
+ *os << '{';
+ size_t count = 0;
+ for (typename C::const_iterator it = container.begin();
+ it != container.end(); ++it, ++count) {
+ if (count > 0) {
+ *os << ',';
+ if (count == kMaxCount) { // Enough has been printed.
+ *os << " ...";
+ break;
+ }
+ }
+ *os << ' ';
+ // We cannot call PrintTo(*it, os) here as PrintTo() doesn't
+ // handle *it being a native array.
+ internal::UniversalPrint(*it, os);
+ }
+
+ if (count > 0) {
+ *os << ' ';
+ }
+ *os << '}';
+}
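+
+// A small sketch of the resulting format, assuming a std::vector<int>
+// holding 1, 2 and 3:
+//
+//   std::vector<int> v;
+//   v.push_back(1); v.push_back(2); v.push_back(3);
+//   // The overload above prints v as "{ 1, 2, 3 }"; containers with more
+//   // than kMaxCount (32) elements are truncated with ", ...".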
+
+// Used to print a pointer that is neither a char pointer nor a member
+// pointer, when the user doesn't define PrintTo() for it. (A member
+// variable pointer or member function pointer doesn't really point to
+// a location in the address space. Its representation is
+// implementation-defined. Therefore it will be printed as raw
+// bytes.)
+template <typename T>
+void DefaultPrintTo(IsNotContainer /* dummy */,
+ true_type /* is a pointer */,
+ T* p, ::std::ostream* os) {
+ if (p == NULL) {
+ *os << "NULL";
+ } else {
+ // C++ doesn't allow casting from a function pointer to any object
+ // pointer.
+ //
+ // IsTrue() silences warnings: "Condition is always true",
+ // "unreachable code".
+ if (IsTrue(ImplicitlyConvertible<T*, const void*>::value)) {
+ // T is not a function type. We just call << to print p,
+ // relying on ADL to pick up user-defined << for their pointer
+ // types, if any.
+ *os << p;
+ } else {
+ // T is a function type, so '*os << p' doesn't do what we want
+ // (it just prints p as bool). We want to print p as a const
+ // void*. However, we cannot cast it to const void* directly,
+ // even using reinterpret_cast, as earlier versions of gcc
+ // (e.g. 3.4.5) cannot compile the cast when p is a function
+ // pointer. Casting to UInt64 first solves the problem.
+ *os << reinterpret_cast<const void*>(
+ reinterpret_cast<internal::UInt64>(p));
+ }
+ }
+}
+
+// Used to print a non-container, non-pointer value when the user
+// doesn't define PrintTo() for it.
+template <typename T>
+void DefaultPrintTo(IsNotContainer /* dummy */,
+ false_type /* is not a pointer */,
+ const T& value, ::std::ostream* os) {
+ ::testing_internal::DefaultPrintNonContainerTo(value, os);
+}
+
+// Prints the given value using the << operator if it has one;
+// otherwise prints the bytes in it. This is what
+// UniversalPrinter<T>::Print() does when PrintTo() is not specialized
+// or overloaded for type T.
+//
+// A user can override this behavior for a class type Foo by defining
+// an overload of PrintTo() in the namespace where Foo is defined. We
+// give the user this option as sometimes defining a << operator for
+// Foo is not desirable (e.g. the coding style may prevent doing it,
+// or there is already a << operator but it doesn't do what the user
+// wants).
+template <typename T>
+void PrintTo(const T& value, ::std::ostream* os) {
+ // DefaultPrintTo() is overloaded. The type of its first two
+ // arguments determine which version will be picked. If T is an
+ // STL-style container, the version for container will be called; if
+ // T is a pointer, the pointer version will be called; otherwise the
+ // generic version will be called.
+ //
+ // Note that we check for container types here, before we check
+ // for protocol message types in our operator<<. The rationale is:
+ //
+ // For protocol messages, we want to give people a chance to
+ // override Google Mock's format by defining a PrintTo() or
+ // operator<<. For STL containers, other formats can be
+ // incompatible with Google Mock's format for the container
+ // elements; therefore we check for container types here to ensure
+ // that our format is used.
+ //
+ // The second argument of DefaultPrintTo() is needed to bypass a bug
+ // in Symbian's C++ compiler that prevents it from picking the right
+ // overload between:
+ //
+ // PrintTo(const T& x, ...);
+ // PrintTo(T* x, ...);
+ DefaultPrintTo(IsContainerTest<T>(0), is_pointer<T>(), value, os);
+}
+
+// The following list of PrintTo() overloads tells
+// UniversalPrinter<T>::Print() how to print standard types (built-in
+// types, strings, plain arrays, and pointers).
+
+// Overloads for various char types.
+GTEST_API_ void PrintTo(unsigned char c, ::std::ostream* os);
+GTEST_API_ void PrintTo(signed char c, ::std::ostream* os);
+inline void PrintTo(char c, ::std::ostream* os) {
+ // When printing a plain char, we always treat it as unsigned. This
+ // way, the output won't be affected by whether the compiler thinks
+ // char is signed or not.
+ PrintTo(static_cast<unsigned char>(c), os);
+}
+
+// Overloads for other simple built-in types.
+inline void PrintTo(bool x, ::std::ostream* os) {
+ *os << (x ? "true" : "false");
+}
+
+// Overload for wchar_t type.
+// Prints a wchar_t as a symbol if it is printable or as its internal
+// code otherwise and also as its decimal code (except for L'\0').
+// The L'\0' char is printed as "L'\\0'". The decimal code is printed
+// as signed integer when wchar_t is implemented by the compiler
+// as a signed type and is printed as an unsigned integer when wchar_t
+// is implemented as an unsigned type.
+GTEST_API_ void PrintTo(wchar_t wc, ::std::ostream* os);
+
+// Overloads for C strings.
+GTEST_API_ void PrintTo(const char* s, ::std::ostream* os);
+inline void PrintTo(char* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const char*>(s), os);
+}
+
+// signed/unsigned char is often used for representing binary data, so
+// we print pointers to it as void* to be safe.
+inline void PrintTo(const signed char* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(signed char* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(const unsigned char* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(unsigned char* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const void*>(s), os);
+}
+
+// MSVC can be configured to define wchar_t as a typedef of unsigned
+// short. It defines _NATIVE_WCHAR_T_DEFINED when wchar_t is a native
+// type. When wchar_t is a typedef, defining an overload for const
+// wchar_t* would cause unsigned short* be printed as a wide string,
+// possibly causing invalid memory accesses.
+#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
+// Overloads for wide C strings
+GTEST_API_ void PrintTo(const wchar_t* s, ::std::ostream* os);
+inline void PrintTo(wchar_t* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const wchar_t*>(s), os);
+}
+#endif
+
+// Overload for C arrays. Multi-dimensional arrays are printed
+// properly.
+
+// Prints the given number of elements in an array, without printing
+// the curly braces.
+template <typename T>
+void PrintRawArrayTo(const T a[], size_t count, ::std::ostream* os) {
+ UniversalPrint(a[0], os);
+ for (size_t i = 1; i != count; i++) {
+ *os << ", ";
+ UniversalPrint(a[i], os);
+ }
+}
+
+// Overloads for ::string and ::std::string.
+#if GTEST_HAS_GLOBAL_STRING
+GTEST_API_ void PrintStringTo(const ::string&s, ::std::ostream* os);
+inline void PrintTo(const ::string& s, ::std::ostream* os) {
+ PrintStringTo(s, os);
+}
+#endif // GTEST_HAS_GLOBAL_STRING
+
+GTEST_API_ void PrintStringTo(const ::std::string&s, ::std::ostream* os);
+inline void PrintTo(const ::std::string& s, ::std::ostream* os) {
+ PrintStringTo(s, os);
+}
+
+// Overloads for ::wstring and ::std::wstring.
+#if GTEST_HAS_GLOBAL_WSTRING
+GTEST_API_ void PrintWideStringTo(const ::wstring&s, ::std::ostream* os);
+inline void PrintTo(const ::wstring& s, ::std::ostream* os) {
+ PrintWideStringTo(s, os);
+}
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_API_ void PrintWideStringTo(const ::std::wstring&s, ::std::ostream* os);
+inline void PrintTo(const ::std::wstring& s, ::std::ostream* os) {
+ PrintWideStringTo(s, os);
+}
+#endif // GTEST_HAS_STD_WSTRING
+
+#if GTEST_HAS_TR1_TUPLE
+// Overload for ::std::tr1::tuple. Needed for printing function arguments,
+// which are packed as tuples.
+
+// Helper function for printing a tuple. T must be instantiated with
+// a tuple type.
+template <typename T>
+void PrintTupleTo(const T& t, ::std::ostream* os);
+
+// Overloaded PrintTo() for tuples of various arities. We support
+// tuples of up-to 10 fields. The following implementation works
+// regardless of whether tr1::tuple is implemented using the
+// non-standard variadic template feature or not.
+
+inline void PrintTo(const ::std::tr1::tuple<>& t, ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1>
+void PrintTo(const ::std::tr1::tuple<T1>& t, ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2>
+void PrintTo(const ::std::tr1::tuple<T1, T2>& t, ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3>& t, ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4>& t, ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5>& t,
+ ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6>& t,
+ ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7>& t,
+ ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8>& t,
+ ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9>& t,
+ ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+void PrintTo(
+ const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>& t,
+ ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+#endif // GTEST_HAS_TR1_TUPLE
+
+// Overload for std::pair.
+template <typename T1, typename T2>
+void PrintTo(const ::std::pair<T1, T2>& value, ::std::ostream* os) {
+ *os << '(';
+ // We cannot use UniversalPrint(value.first, os) here, as T1 may be
+ // a reference type. The same for printing value.second.
+ UniversalPrinter<T1>::Print(value.first, os);
+ *os << ", ";
+ UniversalPrinter<T2>::Print(value.second, os);
+ *os << ')';
+}
+
+// Implements printing a non-reference type T by letting the compiler
+// pick the right overload of PrintTo() for T.
+template <typename T>
+class UniversalPrinter {
+ public:
+ // MSVC warns about adding const to a function type, so we want to
+ // disable the warning.
+#ifdef _MSC_VER
+# pragma warning(push) // Saves the current warning state.
+# pragma warning(disable:4180) // Temporarily disables warning 4180.
+#endif // _MSC_VER
+
+ // Note: we deliberately don't call this PrintTo(), as that name
+ // conflicts with ::testing::internal::PrintTo in the body of the
+ // function.
+ static void Print(const T& value, ::std::ostream* os) {
+ // By default, ::testing::internal::PrintTo() is used for printing
+ // the value.
+ //
+ // Thanks to Koenig look-up, if T is a class and has its own
+ // PrintTo() function defined in its namespace, that function will
+ // be visible here. Since it is more specific than the generic ones
+ // in ::testing::internal, it will be picked by the compiler in the
+ // following statement - exactly what we want.
+ PrintTo(value, os);
+ }
+
+#ifdef _MSC_VER
+# pragma warning(pop) // Restores the warning state.
+#endif // _MSC_VER
+};
+
+// UniversalPrintArray(begin, len, os) prints an array of 'len'
+// elements, starting at address 'begin'.
+template <typename T>
+void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) {
+ if (len == 0) {
+ *os << "{}";
+ } else {
+ *os << "{ ";
+ const size_t kThreshold = 18;
+ const size_t kChunkSize = 8;
+ // If the array has more than kThreshold elements, we'll have to
+ // omit some details by printing only the first and the last
+ // kChunkSize elements.
+ // TODO(wan@google.com): let the user control the threshold using a flag.
+ if (len <= kThreshold) {
+ PrintRawArrayTo(begin, len, os);
+ } else {
+ PrintRawArrayTo(begin, kChunkSize, os);
+ *os << ", ..., ";
+ PrintRawArrayTo(begin + len - kChunkSize, kChunkSize, os);
+ }
+ *os << " }";
+ }
+}
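+
+// A worked example of the thresholds above: an int a[20] with a[i] == i is
+// longer than kThreshold (18), so only the first and last kChunkSize (8)
+// elements are shown:
+//
+//   { 0, 1, 2, 3, 4, 5, 6, 7, ..., 12, 13, 14, 15, 16, 17, 18, 19 }
+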
+// This overload prints a (const) char array compactly.
+GTEST_API_ void UniversalPrintArray(const char* begin,
+ size_t len,
+ ::std::ostream* os);
+
+// Implements printing an array type T[N].
+template <typename T, size_t N>
+class UniversalPrinter<T[N]> {
+ public:
+ // Prints the given array, omitting some elements when there are too
+ // many.
+ static void Print(const T (&a)[N], ::std::ostream* os) {
+ UniversalPrintArray(a, N, os);
+ }
+};
+
+// Implements printing a reference type T&.
+template <typename T>
+class UniversalPrinter<T&> {
+ public:
+ // MSVC warns about adding const to a function type, so we want to
+ // disable the warning.
+#ifdef _MSC_VER
+# pragma warning(push) // Saves the current warning state.
+# pragma warning(disable:4180) // Temporarily disables warning 4180.
+#endif // _MSC_VER
+
+ static void Print(const T& value, ::std::ostream* os) {
+ // Prints the address of the value. We use reinterpret_cast here
+ // as static_cast doesn't compile when T is a function type.
+ *os << "@" << reinterpret_cast<const void*>(&value) << " ";
+
+ // Then prints the value itself.
+ UniversalPrint(value, os);
+ }
+
+#ifdef _MSC_VER
+# pragma warning(pop) // Restores the warning state.
+#endif // _MSC_VER
+};
+
+// Prints a value tersely: for a reference type, the referenced value
+// (but not the address) is printed; for a (const) char pointer, the
+// NUL-terminated string (but not the pointer) is printed.
+template <typename T>
+void UniversalTersePrint(const T& value, ::std::ostream* os) {
+ UniversalPrint(value, os);
+}
+inline void UniversalTersePrint(const char* str, ::std::ostream* os) {
+ if (str == NULL) {
+ *os << "NULL";
+ } else {
+ UniversalPrint(string(str), os);
+ }
+}
+inline void UniversalTersePrint(char* str, ::std::ostream* os) {
+ UniversalTersePrint(static_cast<const char*>(str), os);
+}
+
+// Prints a value using the type inferred by the compiler. The
+// difference between this and UniversalTersePrint() is that for a
+// (const) char pointer, this prints both the pointer and the
+// NUL-terminated string.
+template <typename T>
+void UniversalPrint(const T& value, ::std::ostream* os) {
+ UniversalPrinter<T>::Print(value, os);
+}
+
+#if GTEST_HAS_TR1_TUPLE
+typedef ::std::vector<string> Strings;
+
+// This helper template allows PrintTo() for tuples and
+// UniversalTersePrintTupleFieldsToStrings() to be defined by
+// induction on the number of tuple fields. The idea is that
+// TuplePrefixPrinter<N>::PrintPrefixTo(t, os) prints the first N
+// fields in tuple t, and can be defined in terms of
+// TuplePrefixPrinter<N - 1>.
+
+// The inductive case.
+template <size_t N>
+struct TuplePrefixPrinter {
+ // Prints the first N fields of a tuple.
+ template <typename Tuple>
+ static void PrintPrefixTo(const Tuple& t, ::std::ostream* os) {
+ TuplePrefixPrinter<N - 1>::PrintPrefixTo(t, os);
+ *os << ", ";
+ UniversalPrinter<typename ::std::tr1::tuple_element<N - 1, Tuple>::type>
+ ::Print(::std::tr1::get<N - 1>(t), os);
+ }
+
+ // Tersely prints the first N fields of a tuple to a string vector,
+ // one element for each field.
+ template <typename Tuple>
+ static void TersePrintPrefixToStrings(const Tuple& t, Strings* strings) {
+ TuplePrefixPrinter<N - 1>::TersePrintPrefixToStrings(t, strings);
+ ::std::stringstream ss;
+ UniversalTersePrint(::std::tr1::get<N - 1>(t), &ss);
+ strings->push_back(ss.str());
+ }
+};
+
+// Base cases.
+template <>
+struct TuplePrefixPrinter<0> {
+ template <typename Tuple>
+ static void PrintPrefixTo(const Tuple&, ::std::ostream*) {}
+
+ template <typename Tuple>
+ static void TersePrintPrefixToStrings(const Tuple&, Strings*) {}
+};
+// We have to specialize the entire TuplePrefixPrinter<> class
+// template here, even though the definition of
+// TersePrintPrefixToStrings() is the same as the generic version, as
+// Embarcadero (formerly CodeGear, formerly Borland) C++ doesn't
+// support specializing a method template of a class template.
+template <>
+struct TuplePrefixPrinter<1> {
+ template <typename Tuple>
+ static void PrintPrefixTo(const Tuple& t, ::std::ostream* os) {
+ UniversalPrinter<typename ::std::tr1::tuple_element<0, Tuple>::type>::
+ Print(::std::tr1::get<0>(t), os);
+ }
+
+ template <typename Tuple>
+ static void TersePrintPrefixToStrings(const Tuple& t, Strings* strings) {
+ ::std::stringstream ss;
+ UniversalTersePrint(::std::tr1::get<0>(t), &ss);
+ strings->push_back(ss.str());
+ }
+};
+
+// Helper function for printing a tuple. T must be instantiated with
+// a tuple type.
+template <typename T>
+void PrintTupleTo(const T& t, ::std::ostream* os) {
+ *os << "(";
+ TuplePrefixPrinter< ::std::tr1::tuple_size<T>::value>::
+ PrintPrefixTo(t, os);
+ *os << ")";
+}
+
+// Prints the fields of a tuple tersely to a string vector, one
+// element for each field. See the comment before
+// UniversalTersePrint() for how we define "tersely".
+template <typename Tuple>
+Strings UniversalTersePrintTupleFieldsToStrings(const Tuple& value) {
+ Strings result;
+ TuplePrefixPrinter< ::std::tr1::tuple_size<Tuple>::value>::
+ TersePrintPrefixToStrings(value, &result);
+ return result;
+}
+#endif // GTEST_HAS_TR1_TUPLE
+
+} // namespace internal
+
+template <typename T>
+::std::string PrintToString(const T& value) {
+ ::std::stringstream ss;
+ internal::UniversalTersePrint(value, &ss);
+ return ss.str();
+}
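+
+// Usage sketch (values and the resulting text are illustrative):
+//
+//   std::vector<int> v(2, 7);
+//   std::string s = PrintToString(v);  // yields something like "{ 7, 7 }"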
+
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+namespace internal {
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Outputs a message explaining invalid registration of a different
+// fixture class for the same test case. This may happen when
+// TEST_P macro is used to define two tests with the same name
+// but in different namespaces.
+GTEST_API_ void ReportInvalidTestCaseType(const char* test_case_name,
+ const char* file, int line);
+
+template <typename> class ParamGeneratorInterface;
+template <typename> class ParamGenerator;
+
+// Interface for iterating over elements provided by an implementation
+// of ParamGeneratorInterface<T>.
+template <typename T>
+class ParamIteratorInterface {
+ public:
+ virtual ~ParamIteratorInterface() {}
+ // A pointer to the base generator instance.
+ // Used only for the purposes of iterator comparison
+ // to make sure that two iterators belong to the same generator.
+ virtual const ParamGeneratorInterface<T>* BaseGenerator() const = 0;
+ // Advances iterator to point to the next element
+ // provided by the generator. The caller is responsible
+ // for not calling Advance() on an iterator equal to
+ // BaseGenerator()->End().
+ virtual void Advance() = 0;
+ // Clones the iterator object. Used for implementing copy semantics
+ // of ParamIterator<T>.
+ virtual ParamIteratorInterface* Clone() const = 0;
+ // Dereferences the current iterator and provides (read-only) access
+ // to the pointed value. It is the caller's responsibility not to call
+ // Current() on an iterator equal to BaseGenerator()->End().
+ // Used for implementing ParamGenerator<T>::operator*().
+ virtual const T* Current() const = 0;
+ // Determines whether the given iterator and other point to the same
+ // element in the sequence generated by the generator.
+ // Used for implementing ParamGenerator<T>::operator==().
+ virtual bool Equals(const ParamIteratorInterface& other) const = 0;
+};
+
+// Class iterating over elements provided by an implementation of
+// ParamGeneratorInterface<T>. It wraps ParamIteratorInterface<T>
+// and implements the const forward iterator concept.
+template <typename T>
+class ParamIterator {
+ public:
+ typedef T value_type;
+ typedef const T& reference;
+ typedef ptrdiff_t difference_type;
+
+ // ParamIterator assumes ownership of the impl_ pointer.
+ ParamIterator(const ParamIterator& other) : impl_(other.impl_->Clone()) {}
+ ParamIterator& operator=(const ParamIterator& other) {
+ if (this != &other)
+ impl_.reset(other.impl_->Clone());
+ return *this;
+ }
+
+ const T& operator*() const { return *impl_->Current(); }
+ const T* operator->() const { return impl_->Current(); }
+ // Prefix version of operator++.
+ ParamIterator& operator++() {
+ impl_->Advance();
+ return *this;
+ }
+ // Postfix version of operator++.
+ ParamIterator operator++(int /*unused*/) {
+ ParamIteratorInterface<T>* clone = impl_->Clone();
+ impl_->Advance();
+ return ParamIterator(clone);
+ }
+ bool operator==(const ParamIterator& other) const {
+ return impl_.get() == other.impl_.get() || impl_->Equals(*other.impl_);
+ }
+ bool operator!=(const ParamIterator& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ friend class ParamGenerator<T>;
+ explicit ParamIterator(ParamIteratorInterface<T>* impl) : impl_(impl) {}
+ scoped_ptr<ParamIteratorInterface<T> > impl_;
+};
+
+// ParamGeneratorInterface<T> is the binary interface to access generators
+// defined in other translation units.
+template <typename T>
+class ParamGeneratorInterface {
+ public:
+ typedef T ParamType;
+
+ virtual ~ParamGeneratorInterface() {}
+
+ // Generator interface definition
+ virtual ParamIteratorInterface<T>* Begin() const = 0;
+ virtual ParamIteratorInterface<T>* End() const = 0;
+};
+
+// Wraps ParamGeneratorInterface<T> and provides general generator syntax
+// compatible with the STL Container concept.
+// This class implements copy initialization semantics and the contained
+// ParamGeneratorInterface<T> instance is shared among all copies
+// of the original object. This is possible because that instance is immutable.
+template<typename T>
+class ParamGenerator {
+ public:
+ typedef ParamIterator<T> iterator;
+
+ explicit ParamGenerator(ParamGeneratorInterface<T>* impl) : impl_(impl) {}
+ ParamGenerator(const ParamGenerator& other) : impl_(other.impl_) {}
+
+ ParamGenerator& operator=(const ParamGenerator& other) {
+ impl_ = other.impl_;
+ return *this;
+ }
+
+ iterator begin() const { return iterator(impl_->Begin()); }
+ iterator end() const { return iterator(impl_->End()); }
+
+ private:
+ linked_ptr<const ParamGeneratorInterface<T> > impl_;
+};
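+
+// Sketch of how this STL-style interface is typically consumed (assuming the
+// Range() factory declared later in gtest-param-test.h, which produces a
+// ParamGenerator<int>):
+//
+//   ParamGenerator<int> gen = Range(0, 3);
+//   for (ParamGenerator<int>::iterator it = gen.begin(); it != gen.end();
+//        ++it) {
+//     // *it takes the values 0, 1, 2 in turn.
+//   }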
+
+// Generates values from a range of two comparable values. Can be used to
+// generate sequences of user-defined types that implement operator+() and
+// operator<().
+// This class is used in the Range() function.
+template <typename T, typename IncrementT>
+class RangeGenerator : public ParamGeneratorInterface<T> {
+ public:
+ RangeGenerator(T begin, T end, IncrementT step)
+ : begin_(begin), end_(end),
+ step_(step), end_index_(CalculateEndIndex(begin, end, step)) {}
+ virtual ~RangeGenerator() {}
+
+ virtual ParamIteratorInterface<T>* Begin() const {
+ return new Iterator(this, begin_, 0, step_);
+ }
+ virtual ParamIteratorInterface<T>* End() const {
+ return new Iterator(this, end_, end_index_, step_);
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<T> {
+ public:
+ Iterator(const ParamGeneratorInterface<T>* base, T value, int index,
+ IncrementT step)
+ : base_(base), value_(value), index_(index), step_(step) {}
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
+ return base_;
+ }
+ virtual void Advance() {
+ value_ = value_ + step_;
+ index_++;
+ }
+ virtual ParamIteratorInterface<T>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const T* Current() const { return &value_; }
+ virtual bool Equals(const ParamIteratorInterface<T>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const int other_index =
+ CheckedDowncastToActualType<const Iterator>(&other)->index_;
+ return index_ == other_index;
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : ParamIteratorInterface<T>(),
+ base_(other.base_), value_(other.value_), index_(other.index_),
+ step_(other.step_) {}
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<T>* const base_;
+ T value_;
+ int index_;
+ const IncrementT step_;
+ }; // class RangeGenerator::Iterator
+
+ static int CalculateEndIndex(const T& begin,
+ const T& end,
+ const IncrementT& step) {
+ int end_index = 0;
+ for (T i = begin; i < end; i = i + step)
+ end_index++;
+ return end_index;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const RangeGenerator& other);
+
+ const T begin_;
+ const T end_;
+ const IncrementT step_;
+ // The index for the end() iterator. All the elements in the generated
+ // sequence are indexed (0-based) to aid iterator comparison.
+ const int end_index_;
+}; // class RangeGenerator
+
+
+// Generates values from a pair of STL-style iterators. Used in the
+// ValuesIn() function. The elements are copied from the source range
+// since the source can be located on the stack, and the generator
+// is likely to persist beyond that stack frame.
+template <typename T>
+class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface<T> {
+ public:
+ template <typename ForwardIterator>
+ ValuesInIteratorRangeGenerator(ForwardIterator begin, ForwardIterator end)
+ : container_(begin, end) {}
+ virtual ~ValuesInIteratorRangeGenerator() {}
+
+ virtual ParamIteratorInterface<T>* Begin() const {
+ return new Iterator(this, container_.begin());
+ }
+ virtual ParamIteratorInterface<T>* End() const {
+ return new Iterator(this, container_.end());
+ }
+
+ private:
+ typedef typename ::std::vector<T> ContainerType;
+
+ class Iterator : public ParamIteratorInterface<T> {
+ public:
+ Iterator(const ParamGeneratorInterface<T>* base,
+ typename ContainerType::const_iterator iterator)
+ : base_(base), iterator_(iterator) {}
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
+ return base_;
+ }
+ virtual void Advance() {
+ ++iterator_;
+ value_.reset();
+ }
+ virtual ParamIteratorInterface<T>* Clone() const {
+ return new Iterator(*this);
+ }
+ // We need to use cached value referenced by iterator_ because *iterator_
+ // can return a temporary object (and of type other than T), so just
+ // having "return &*iterator_;" doesn't work.
+ // value_ is updated here and not in Advance() because Advance()
+ // can advance iterator_ beyond the end of the range, and we cannot
+ // detect that fact. The client code, on the other hand, is
+ // responsible for not calling Current() on an out-of-range iterator.
+ virtual const T* Current() const {
+ if (value_.get() == NULL)
+ value_.reset(new T(*iterator_));
+ return value_.get();
+ }
+ virtual bool Equals(const ParamIteratorInterface<T>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ return iterator_ ==
+ CheckedDowncastToActualType<const Iterator>(&other)->iterator_;
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ // The explicit constructor call suppresses a false warning
+ // emitted by gcc when supplied with the -Wextra option.
+ : ParamIteratorInterface<T>(),
+ base_(other.base_),
+ iterator_(other.iterator_) {}
+
+ const ParamGeneratorInterface<T>* const base_;
+ typename ContainerType::const_iterator iterator_;
+ // A cached value of *iterator_. We keep it here to allow access by
+ // pointer in the wrapping iterator's operator->().
+ // value_ needs to be mutable to be accessed in Current().
+ // Use of scoped_ptr helps manage cached value's lifetime,
+ // which is bound by the lifespan of the iterator itself.
+ mutable scoped_ptr<const T> value_;
+ }; // class ValuesInIteratorRangeGenerator::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const ValuesInIteratorRangeGenerator& other);
+
+ const ContainerType container_;
+}; // class ValuesInIteratorRangeGenerator
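+
+// Usage sketch (illustrative; ValuesIn() is the public wrapper declared in
+// include/gtest/gtest-param-test.h, and FooTest is assumed to derive from
+// TestWithParam<std::string>). Because the elements are copied, the local
+// vector may safely go out of scope after the macro invocation:
+//
+//   std::vector<std::string> v;
+//   v.push_back("a");
+//   v.push_back("b");
+//   INSTANTIATE_TEST_CASE_P(Strings, FooTest, ValuesIn(v.begin(), v.end()));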
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Stores a parameter value and later creates tests parameterized with that
+// value.
+template <class TestClass>
+class ParameterizedTestFactory : public TestFactoryBase {
+ public:
+ typedef typename TestClass::ParamType ParamType;
+ explicit ParameterizedTestFactory(ParamType parameter) :
+ parameter_(parameter) {}
+ virtual Test* CreateTest() {
+ TestClass::SetParam(&parameter_);
+ return new TestClass();
+ }
+
+ private:
+ const ParamType parameter_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestFactory);
+};
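+
+// Sketch of the factory's role (illustrative, assuming FooTest is a
+// value-parameterized fixture with ParamType int): CreateTest() publishes the
+// stored value via SetParam() before constructing the fixture, so GetParam()
+// inside the test body returns that value.
+//
+//   TestFactoryBase* factory = new ParameterizedTestFactory<FooTest>(42);
+//   Test* test = factory->CreateTest();  // FooTest::GetParam() now yields 42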
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// TestMetaFactoryBase is a base class for meta-factories that create
+// test factories for passing into MakeAndRegisterTestInfo function.
+template <class ParamType>
+class TestMetaFactoryBase {
+ public:
+ virtual ~TestMetaFactoryBase() {}
+
+ virtual TestFactoryBase* CreateTestFactory(ParamType parameter) = 0;
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// TestMetaFactory creates test factories for passing into
+// MakeAndRegisterTestInfo function. Since MakeAndRegisterTestInfo receives
+// ownership of the test factory pointer, the same factory object cannot be
+// passed into that method twice. But ParameterizedTestCaseInfo is going to
+// call it for each Test/Parameter value combination. Thus it needs a meta
+// factory creator class.
+template <class TestCase>
+class TestMetaFactory
+ : public TestMetaFactoryBase<typename TestCase::ParamType> {
+ public:
+ typedef typename TestCase::ParamType ParamType;
+
+ TestMetaFactory() {}
+
+ virtual TestFactoryBase* CreateTestFactory(ParamType parameter) {
+ return new ParameterizedTestFactory<TestCase>(parameter);
+ }
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestMetaFactory);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseInfoBase is a generic interface
+// to ParameterizedTestCaseInfo classes. ParameterizedTestCaseInfoBase
+// accumulates test information provided by TEST_P macro invocations
+// and generators provided by INSTANTIATE_TEST_CASE_P macro invocations
+// and uses that information to register all resulting test instances
+// in RegisterTests method. The ParameterizedTestCaseRegistry class holds
+// a collection of pointers to the ParameterizedTestCaseInfo objects
+// and calls RegisterTests() on each of them when asked.
+class ParameterizedTestCaseInfoBase {
+ public:
+ virtual ~ParameterizedTestCaseInfoBase() {}
+
+ // Base part of test case name for display purposes.
+ virtual const string& GetTestCaseName() const = 0;
+ // Test case id to verify identity.
+ virtual TypeId GetTestCaseTypeId() const = 0;
+ // UnitTest class invokes this method to register tests in this
+ // test case right before running them in RUN_ALL_TESTS macro.
+ // This method should not be called more than once on any single
+ // instance of a ParameterizedTestCaseInfoBase derived class.
+ virtual void RegisterTests() = 0;
+
+ protected:
+ ParameterizedTestCaseInfoBase() {}
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfoBase);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseInfo accumulates tests obtained from TEST_P
+// macro invocations for a particular test case and generators
+// obtained from INSTANTIATE_TEST_CASE_P macro invocations for that
+// test case. It registers tests with all values generated by all
+// generators when asked.
+template <class TestCase>
+class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase {
+ public:
+ // ParamType and GeneratorCreationFunc are private types but are required
+ // for declarations of public methods AddTestPattern() and
+ // AddTestCaseInstantiation().
+ typedef typename TestCase::ParamType ParamType;
+ // A function that returns an instance of appropriate generator type.
+ typedef ParamGenerator<ParamType>(GeneratorCreationFunc)();
+
+ explicit ParameterizedTestCaseInfo(const char* name)
+ : test_case_name_(name) {}
+
+ // Test case base name for display purposes.
+ virtual const string& GetTestCaseName() const { return test_case_name_; }
+ // Test case id to verify identity.
+ virtual TypeId GetTestCaseTypeId() const { return GetTypeId<TestCase>(); }
+ // TEST_P macro uses AddTestPattern() to record information
+ // about a single test in the TestInfo structure defined below.
+ // test_case_name is the base name of the test case (without invocation
+ // prefix). test_base_name is the name of an individual test without
+ // parameter index. For the test SequenceA/FooTest.DoBar/1, FooTest is
+ // the test case base name and DoBar is the test base name.
+ void AddTestPattern(const char* test_case_name,
+ const char* test_base_name,
+ TestMetaFactoryBase<ParamType>* meta_factory) {
+ tests_.push_back(linked_ptr<TestInfo>(new TestInfo(test_case_name,
+ test_base_name,
+ meta_factory)));
+ }
+ // INSTANTIATE_TEST_CASE_P macro uses AddTestCaseInstantiation() to record
+ // information about a generator.
+ int AddTestCaseInstantiation(const string& instantiation_name,
+ GeneratorCreationFunc* func,
+ const char* /* file */,
+ int /* line */) {
+ instantiations_.push_back(::std::make_pair(instantiation_name, func));
+ return 0; // Return value used only to run this method in namespace scope.
+ }
+ // UnitTest class invokes this method to register tests in this test case
+ // right before running them in the RUN_ALL_TESTS macro.
+ // This method should not be called more than once on any single
+ // instance of a ParameterizedTestCaseInfoBase derived class.
+ // UnitTest has a guard to prevent this method from being called more
+ // than once.
+ virtual void RegisterTests() {
+ for (typename TestInfoContainer::iterator test_it = tests_.begin();
+ test_it != tests_.end(); ++test_it) {
+ linked_ptr<TestInfo> test_info = *test_it;
+ for (typename InstantiationContainer::iterator gen_it =
+ instantiations_.begin(); gen_it != instantiations_.end();
+ ++gen_it) {
+ const string& instantiation_name = gen_it->first;
+ ParamGenerator<ParamType> generator((*gen_it->second)());
+
+ Message test_case_name_stream;
+ if ( !instantiation_name.empty() )
+ test_case_name_stream << instantiation_name << "/";
+ test_case_name_stream << test_info->test_case_base_name;
+
+ int i = 0;
+ for (typename ParamGenerator<ParamType>::iterator param_it =
+ generator.begin();
+ param_it != generator.end(); ++param_it, ++i) {
+ Message test_name_stream;
+ test_name_stream << test_info->test_base_name << "/" << i;
+ MakeAndRegisterTestInfo(
+ test_case_name_stream.GetString().c_str(),
+ test_name_stream.GetString().c_str(),
+ NULL, // No type parameter.
+ PrintToString(*param_it).c_str(),
+ GetTestCaseTypeId(),
+ TestCase::SetUpTestCase,
+ TestCase::TearDownTestCase,
+ test_info->test_meta_factory->CreateTestFactory(*param_it));
+ } // for param_it
+ } // for gen_it
+ } // for test_it
+ } // RegisterTests
+
+ private:
+ // The TestInfo structure below keeps information about a single test
+ // registered with TEST_P macro.
+ struct TestInfo {
+ TestInfo(const char* a_test_case_base_name,
+ const char* a_test_base_name,
+ TestMetaFactoryBase<ParamType>* a_test_meta_factory) :
+ test_case_base_name(a_test_case_base_name),
+ test_base_name(a_test_base_name),
+ test_meta_factory(a_test_meta_factory) {}
+
+ const string test_case_base_name;
+ const string test_base_name;
+ const scoped_ptr<TestMetaFactoryBase<ParamType> > test_meta_factory;
+ };
+ typedef ::std::vector<linked_ptr<TestInfo> > TestInfoContainer;
+ // Keeps pairs of <Instantiation name, Sequence generator creation function>
+ // received from INSTANTIATE_TEST_CASE_P macros.
+ typedef ::std::vector<std::pair<string, GeneratorCreationFunc*> >
+ InstantiationContainer;
+
+ const string test_case_name_;
+ TestInfoContainer tests_;
+ InstantiationContainer instantiations_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfo);
+}; // class ParameterizedTestCaseInfo
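+
+// End-to-end sketch (illustrative) of the registration performed above:
+//
+//   class FooTest : public ::testing::TestWithParam<int> {};
+//   TEST_P(FooTest, DoBar) { EXPECT_GE(GetParam(), 0); }
+//   INSTANTIATE_TEST_CASE_P(SequenceA, FooTest, ::testing::Values(1, 2));
+//
+// RegisterTests() then registers the tests SequenceA/FooTest.DoBar/0 and
+// SequenceA/FooTest.DoBar/1, one per generated parameter value.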
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseRegistry contains a map of ParameterizedTestCaseInfoBase
+// classes accessed by test case names. TEST_P and INSTANTIATE_TEST_CASE_P
+// macros use it to locate their corresponding ParameterizedTestCaseInfo
+// descriptors.
+class ParameterizedTestCaseRegistry {
+ public:
+ ParameterizedTestCaseRegistry() {}
+ ~ParameterizedTestCaseRegistry() {
+ for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+ it != test_case_infos_.end(); ++it) {
+ delete *it;
+ }
+ }
+
+ // Looks up or creates and returns a structure containing information about
+ // tests and instantiations of a particular test case.
+ template <class TestCase>
+ ParameterizedTestCaseInfo<TestCase>* GetTestCasePatternHolder(
+ const char* test_case_name,
+ const char* file,
+ int line) {
+ ParameterizedTestCaseInfo<TestCase>* typed_test_info = NULL;
+ for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+ it != test_case_infos_.end(); ++it) {
+ if ((*it)->GetTestCaseName() == test_case_name) {
+ if ((*it)->GetTestCaseTypeId() != GetTypeId<TestCase>()) {
+ // Complain about incorrect usage of Google Test facilities
+ // and terminate the program since we cannot guarantee correct
+ // test case setup and tear-down in this case.
+ ReportInvalidTestCaseType(test_case_name, file, line);
+ posix::Abort();
+ } else {
+ // At this point we are sure that the object we found is of the same
+ // type we are looking for, so we downcast it to that type
+ // without further checks.
+ typed_test_info = CheckedDowncastToActualType<
+ ParameterizedTestCaseInfo<TestCase> >(*it);
+ }
+ break;
+ }
+ }
+ if (typed_test_info == NULL) {
+ typed_test_info = new ParameterizedTestCaseInfo<TestCase>(test_case_name);
+ test_case_infos_.push_back(typed_test_info);
+ }
+ return typed_test_info;
+ }
+ void RegisterTests() {
+ for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+ it != test_case_infos_.end(); ++it) {
+ (*it)->RegisterTests();
+ }
+ }
+
+ private:
+ typedef ::std::vector<ParameterizedTestCaseInfoBase*> TestCaseInfoContainer;
+
+ TestCaseInfoContainer test_case_infos_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseRegistry);
+};
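+
+// Illustrative sketch of how the registry is reached: the TEST_P and
+// INSTANTIATE_TEST_CASE_P macros both expand to code that, roughly, calls
+//
+//   ::testing::UnitTest::GetInstance()->parameterized_test_registry().
+//       GetTestCasePatternHolder<FooTest>("FooTest", __FILE__, __LINE__);
+//
+// so that the patterns and generators for one test case accumulate in the
+// same ParameterizedTestCaseInfo<FooTest> object.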
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_HAS_PARAM_TEST
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+// This file was GENERATED by command:
+// pump.py gtest-param-util-generated.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+
+// Type and function utilities for implementing parameterized tests.
+// This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+// Currently Google Test supports at most 50 arguments in Values,
+// and at most 10 arguments in Combine. Please contact
+// googletestframework@googlegroups.com if you need more.
+// Please note that the number of arguments to Combine is limited
+// by the maximum arity of the implementation of tr1::tuple which is
+// currently set at 10.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+
+// Forward declarations of ValuesIn(), which is implemented in
+// include/gtest/gtest-param-test.h.
+template <typename ForwardIterator>
+internal::ParamGenerator<
+ typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>
+ValuesIn(ForwardIterator begin, ForwardIterator end);
+
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]);
+
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+ const Container& container);
+
+namespace internal {
+
+// Used in the Values() function to provide polymorphic capabilities.
+template <typename T1>
+class ValueArray1 {
+ public:
+ explicit ValueArray1(T1 v1) : v1_(v1) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const { return ValuesIn(&v1_, &v1_ + 1); }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray1& other);
+
+ const T1 v1_;
+};
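+
+// Illustrative sketch: Values(), declared in include/gtest/gtest-param-test.h,
+// returns one of these ValueArrayN objects, and the templated conversion
+// operator lets the call site choose the element type, e.g.
+//
+//   ::testing::internal::ParamGenerator<double> gen = ::testing::Values(1, 2.5);
+//
+// converts both arguments to double before building the generator.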
+
+template <typename T1, typename T2>
+class ValueArray2 {
+ public:
+ ValueArray2(T1 v1, T2 v2) : v1_(v1), v2_(v2) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray2& other);
+
+ const T1 v1_;
+ const T2 v2_;
+};
+
+template <typename T1, typename T2, typename T3>
+class ValueArray3 {
+ public:
+ ValueArray3(T1 v1, T2 v2, T3 v3) : v1_(v1), v2_(v2), v3_(v3) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray3& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4>
+class ValueArray4 {
+ public:
+ ValueArray4(T1 v1, T2 v2, T3 v3, T4 v4) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray4& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+class ValueArray5 {
+ public:
+ ValueArray5(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray5& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+class ValueArray6 {
+ public:
+ ValueArray6(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray6& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+class ValueArray7 {
+ public:
+ ValueArray7(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray7& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+class ValueArray8 {
+ public:
+ ValueArray8(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray8& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+class ValueArray9 {
+ public:
+ ValueArray9(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+ T9 v9) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray9& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+class ValueArray10 {
+ public:
+ ValueArray10(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray10& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+class ValueArray11 {
+ public:
+ ValueArray11(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray11& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12>
+class ValueArray12 {
+ public:
+ ValueArray12(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray12& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13>
+class ValueArray13 {
+ public:
+ ValueArray13(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray13& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14>
+class ValueArray14 {
+ public:
+ ValueArray14(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray14& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15>
+class ValueArray15 {
+ public:
+ ValueArray15(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray15& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16>
+class ValueArray16 {
+ public:
+ ValueArray16(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray16& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17>
+class ValueArray17 {
+ public:
+ ValueArray17(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
+ T17 v17) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray17& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18>
+class ValueArray18 {
+ public:
+ ValueArray18(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray18& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19>
+class ValueArray19 {
+ public:
+ ValueArray19(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+ v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray19& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20>
+class ValueArray20 {
+ public:
+ ValueArray20(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+ v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+ v19_(v19), v20_(v20) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray20& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21>
+class ValueArray21 {
+ public:
+ ValueArray21(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+ v18_(v18), v19_(v19), v20_(v20), v21_(v21) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray21& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22>
+class ValueArray22 {
+ public:
+ ValueArray22(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray22& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23>
+class ValueArray23 {
+ public:
+ ValueArray23(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_,
+ v23_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray23& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24>
+class ValueArray24 {
+ public:
+ ValueArray24(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+ v22_(v22), v23_(v23), v24_(v24) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray24& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25>
+class ValueArray25 {
+ public:
+ ValueArray25(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
+ T25 v25) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray25& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26>
+class ValueArray26 {
+ public:
+ ValueArray26(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray26& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27>
+class ValueArray27 {
+ public:
+ ValueArray27(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+ v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
+ v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
+ v26_(v26), v27_(v27) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray27& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28>
+class ValueArray28 {
+ public:
+ ValueArray28(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+ v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+ v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
+ v25_(v25), v26_(v26), v27_(v27), v28_(v28) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray28& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29>
+class ValueArray29 {
+ public:
+ ValueArray29(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+ v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
+ v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray29& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30>
+class ValueArray30 {
+ public:
+ ValueArray30(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray30& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31>
+class ValueArray31 {
+ public:
+ ValueArray31(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray31& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32>
+class ValueArray32 {
+ public:
+ ValueArray32(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+ v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
+ v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray32& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33>
+class ValueArray33 {
+ public:
+ ValueArray33(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32,
+ T33 v33) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray33& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34>
+class ValueArray34 {
+ public:
+ ValueArray34(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray34& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35>
+class ValueArray35 {
+ public:
+ ValueArray35(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+ v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
+ v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
+ v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31),
+ v32_(v32), v33_(v33), v34_(v34), v35_(v35) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_,
+ v35_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray35& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36>
+class ValueArray36 {
+ public:
+ ValueArray36(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+ v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+ v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
+ v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30),
+ v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray36& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37>
+class ValueArray37 {
+ public:
+ ValueArray37(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+ v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
+ v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29),
+ v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35),
+ v36_(v36), v37_(v37) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray37& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38>
+class ValueArray38 {
+ public:
+ ValueArray38(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+ v35_(v35), v36_(v36), v37_(v37), v38_(v38) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray38& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39>
+class ValueArray39 {
+ public:
+ ValueArray39(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+ v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray39& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40>
+class ValueArray40 {
+ public:
+ ValueArray40(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+ v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
+ v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33),
+ v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39),
+ v40_(v40) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray40& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41>
+class ValueArray41 {
+ public:
+ ValueArray41(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40,
+ T41 v41) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+ v39_(v39), v40_(v40), v41_(v41) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray41& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42>
+class ValueArray42 {
+ public:
+ ValueArray42(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+ v39_(v39), v40_(v40), v41_(v41), v42_(v42) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray42& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43>
+class ValueArray43 {
+ public:
+ ValueArray43(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+ v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
+ v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
+ v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31),
+ v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37),
+ v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray43& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44>
+class ValueArray44 {
+ public:
+ ValueArray44(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+ v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+ v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
+ v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30),
+ v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36),
+ v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42),
+ v43_(v43), v44_(v44) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray44& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45>
+class ValueArray45 {
+ public:
+ ValueArray45(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+ v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
+ v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29),
+ v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35),
+ v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41),
+ v42_(v42), v43_(v43), v44_(v44), v45_(v45) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray45& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46>
+class ValueArray46 {
+ public:
+ ValueArray46(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+ v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40),
+ v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray46& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47>
+class ValueArray47 {
+ public:
+ ValueArray47(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+ v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40),
+ v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46),
+ v47_(v47) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_,
+ v47_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray47& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+ const T47 v47_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48>
+class ValueArray48 {
+ public:
+ ValueArray48(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+ v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
+ v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33),
+ v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39),
+ v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45),
+ v46_(v46), v47_(v47), v48_(v48) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_,
+ v48_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray48& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+ const T47 v47_;
+ const T48 v48_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49>
+class ValueArray49 {
+ public:
+ ValueArray49(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48,
+ T49 v49) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+ v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44),
+ v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_,
+ v48_, v49_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray49& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+ const T47 v47_;
+ const T48 v48_;
+ const T49 v49_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49, typename T50>
+class ValueArray50 {
+ public:
+ ValueArray50(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, T49 v49,
+ T50 v50) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+ v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44),
+ v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49), v50_(v50) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
+ v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
+ v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
+ v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_,
+ v48_, v49_, v50_};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray50& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+ const T47 v47_;
+ const T48 v48_;
+ const T49 v49_;
+ const T50 v50_;
+};
+
+# if GTEST_HAS_COMBINE
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Generates values from the Cartesian product of values produced
+// by the argument generators.
+//
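+// For example, given generators producing {1, 2} and {'a', 'b'},
+// CartesianProductGenerator2<int, char> yields the tuples (1, 'a'),
+// (1, 'b'), (2, 'a'), (2, 'b'): the rightmost component advances fastest
+// and wraps around before the component to its left is incremented.
+//
+// A minimal usage sketch (the test and parameter names below are
+// hypothetical), going through the public Combine() wrapper that is built
+// on top of these generators:
+//
+//   class PairTest
+//       : public ::testing::TestWithParam< ::std::tr1::tuple<int, char> > {
+//   };
+//
+//   TEST_P(PairTest, HandlesEveryCombination) {
+//     const int number = ::std::tr1::get<0>(GetParam());
+//     const char letter = ::std::tr1::get<1>(GetParam());
+//     // ... assertions on (number, letter) ...
+//   }
+//
+//   INSTANTIATE_TEST_CASE_P(
+//       AllPairs, PairTest,
+//       ::testing::Combine(::testing::Values(1, 2),
+//                          ::testing::Values('a', 'b')));
+//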
+template <typename T1, typename T2>
+class CartesianProductGenerator2
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2> ParamType;
+
+ CartesianProductGenerator2(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2)
+ : g1_(g1), g2_(g2) {}
+ virtual ~CartesianProductGenerator2() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance() must not be called on an iterator that is past the end of the
+ // range, so no component iterator may be past the end of its range either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current2_;
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of ways, so we
+ // have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_);
+ }
+ bool AtEnd() const {
+ // We must report the iterator as past the end of the range when either of
+ // the component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator2::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator2& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+}; // class CartesianProductGenerator2
+
+
+template <typename T1, typename T2, typename T3>
+class CartesianProductGenerator3
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3> ParamType;
+
+ CartesianProductGenerator3(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3)
+ : g1_(g1), g2_(g2), g3_(g3) {}
+ virtual ~CartesianProductGenerator3() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance() must not be called on an iterator that is past the end of the
+ // range, so no component iterator may be past the end of its range either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current3_;
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of ways, so we
+ // have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_);
+ }
+ bool AtEnd() const {
+ // We must report the iterator as past the end of the range when any of the
+ // component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator3::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator3& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+}; // class CartesianProductGenerator3
+
+
+template <typename T1, typename T2, typename T3, typename T4>
+class CartesianProductGenerator4
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4> ParamType;
+
+ CartesianProductGenerator4(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {}
+ virtual ~CartesianProductGenerator4() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+ // Advance() must not be called on an iterator that is past the end of the
+ // range, so no component iterator may be past the end of its range either.
+ virtual void Advance() {
+ assert(!AtEnd());
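+      // The component iterators advance like an odometer: the last one is
+      // incremented and, whenever a component wraps around to its end, it is
+      // reset to begin and the carry moves into the previous component.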
+ ++current4_;
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
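+      // (For instance, the iterator returned by End() holds every component
+      // at its end, while an iterator advanced past the last element holds
+      // only current1_ at end1_ with the remaining components reset to
+      // begin; both are logically past-the-end.)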
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_);
+ }
+ bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator4::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator4& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+}; // class CartesianProductGenerator4
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+class CartesianProductGenerator5
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5> ParamType;
+
+ CartesianProductGenerator5(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {}
+ virtual ~CartesianProductGenerator5() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+    // Advance() must not be called on an end-of-range iterator, so none of
+    // the component iterators can be past the end of its range here.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current5_;
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_);
+ }
+ bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator5::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator5& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+}; // class CartesianProductGenerator5
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+class CartesianProductGenerator6
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5,
+ T6> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6> ParamType;
+
+ CartesianProductGenerator6(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {}
+ virtual ~CartesianProductGenerator6() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+    // Advance() must not be called on an end-of-range iterator, so none of
+    // the component iterators can be past the end of its range here.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current6_;
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_);
+ }
+ bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator6::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator6& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+}; // class CartesianProductGenerator6
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+class CartesianProductGenerator7
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
+ T7> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7> ParamType;
+
+ CartesianProductGenerator7(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {}
+ virtual ~CartesianProductGenerator7() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+ g7_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6,
+ const ParamGenerator<T7>& g7,
+ const typename ParamGenerator<T7>::iterator& current7)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+ begin7_(g7.begin()), end7_(g7.end()), current7_(current7) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+    // Advance() must not be called on an end-of-range iterator, so none of
+    // the component iterators can be past the end of its range here.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current7_;
+ if (current7_ == end7_) {
+ current7_ = begin7_;
+ ++current6_;
+ }
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_ &&
+ current7_ == typed_other->current7_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_),
+ begin7_(other.begin7_),
+ end7_(other.end7_),
+ current7_(other.current7_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_, *current7_);
+ }
+ bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_ ||
+ current7_ == end7_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ const typename ParamGenerator<T7>::iterator begin7_;
+ const typename ParamGenerator<T7>::iterator end7_;
+ typename ParamGenerator<T7>::iterator current7_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator7::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator7& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+ const ParamGenerator<T7> g7_;
+}; // class CartesianProductGenerator7
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+class CartesianProductGenerator8
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
+ T7, T8> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8> ParamType;
+
+ CartesianProductGenerator8(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
+ const ParamGenerator<T8>& g8)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7),
+ g8_(g8) {}
+ virtual ~CartesianProductGenerator8() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+ g7_.begin(), g8_, g8_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
+ g8_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6,
+ const ParamGenerator<T7>& g7,
+ const typename ParamGenerator<T7>::iterator& current7,
+ const ParamGenerator<T8>& g8,
+ const typename ParamGenerator<T8>::iterator& current8)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+ begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
+ begin8_(g8.begin()), end8_(g8.end()), current8_(current8) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+    // Advance() must not be called on an end-of-range iterator, so none of
+    // the component iterators can be past the end of its range here.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current8_;
+ if (current8_ == end8_) {
+ current8_ = begin8_;
+ ++current7_;
+ }
+ if (current7_ == end7_) {
+ current7_ = begin7_;
+ ++current6_;
+ }
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_ &&
+ current7_ == typed_other->current7_ &&
+ current8_ == typed_other->current8_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_),
+ begin7_(other.begin7_),
+ end7_(other.end7_),
+ current7_(other.current7_),
+ begin8_(other.begin8_),
+ end8_(other.end8_),
+ current8_(other.current8_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_, *current7_, *current8_);
+ }
+ bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_ ||
+ current7_ == end7_ ||
+ current8_ == end8_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ const typename ParamGenerator<T7>::iterator begin7_;
+ const typename ParamGenerator<T7>::iterator end7_;
+ typename ParamGenerator<T7>::iterator current7_;
+ const typename ParamGenerator<T8>::iterator begin8_;
+ const typename ParamGenerator<T8>::iterator end8_;
+ typename ParamGenerator<T8>::iterator current8_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator8::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator8& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+ const ParamGenerator<T7> g7_;
+ const ParamGenerator<T8> g8_;
+}; // class CartesianProductGenerator8
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+class CartesianProductGenerator9
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
+ T7, T8, T9> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9> ParamType;
+
+ CartesianProductGenerator9(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
+ const ParamGenerator<T8>& g8, const ParamGenerator<T9>& g9)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+ g9_(g9) {}
+ virtual ~CartesianProductGenerator9() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+ g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
+ g8_.end(), g9_, g9_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6,
+ const ParamGenerator<T7>& g7,
+ const typename ParamGenerator<T7>::iterator& current7,
+ const ParamGenerator<T8>& g8,
+ const typename ParamGenerator<T8>::iterator& current8,
+ const ParamGenerator<T9>& g9,
+ const typename ParamGenerator<T9>::iterator& current9)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+ begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
+ begin8_(g8.begin()), end8_(g8.end()), current8_(current8),
+ begin9_(g9.begin()), end9_(g9.end()), current9_(current9) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+    // Advance() must not be called on an end-of-range iterator, so none of
+    // the component iterators can be past the end of its range here.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current9_;
+ if (current9_ == end9_) {
+ current9_ = begin9_;
+ ++current8_;
+ }
+ if (current8_ == end8_) {
+ current8_ = begin8_;
+ ++current7_;
+ }
+ if (current7_ == end7_) {
+ current7_ = begin7_;
+ ++current6_;
+ }
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_ &&
+ current7_ == typed_other->current7_ &&
+ current8_ == typed_other->current8_ &&
+ current9_ == typed_other->current9_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_),
+ begin7_(other.begin7_),
+ end7_(other.end7_),
+ current7_(other.current7_),
+ begin8_(other.begin8_),
+ end8_(other.end8_),
+ current8_(other.current8_),
+ begin9_(other.begin9_),
+ end9_(other.end9_),
+ current9_(other.current9_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_, *current7_, *current8_,
+ *current9_);
+ }
+ bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_ ||
+ current7_ == end7_ ||
+ current8_ == end8_ ||
+ current9_ == end9_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ const typename ParamGenerator<T7>::iterator begin7_;
+ const typename ParamGenerator<T7>::iterator end7_;
+ typename ParamGenerator<T7>::iterator current7_;
+ const typename ParamGenerator<T8>::iterator begin8_;
+ const typename ParamGenerator<T8>::iterator end8_;
+ typename ParamGenerator<T8>::iterator current8_;
+ const typename ParamGenerator<T9>::iterator begin9_;
+ const typename ParamGenerator<T9>::iterator end9_;
+ typename ParamGenerator<T9>::iterator current9_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator9::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator9& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+ const ParamGenerator<T7> g7_;
+ const ParamGenerator<T8> g8_;
+ const ParamGenerator<T9> g9_;
+}; // class CartesianProductGenerator9
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+class CartesianProductGenerator10
+ : public ParamGeneratorInterface< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
+ T7, T8, T9, T10> > {
+ public:
+ typedef ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> ParamType;
+
+ CartesianProductGenerator10(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
+ const ParamGenerator<T8>& g8, const ParamGenerator<T9>& g9,
+ const ParamGenerator<T10>& g10)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+ g9_(g9), g10_(g10) {}
+ virtual ~CartesianProductGenerator10() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+ g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin(), g10_, g10_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
+ g8_.end(), g9_, g9_.end(), g10_, g10_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6,
+ const ParamGenerator<T7>& g7,
+ const typename ParamGenerator<T7>::iterator& current7,
+ const ParamGenerator<T8>& g8,
+ const typename ParamGenerator<T8>::iterator& current8,
+ const ParamGenerator<T9>& g9,
+ const typename ParamGenerator<T9>::iterator& current9,
+ const ParamGenerator<T10>& g10,
+ const typename ParamGenerator<T10>::iterator& current10)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+ begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
+ begin8_(g8.begin()), end8_(g8.end()), current8_(current8),
+ begin9_(g9.begin()), end9_(g9.end()), current9_(current9),
+ begin10_(g10.begin()), end10_(g10.end()), current10_(current10) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+    // Advance() must not be called on an end-of-range iterator, so none of
+    // the component iterators can be past the end of its range here.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current10_;
+ if (current10_ == end10_) {
+ current10_ = begin10_;
+ ++current9_;
+ }
+ if (current9_ == end9_) {
+ current9_ = begin9_;
+ ++current8_;
+ }
+ if (current8_ == end8_) {
+ current8_ = begin8_;
+ ++current7_;
+ }
+ if (current7_ == end7_) {
+ current7_ = begin7_;
+ ++current6_;
+ }
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_ &&
+ current7_ == typed_other->current7_ &&
+ current8_ == typed_other->current8_ &&
+ current9_ == typed_other->current9_ &&
+ current10_ == typed_other->current10_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_),
+ begin7_(other.begin7_),
+ end7_(other.end7_),
+ current7_(other.current7_),
+ begin8_(other.begin8_),
+ end8_(other.end8_),
+ current8_(other.current8_),
+ begin9_(other.begin9_),
+ end9_(other.end9_),
+ current9_(other.current9_),
+ begin10_(other.begin10_),
+ end10_(other.end10_),
+ current10_(other.current10_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_, *current7_, *current8_,
+ *current9_, *current10_);
+ }
+ bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_ ||
+ current7_ == end7_ ||
+ current8_ == end8_ ||
+ current9_ == end9_ ||
+ current10_ == end10_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ const typename ParamGenerator<T7>::iterator begin7_;
+ const typename ParamGenerator<T7>::iterator end7_;
+ typename ParamGenerator<T7>::iterator current7_;
+ const typename ParamGenerator<T8>::iterator begin8_;
+ const typename ParamGenerator<T8>::iterator end8_;
+ typename ParamGenerator<T8>::iterator current8_;
+ const typename ParamGenerator<T9>::iterator begin9_;
+ const typename ParamGenerator<T9>::iterator end9_;
+ typename ParamGenerator<T9>::iterator current9_;
+ const typename ParamGenerator<T10>::iterator begin10_;
+ const typename ParamGenerator<T10>::iterator end10_;
+ typename ParamGenerator<T10>::iterator current10_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator10::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator10& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+ const ParamGenerator<T7> g7_;
+ const ParamGenerator<T8> g8_;
+ const ParamGenerator<T9> g9_;
+ const ParamGenerator<T10> g10_;
+}; // class CartesianProductGenerator10
+
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Helper classes providing Combine() with polymorphic features. They allow
+// casting CartesianProductGeneratorN<T> to ParamGenerator<U> if T is
+// convertible to U.
+//
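+// As an illustrative sketch (hypothetical user code, not part of this
+// header): Combine() is normally reached through INSTANTIATE_TEST_CASE_P,
+// and the holder it returns converts to the ParamGenerator the macro needs.
+// MyTest and IntBoolPairs below are made-up names used only for exposition.
+//
+//   class MyTest : public ::testing::TestWithParam<
+//       ::std::tr1::tuple<int, bool> > {};
+//   TEST_P(MyTest, DoesNothing) {}
+//   INSTANTIATE_TEST_CASE_P(IntBoolPairs, MyTest,
+//                           ::testing::Combine(::testing::Values(1, 2),
+//                                              ::testing::Bool()));
+//
+// Here Combine() returns a CartesianProductHolder2, which is converted to
+// ParamGenerator< ::std::tr1::tuple<int, bool> > by the conversion operator
+// defined below.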
+template <class Generator1, class Generator2>
+class CartesianProductHolder2 {
+ public:
+  CartesianProductHolder2(const Generator1& g1, const Generator2& g2)
+ : g1_(g1), g2_(g2) {}
+ template <typename T1, typename T2>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2> >(
+ new CartesianProductGenerator2<T1, T2>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder2& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+}; // class CartesianProductHolder2
+
+template <class Generator1, class Generator2, class Generator3>
+class CartesianProductHolder3 {
+ public:
+  CartesianProductHolder3(const Generator1& g1, const Generator2& g2,
+      const Generator3& g3)
+ : g1_(g1), g2_(g2), g3_(g3) {}
+ template <typename T1, typename T2, typename T3>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3> >(
+ new CartesianProductGenerator3<T1, T2, T3>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder3& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+}; // class CartesianProductHolder3
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4>
+class CartesianProductHolder4 {
+ public:
+  CartesianProductHolder4(const Generator1& g1, const Generator2& g2,
+      const Generator3& g3, const Generator4& g4)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {}
+ template <typename T1, typename T2, typename T3, typename T4>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4> >(
+ new CartesianProductGenerator4<T1, T2, T3, T4>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder4& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+}; // class CartesianProductHolder4
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5>
+class CartesianProductHolder5 {
+ public:
+  CartesianProductHolder5(const Generator1& g1, const Generator2& g2,
+      const Generator3& g3, const Generator4& g4, const Generator5& g5)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5> >(
+ new CartesianProductGenerator5<T1, T2, T3, T4, T5>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder5& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+}; // class CartesianProductHolder5
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6>
+class CartesianProductHolder6 {
+ public:
+  CartesianProductHolder6(const Generator1& g1, const Generator2& g2,
+      const Generator3& g3, const Generator4& g4, const Generator5& g5,
+      const Generator6& g6)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6> >(
+ new CartesianProductGenerator6<T1, T2, T3, T4, T5, T6>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder6& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+}; // class CartesianProductHolder6
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6, class Generator7>
+class CartesianProductHolder7 {
+ public:
+  CartesianProductHolder7(const Generator1& g1, const Generator2& g2,
+      const Generator3& g3, const Generator4& g4, const Generator5& g5,
+      const Generator6& g6, const Generator7& g7)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6,
+ T7> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7> >(
+ new CartesianProductGenerator7<T1, T2, T3, T4, T5, T6, T7>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_),
+ static_cast<ParamGenerator<T7> >(g7_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder7& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+ const Generator7 g7_;
+}; // class CartesianProductHolder7
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6, class Generator7,
+ class Generator8>
+class CartesianProductHolder8 {
+ public:
+  CartesianProductHolder8(const Generator1& g1, const Generator2& g2,
+      const Generator3& g3, const Generator4& g4, const Generator5& g5,
+      const Generator6& g6, const Generator7& g7, const Generator8& g8)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7),
+ g8_(g8) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7,
+ T8> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8> >(
+ new CartesianProductGenerator8<T1, T2, T3, T4, T5, T6, T7, T8>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_),
+ static_cast<ParamGenerator<T7> >(g7_),
+ static_cast<ParamGenerator<T8> >(g8_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder8& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+ const Generator7 g7_;
+ const Generator8 g8_;
+}; // class CartesianProductHolder8
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6, class Generator7,
+ class Generator8, class Generator9>
+class CartesianProductHolder9 {
+ public:
+ CartesianProductHolder9(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5,
+ const Generator6& g6, const Generator7& g7, const Generator8& g8,
+ const Generator9& g9)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+ g9_(g9) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
+ T9> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
+ T9> >(
+ new CartesianProductGenerator9<T1, T2, T3, T4, T5, T6, T7, T8, T9>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_),
+ static_cast<ParamGenerator<T7> >(g7_),
+ static_cast<ParamGenerator<T8> >(g8_),
+ static_cast<ParamGenerator<T9> >(g9_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder9& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+ const Generator7 g7_;
+ const Generator8 g8_;
+ const Generator9 g9_;
+}; // class CartesianProductHolder9
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6, class Generator7,
+ class Generator8, class Generator9, class Generator10>
+class CartesianProductHolder10 {
+ public:
+ CartesianProductHolder10(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5,
+ const Generator6& g6, const Generator7& g7, const Generator8& g8,
+ const Generator9& g9, const Generator10& g10)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+ g9_(g9), g10_(g10) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+ operator ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
+ T9, T10> >() const {
+ return ParamGenerator< ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
+ T9, T10> >(
+ new CartesianProductGenerator10<T1, T2, T3, T4, T5, T6, T7, T8, T9,
+ T10>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_),
+ static_cast<ParamGenerator<T7> >(g7_),
+ static_cast<ParamGenerator<T8> >(g8_),
+ static_cast<ParamGenerator<T9> >(g9_),
+ static_cast<ParamGenerator<T10> >(g10_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder10& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+ const Generator7 g7_;
+ const Generator8 g8_;
+ const Generator9 g9_;
+ const Generator10 g10_;
+}; // class CartesianProductHolder10
+
+# endif // GTEST_HAS_COMBINE
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_HAS_PARAM_TEST
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+
+// Functions producing parameter generators.
+//
+// Google Test uses these generators to produce parameters for value-
+// parameterized tests. When a parameterized test case is instantiated
+// with a particular generator, Google Test creates and runs tests
+// for each element in the sequence produced by the generator.
+//
+// In the following sample, the tests from test case FooTest are each
+// instantiated three times, with parameter values 3, 5, and 8:
+//
+// class FooTest : public TestWithParam<int> { ... };
+//
+// TEST_P(FooTest, TestThis) {
+// }
+// TEST_P(FooTest, TestThat) {
+// }
+// INSTANTIATE_TEST_CASE_P(TestSequence, FooTest, Values(3, 5, 8));
+//
+
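+// A TEST_P body reads its current parameter through GetParam(). As an
+// illustrative sketch only (the test name and expectation below are not
+// part of this header), the FooTest sample above could check each value:
+//
+// TEST_P(FooTest, IsPositive) {
+//   EXPECT_GT(GetParam(), 0);
+// }
+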
+// Range() returns generators providing sequences of values in a range.
+//
+// Synopsis:
+// Range(start, end)
+// - returns a generator producing the sequence of values {start, start+1,
+// start+2, ...}.
+// Range(start, end, step)
+// - returns a generator producing the sequence of values {start, start+step,
+// start+2*step, ...}.
+// Notes:
+// * The generated sequences never include end. For example, Range(1, 5)
+// returns a generator producing a sequence {1, 2, 3, 4}. Range(1, 9, 2)
+// returns a generator producing {1, 3, 5, 7}.
+// * start and end must have the same type. That type may be any integral or
+// floating-point type or a user-defined type satisfying these conditions:
+// * It must be assignable (have operator=() defined).
+// * It must have operator+() (operator+(int-compatible type) for
+// two-operand version).
+// * It must have operator<() defined.
+// Elements in the resulting sequences will also have that type.
+// * The condition start < end must hold for the resulting sequences to
+// contain any elements.
+//
+template <typename T, typename IncrementT>
+internal::ParamGenerator<T> Range(T start, T end, IncrementT step) {
+ return internal::ParamGenerator<T>(
+ new internal::RangeGenerator<T, IncrementT>(start, end, step));
+}
+
+template <typename T>
+internal::ParamGenerator<T> Range(T start, T end) {
+ return Range(start, end, 1);
+}
+
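+// For example, assuming a fixture BarTest derived from TestWithParam<int>
+// (the fixture and instantiation names here are only illustrative), Range()
+// can drive an instantiation directly:
+//
+// INSTANTIATE_TEST_CASE_P(OddNumbers, BarTest, Range(1, 10, 2));
+//
+// This runs every TEST_P in BarTest with the parameters 1, 3, 5, 7, and 9;
+// the end value 10 is excluded, as noted above.
+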
+// ValuesIn() allows generating tests with parameters coming from
+// a container.
+//
+// Synopsis:
+// ValuesIn(const T (&array)[N])
+// - returns a generator producing sequences with elements from
+// a C-style array.
+// ValuesIn(const Container& container)
+// - returns a generator producing sequences with elements from
+// an STL-style container.
+// ValuesIn(Iterator begin, Iterator end)
+// - returns a generator producing sequences with elements from
+// a range [begin, end) defined by a pair of STL-style iterators. These
+// iterators can also be plain C pointers.
+//
+// Please note that ValuesIn copies the values from the containers
+// passed in and keeps them to generate tests in RUN_ALL_TESTS().
+//
+// Examples:
+//
+// This instantiates tests from test case StringTest
+// each with C-string values of "foo", "bar", and "baz":
+//
+// const char* strings[] = {"foo", "bar", "baz"};
+// INSTANTIATE_TEST_CASE_P(StringSequence, StringTest, ValuesIn(strings));
+//
+// This instantiates tests from test case StlStringTest
+// each with STL string values "a" and "b":
+//
+// ::std::vector< ::std::string> GetParameterStrings() {
+// ::std::vector< ::std::string> v;
+// v.push_back("a");
+// v.push_back("b");
+// return v;
+// }
+//
+// INSTANTIATE_TEST_CASE_P(CharSequence,
+// StlStringTest,
+// ValuesIn(GetParameterStrings()));
+//
+//
+// This will also instantiate tests from CharTest
+// each with parameter values 'a' and 'b':
+//
+// ::std::list<char> GetParameterChars() {
+// ::std::list<char> list;
+// list.push_back('a');
+// list.push_back('b');
+// return list;
+// }
+// ::std::list<char> l = GetParameterChars();
+// INSTANTIATE_TEST_CASE_P(CharSequence2,
+// CharTest,
+// ValuesIn(l.begin(), l.end()));
+//
+template <typename ForwardIterator>
+internal::ParamGenerator<
+ typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>
+ValuesIn(ForwardIterator begin, ForwardIterator end) {
+ typedef typename ::testing::internal::IteratorTraits<ForwardIterator>
+ ::value_type ParamType;
+ return internal::ParamGenerator<ParamType>(
+ new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
+}
+
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]) {
+ return ValuesIn(array, array + N);
+}
+
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+ const Container& container) {
+ return ValuesIn(container.begin(), container.end());
+}
+
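+// As a further illustrative sketch (the test case and instantiation names
+// are hypothetical), the array overload above deduces the array length N at
+// compile time, so a plain C array of parameters needs no explicit size:
+//
+// const int kPorts[] = {80, 443, 8080};
+// INSTANTIATE_TEST_CASE_P(Ports, SocketTest, ValuesIn(kPorts));
+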
+// Values() allows generating tests from an explicitly specified list of
+// parameters.
+//
+// Synopsis:
+// Values(T v1, T v2, ..., T vN)
+// - returns a generator producing sequences with elements v1, v2, ..., vN.
+//
+// For example, this instantiates tests from test case BarTest each
+// with values "one", "two", and "three":
+//
+// INSTANTIATE_TEST_CASE_P(NumSequence, BarTest, Values("one", "two", "three"));
+//
+// This instantiates tests from test case BazTest each with the values 1, 2,
+// and 3.5. The exact type of the values will depend on the type of the
+// parameter in BazTest.
+//
+// INSTANTIATE_TEST_CASE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));
+//
+// Currently, Values() supports from 1 to 50 parameters.
+//
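+// As an illustrative sketch (the test name and expectation below are not
+// part of this header), each argument is converted to the fixture's
+// parameter type at instantiation time, so if BazTest above uses
+// TestWithParam<double>, its tests receive 1.0, 2.0, and 3.5:
+//
+// TEST_P(BazTest, AtLeastOne) {
+//   EXPECT_GE(GetParam(), 1.0);
+// }
+//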
+template <typename T1>
+internal::ValueArray1<T1> Values(T1 v1) {
+ return internal::ValueArray1<T1>(v1);
+}
+
+template <typename T1, typename T2>
+internal::ValueArray2<T1, T2> Values(T1 v1, T2 v2) {
+ return internal::ValueArray2<T1, T2>(v1, v2);
+}
+
+template <typename T1, typename T2, typename T3>
+internal::ValueArray3<T1, T2, T3> Values(T1 v1, T2 v2, T3 v3) {
+ return internal::ValueArray3<T1, T2, T3>(v1, v2, v3);
+}
+
+template <typename T1, typename T2, typename T3, typename T4>
+internal::ValueArray4<T1, T2, T3, T4> Values(T1 v1, T2 v2, T3 v3, T4 v4) {
+ return internal::ValueArray4<T1, T2, T3, T4>(v1, v2, v3, v4);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+internal::ValueArray5<T1, T2, T3, T4, T5> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5) {
+ return internal::ValueArray5<T1, T2, T3, T4, T5>(v1, v2, v3, v4, v5);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+internal::ValueArray6<T1, T2, T3, T4, T5, T6> Values(T1 v1, T2 v2, T3 v3,
+ T4 v4, T5 v5, T6 v6) {
+ return internal::ValueArray6<T1, T2, T3, T4, T5, T6>(v1, v2, v3, v4, v5, v6);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+internal::ValueArray7<T1, T2, T3, T4, T5, T6, T7> Values(T1 v1, T2 v2, T3 v3,
+ T4 v4, T5 v5, T6 v6, T7 v7) {
+ return internal::ValueArray7<T1, T2, T3, T4, T5, T6, T7>(v1, v2, v3, v4, v5,
+ v6, v7);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+internal::ValueArray8<T1, T2, T3, T4, T5, T6, T7, T8> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8) {
+ return internal::ValueArray8<T1, T2, T3, T4, T5, T6, T7, T8>(v1, v2, v3, v4,
+ v5, v6, v7, v8);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+internal::ValueArray9<T1, T2, T3, T4, T5, T6, T7, T8, T9> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9) {
+ return internal::ValueArray9<T1, T2, T3, T4, T5, T6, T7, T8, T9>(v1, v2, v3,
+ v4, v5, v6, v7, v8, v9);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+internal::ValueArray10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> Values(T1 v1,
+ T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10) {
+ return internal::ValueArray10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>(v1,
+ v2, v3, v4, v5, v6, v7, v8, v9, v10);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+internal::ValueArray11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,
+ T11> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11) {
+ return internal::ValueArray11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,
+ T11>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12>
+internal::ValueArray12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12) {
+ return internal::ValueArray12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13>
+internal::ValueArray13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13) {
+ return internal::ValueArray13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14>
+internal::ValueArray14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) {
+ return internal::ValueArray14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
+ v14);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15>
+internal::ValueArray15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+ T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) {
+ return internal::ValueArray15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
+ v13, v14, v15);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16>
+internal::ValueArray16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16) {
+ return internal::ValueArray16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
+ v12, v13, v14, v15, v16);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17>
+internal::ValueArray17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17) {
+ return internal::ValueArray17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
+ v11, v12, v13, v14, v15, v16, v17);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18>
+internal::ValueArray18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
+ T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18) {
+ return internal::ValueArray18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
+ v10, v11, v12, v13, v14, v15, v16, v17, v18);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19>
+internal::ValueArray19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
+ T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
+ T15 v15, T16 v16, T17 v17, T18 v18, T19 v19) {
+ return internal::ValueArray19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19>(v1, v2, v3, v4, v5, v6, v7, v8,
+ v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20>
+internal::ValueArray20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20) {
+ return internal::ValueArray20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20>(v1, v2, v3, v4, v5, v6, v7,
+ v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21>
+internal::ValueArray21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21) {
+ return internal::ValueArray21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>(v1, v2, v3, v4, v5, v6,
+ v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22>
+internal::ValueArray22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22> Values(T1 v1, T2 v2, T3 v3,
+ T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22) {
+ return internal::ValueArray22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>(v1, v2, v3, v4,
+ v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23>
+internal::ValueArray23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23) {
+ return internal::ValueArray23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>(v1, v2, v3,
+ v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22, v23);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24>
+internal::ValueArray24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23, T24 v24) {
+ return internal::ValueArray24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>(v1, v2,
+ v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
+ v19, v20, v21, v22, v23, v24);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25>
+internal::ValueArray25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Values(T1 v1,
+ T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11,
+ T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19,
+ T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25) {
+ return internal::ValueArray25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25>(v1,
+ v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17,
+ v18, v19, v20, v21, v22, v23, v24, v25);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26>
+internal::ValueArray26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26) {
+ return internal::ValueArray26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15,
+ v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27>
+internal::ValueArray27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27) {
+ return internal::ValueArray27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14,
+ v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28>
+internal::ValueArray28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28) {
+ return internal::ValueArray28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
+ v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27,
+ v28);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29>
+internal::ValueArray29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29) {
+ return internal::ValueArray29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
+ v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26,
+ v27, v28, v29);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30>
+internal::ValueArray30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+ T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
+ T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
+ T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) {
+ return internal::ValueArray30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
+ v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25,
+ v26, v27, v28, v29, v30);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31>
+internal::ValueArray31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) {
+ return internal::ValueArray31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
+ v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24,
+ v25, v26, v27, v28, v29, v30, v31);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32>
+internal::ValueArray32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32) {
+ return internal::ValueArray32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
+ v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31, v32);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33>
+internal::ValueArray33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
+ T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32, T33 v33) {
+ return internal::ValueArray33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33>(v1, v2, v3, v4, v5, v6, v7, v8,
+ v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31, v32, v33);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34>
+internal::ValueArray34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
+ T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
+ T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22,
+ T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30,
+ T31 v31, T32 v32, T33 v33, T34 v34) {
+ return internal::ValueArray34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34>(v1, v2, v3, v4, v5, v6, v7,
+ v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22,
+ v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35>
+internal::ValueArray35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
+ T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
+ T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35) {
+ return internal::ValueArray35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35>(v1, v2, v3, v4, v5, v6,
+ v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21,
+ v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36>
+internal::ValueArray36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
+ T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
+ T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36) {
+ return internal::ValueArray36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36>(v1, v2, v3, v4,
+ v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
+ v34, v35, v36);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37>
+internal::ValueArray37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37> Values(T1 v1, T2 v2, T3 v3,
+ T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
+ T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
+ T37 v37) {
+ return internal::ValueArray37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37>(v1, v2, v3,
+ v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
+ v34, v35, v36, v37);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38>
+internal::ValueArray38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
+ T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
+ T37 v37, T38 v38) {
+ return internal::ValueArray38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38>(v1, v2,
+ v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
+ v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32,
+ v33, v34, v35, v36, v37, v38);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39>
+internal::ValueArray39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
+ T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
+ T37 v37, T38 v38, T39 v39) {
+ return internal::ValueArray39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39>(v1,
+ v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17,
+ v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31,
+ v32, v33, v34, v35, v36, v37, v38, v39);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40>
+internal::ValueArray40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Values(T1 v1,
+ T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11,
+ T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19,
+ T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27,
+ T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35,
+ T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) {
+ return internal::ValueArray40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15,
+ v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29,
+ v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41>
+internal::ValueArray41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41) {
+ return internal::ValueArray41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14,
+ v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28,
+ v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42>
+internal::ValueArray42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42) {
+ return internal::ValueArray42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
+ v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27,
+ v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41,
+ v42);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43>
+internal::ValueArray43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43) {
+ return internal::ValueArray43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
+ v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26,
+ v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40,
+ v41, v42, v43);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44>
+internal::ValueArray44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44) {
+ return internal::ValueArray44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
+ v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25,
+ v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39,
+ v40, v41, v42, v43, v44);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45>
+internal::ValueArray45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+ T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
+ T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
+ T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32,
+ T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40,
+ T41 v41, T42 v42, T43 v43, T44 v44, T45 v45) {
+ return internal::ValueArray45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
+ v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24,
+ v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38,
+ v39, v40, v41, v42, v43, v44, v45);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46>
+internal::ValueArray46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
+ T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) {
+ return internal::ValueArray46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
+ v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37,
+ v38, v39, v40, v41, v42, v43, v44, v45, v46);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47>
+internal::ValueArray47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
+ T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) {
+ return internal::ValueArray47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46, T47>(v1, v2, v3, v4, v5, v6, v7, v8,
+ v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37,
+ v38, v39, v40, v41, v42, v43, v44, v45, v46, v47);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48>
+internal::ValueArray48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
+ T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
+ T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47,
+ T48 v48) {
+ return internal::ValueArray48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46, T47, T48>(v1, v2, v3, v4, v5, v6, v7,
+ v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22,
+ v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36,
+ v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49>
+internal::ValueArray49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
+ T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
+ T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22,
+ T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30,
+ T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38,
+ T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46,
+ T47 v47, T48 v48, T49 v49) {
+ return internal::ValueArray49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46, T47, T48, T49>(v1, v2, v3, v4, v5, v6,
+ v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21,
+ v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35,
+ v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49, typename T50>
+internal::ValueArray50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49, T50> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
+ T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
+ T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37,
+ T38 v38, T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45,
+ T46 v46, T47 v47, T48 v48, T49 v49, T50 v50) {
+ return internal::ValueArray50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46, T47, T48, T49, T50>(v1, v2, v3, v4,
+ v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
+ v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47,
+ v48, v49, v50);
+}
+
+// Bool() allows generating tests with parameters in the set {false, true}.
+//
+// Synopsis:
+// Bool()
+// - returns a generator producing sequences with elements {false, true}.
+//
+// It is useful when testing code that depends on Boolean flags. Combinations
+// of multiple flags can be tested when several Bool()'s are combined using
+// the Combine() function.
+//
+// In the following example all tests in the test case FlagDependentTest
+// will be instantiated twice with parameters false and true.
+//
+// class FlagDependentTest : public testing::TestWithParam<bool> {
+// virtual void SetUp() {
+// external_flag = GetParam();
+// }
+// };
+// INSTANTIATE_TEST_CASE_P(BoolSequence, FlagDependentTest, Bool());
+//
+inline internal::ParamGenerator<bool> Bool() {
+ return Values(false, true);
+}
+
+# if GTEST_HAS_COMBINE
+// Combine() allows the user to combine two or more sequences to produce
+// values of a Cartesian product of those sequences' elements.
+//
+// Synopsis:
+// Combine(gen1, gen2, ..., genN)
+// - returns a generator producing sequences with elements coming from
+// the Cartesian product of elements from the sequences generated by
+// gen1, gen2, ..., genN. The sequence elements will have a type of
+// tuple<T1, T2, ..., TN> where T1, T2, ..., TN are the types
+//        of elements from sequences produced by gen1, gen2, ..., genN.
+//
+// Combine can have up to 10 arguments. This number is currently limited
+// by the maximum number of elements in the tuple implementation used by Google
+// Test.
+//
+// Example:
+//
+// This will instantiate tests in test case AnimalTest, each one with
+// the parameter values tuple("cat", BLACK), tuple("cat", WHITE),
+// tuple("dog", BLACK), and tuple("dog", WHITE):
+//
+// enum Color { BLACK, GRAY, WHITE };
+// class AnimalTest
+// : public testing::TestWithParam<tuple<const char*, Color> > {...};
+//
+// TEST_P(AnimalTest, AnimalLooksNice) {...}
+//
+// INSTANTIATE_TEST_CASE_P(AnimalVariations, AnimalTest,
+// Combine(Values("cat", "dog"),
+// Values(BLACK, WHITE)));
+//
+// This will instantiate tests in FlagDependentTest with all variations of two
+// Boolean flags:
+//
+// class FlagDependentTest
+//     : public testing::TestWithParam<tuple<bool, bool> > {
+// virtual void SetUp() {
+// // Assigns external_flag_1 and external_flag_2 values from the tuple.
+// tie(external_flag_1, external_flag_2) = GetParam();
+// }
+// };
+//
+// TEST_P(FlagDependentTest, TestFeature1) {
+// // Test your code using external_flag_1 and external_flag_2 here.
+// }
+// INSTANTIATE_TEST_CASE_P(TwoBoolSequence, FlagDependentTest,
+// Combine(Bool(), Bool()));
+//
+template <typename Generator1, typename Generator2>
+internal::CartesianProductHolder2<Generator1, Generator2> Combine(
+ const Generator1& g1, const Generator2& g2) {
+ return internal::CartesianProductHolder2<Generator1, Generator2>(
+ g1, g2);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3>
+internal::CartesianProductHolder3<Generator1, Generator2, Generator3> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3) {
+ return internal::CartesianProductHolder3<Generator1, Generator2, Generator3>(
+ g1, g2, g3);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4>
+internal::CartesianProductHolder4<Generator1, Generator2, Generator3,
+ Generator4> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4) {
+ return internal::CartesianProductHolder4<Generator1, Generator2, Generator3,
+ Generator4>(
+ g1, g2, g3, g4);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5>
+internal::CartesianProductHolder5<Generator1, Generator2, Generator3,
+ Generator4, Generator5> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5) {
+ return internal::CartesianProductHolder5<Generator1, Generator2, Generator3,
+ Generator4, Generator5>(
+ g1, g2, g3, g4, g5);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6>
+internal::CartesianProductHolder6<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6) {
+ return internal::CartesianProductHolder6<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6>(
+ g1, g2, g3, g4, g5, g6);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6,
+ typename Generator7>
+internal::CartesianProductHolder7<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6,
+ const Generator7& g7) {
+ return internal::CartesianProductHolder7<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7>(
+ g1, g2, g3, g4, g5, g6, g7);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6,
+ typename Generator7, typename Generator8>
+internal::CartesianProductHolder8<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6,
+ const Generator7& g7, const Generator8& g8) {
+ return internal::CartesianProductHolder8<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8>(
+ g1, g2, g3, g4, g5, g6, g7, g8);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6,
+ typename Generator7, typename Generator8, typename Generator9>
+internal::CartesianProductHolder9<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8,
+ Generator9> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6,
+ const Generator7& g7, const Generator8& g8, const Generator9& g9) {
+ return internal::CartesianProductHolder9<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8, Generator9>(
+ g1, g2, g3, g4, g5, g6, g7, g8, g9);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6,
+ typename Generator7, typename Generator8, typename Generator9,
+ typename Generator10>
+internal::CartesianProductHolder10<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8, Generator9,
+ Generator10> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6,
+ const Generator7& g7, const Generator8& g8, const Generator9& g9,
+ const Generator10& g10) {
+ return internal::CartesianProductHolder10<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8, Generator9,
+ Generator10>(
+ g1, g2, g3, g4, g5, g6, g7, g8, g9, g10);
+}
+# endif // GTEST_HAS_COMBINE
+
+
+
+# define TEST_P(test_case_name, test_name) \
+ class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
+ : public test_case_name { \
+ public: \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
+ virtual void TestBody(); \
+ private: \
+ static int AddToRegistry() { \
+ ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
+ GetTestCasePatternHolder<test_case_name>(\
+ #test_case_name, __FILE__, __LINE__)->AddTestPattern(\
+ #test_case_name, \
+ #test_name, \
+ new ::testing::internal::TestMetaFactory< \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>()); \
+ return 0; \
+ } \
+ static int gtest_registering_dummy_; \
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
+ }; \
+ int GTEST_TEST_CLASS_NAME_(test_case_name, \
+ test_name)::gtest_registering_dummy_ = \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
+ void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
+
+# define INSTANTIATE_TEST_CASE_P(prefix, test_case_name, generator) \
+ ::testing::internal::ParamGenerator<test_case_name::ParamType> \
+ gtest_##prefix##test_case_name##_EvalGenerator_() { return generator; } \
+ int gtest_##prefix##test_case_name##_dummy_ = \
+ ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
+ GetTestCasePatternHolder<test_case_name>(\
+ #test_case_name, __FILE__, __LINE__)->AddTestCaseInstantiation(\
+ #prefix, \
+ &gtest_##prefix##test_case_name##_EvalGenerator_, \
+ __FILE__, __LINE__)
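+
+// Illustrative sketch (not part of Google Test itself): a minimal
+// value-parameterized test wired up with TEST_P and INSTANTIATE_TEST_CASE_P
+// as defined above. The fixture, test, and instantiation names are
+// hypothetical.
+//
+//   class CounterTest : public ::testing::TestWithParam<int> {};
+//
+//   TEST_P(CounterTest, IsNonNegative) {
+//     EXPECT_GE(GetParam(), 0);
+//   }
+//
+//   INSTANTIATE_TEST_CASE_P(SmallValues, CounterTest,
+//                           ::testing::Values(0, 1, 2));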
+
+} // namespace testing
+
+#endif // GTEST_HAS_PARAM_TEST
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// Google C++ Testing Framework definitions useful in production code.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+
+// When you need to test the private or protected members of a class,
+// use the FRIEND_TEST macro to declare your tests as friends of the
+// class. For example:
+//
+// class MyClass {
+// private:
+// void MyMethod();
+// FRIEND_TEST(MyClassTest, MyMethod);
+// };
+//
+// class MyClassTest : public testing::Test {
+// // ...
+// };
+//
+// TEST_F(MyClassTest, MyMethod) {
+// // Can call MyClass::MyMethod() here.
+// }
+
+#define FRIEND_TEST(test_case_name, test_name)\
+friend class test_case_name##_##test_name##_Test
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+
+#include <iosfwd>
+#include <vector>
+
+namespace testing {
+
+// A copyable object representing the result of a test part (i.e. an
+// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCEED()).
+//
+// Don't inherit from TestPartResult as its destructor is not virtual.
+class GTEST_API_ TestPartResult {
+ public:
+ // The possible outcomes of a test part (i.e. an assertion or an
+ // explicit SUCCEED(), FAIL(), or ADD_FAILURE()).
+ enum Type {
+ kSuccess, // Succeeded.
+ kNonFatalFailure, // Failed but the test can continue.
+ kFatalFailure // Failed and the test should be terminated.
+ };
+
+ // C'tor. TestPartResult does NOT have a default constructor.
+ // Always use this constructor (with parameters) to create a
+ // TestPartResult object.
+ TestPartResult(Type a_type,
+ const char* a_file_name,
+ int a_line_number,
+ const char* a_message)
+ : type_(a_type),
+ file_name_(a_file_name),
+ line_number_(a_line_number),
+ summary_(ExtractSummary(a_message)),
+ message_(a_message) {
+ }
+
+ // Gets the outcome of the test part.
+ Type type() const { return type_; }
+
+ // Gets the name of the source file where the test part took place, or
+ // NULL if it's unknown.
+ const char* file_name() const { return file_name_.c_str(); }
+
+ // Gets the line in the source file where the test part took place,
+ // or -1 if it's unknown.
+ int line_number() const { return line_number_; }
+
+ // Gets the summary of the failure message.
+ const char* summary() const { return summary_.c_str(); }
+
+ // Gets the message associated with the test part.
+ const char* message() const { return message_.c_str(); }
+
+ // Returns true iff the test part passed.
+ bool passed() const { return type_ == kSuccess; }
+
+ // Returns true iff the test part failed.
+ bool failed() const { return type_ != kSuccess; }
+
+ // Returns true iff the test part non-fatally failed.
+ bool nonfatally_failed() const { return type_ == kNonFatalFailure; }
+
+ // Returns true iff the test part fatally failed.
+ bool fatally_failed() const { return type_ == kFatalFailure; }
+ private:
+ Type type_;
+
+ // Gets the summary of the failure message by omitting the stack
+ // trace in it.
+ static internal::String ExtractSummary(const char* message);
+
+ // The name of the source file where the test part took place, or
+ // NULL if the source file is unknown.
+ internal::String file_name_;
+ // The line in the source file where the test part took place, or -1
+ // if the line number is unknown.
+ int line_number_;
+ internal::String summary_; // The test failure summary.
+ internal::String message_; // The test failure message.
+};
+
+// Prints a TestPartResult object.
+std::ostream& operator<<(std::ostream& os, const TestPartResult& result);
+
+// An array of TestPartResult objects.
+//
+// Don't inherit from TestPartResultArray as its destructor is not
+// virtual.
+class GTEST_API_ TestPartResultArray {
+ public:
+ TestPartResultArray() {}
+
+ // Appends the given TestPartResult to the array.
+ void Append(const TestPartResult& result);
+
+ // Returns the TestPartResult at the given index (0-based).
+ const TestPartResult& GetTestPartResult(int index) const;
+
+ // Returns the number of TestPartResult objects in the array.
+ int size() const;
+
+ private:
+ std::vector<TestPartResult> array_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray);
+};
+
+// This interface knows how to report a test part result.
+class TestPartResultReporterInterface {
+ public:
+ virtual ~TestPartResultReporterInterface() {}
+
+ virtual void ReportTestPartResult(const TestPartResult& result) = 0;
+};
+
+namespace internal {
+
+// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a
+// statement generates new fatal failures. To do so it registers itself as the
+// current test part result reporter. Apart from checking whether new fatal
+// failures were reported, it simply delegates the reporting to the previous
+// result reporter. The original result reporter is restored in the destructor.
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+class GTEST_API_ HasNewFatalFailureHelper
+ : public TestPartResultReporterInterface {
+ public:
+ HasNewFatalFailureHelper();
+ virtual ~HasNewFatalFailureHelper();
+ virtual void ReportTestPartResult(const TestPartResult& result);
+ bool has_new_fatal_failure() const { return has_new_fatal_failure_; }
+ private:
+ bool has_new_fatal_failure_;
+ TestPartResultReporterInterface* original_reporter_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper);
+};
+
+} // namespace internal
+
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+
+// This header implements typed tests and type-parameterized tests.
+
+// Typed (aka type-driven) tests repeat the same test for types in a
+// list. You must know which types you want to test with when writing
+// typed tests. Here's how you do it:
+
+#if 0
+
+// First, define a fixture class template. It should be parameterized
+// by a type. Remember to derive it from testing::Test.
+template <typename T>
+class FooTest : public testing::Test {
+ public:
+ ...
+ typedef std::list<T> List;
+ static T shared_;
+ T value_;
+};
+
+// Next, associate a list of types with the test case, which will be
+// repeated for each type in the list. The typedef is necessary for
+// the macro to parse correctly.
+typedef testing::Types<char, int, unsigned int> MyTypes;
+TYPED_TEST_CASE(FooTest, MyTypes);
+
+// If the type list contains only one type, you can write that type
+// directly without Types<...>:
+// TYPED_TEST_CASE(FooTest, int);
+
+// Then, use TYPED_TEST() instead of TEST_F() to define as many typed
+// tests for this test case as you want.
+TYPED_TEST(FooTest, DoesBlah) {
+ // Inside a test, refer to TypeParam to get the type parameter.
+  // Since we are inside a derived class template, C++ requires us to
+ // visit the members of FooTest via 'this'.
+ TypeParam n = this->value_;
+
+ // To visit static members of the fixture, add the TestFixture::
+ // prefix.
+ n += TestFixture::shared_;
+
+ // To refer to typedefs in the fixture, add the "typename
+ // TestFixture::" prefix.
+ typename TestFixture::List values;
+ values.push_back(n);
+ ...
+}
+
+TYPED_TEST(FooTest, HasPropertyA) { ... }
+
+#endif // 0
+
+// Type-parameterized tests are abstract test patterns parameterized
+// by a type. Compared with typed tests, type-parameterized tests
+// allow you to define the test pattern without knowing what the type
+// parameters are. The defined pattern can be instantiated with
+// different types any number of times, in any number of translation
+// units.
+//
+// If you are designing an interface or concept, you can define a
+// suite of type-parameterized tests to verify properties that any
+// valid implementation of the interface/concept should have. Then,
+// each implementation can easily instantiate the test suite to verify
+// that it conforms to the requirements, without having to write
+// similar tests repeatedly. Here's an example:
+
+#if 0
+
+// First, define a fixture class template. It should be parameterized
+// by a type. Remember to derive it from testing::Test.
+template <typename T>
+class FooTest : public testing::Test {
+ ...
+};
+
+// Next, declare that you will define a type-parameterized test case
+// (the _P suffix is for "parameterized" or "pattern", whichever you
+// prefer):
+TYPED_TEST_CASE_P(FooTest);
+
+// Then, use TYPED_TEST_P() to define as many type-parameterized tests
+// for this type-parameterized test case as you want.
+TYPED_TEST_P(FooTest, DoesBlah) {
+ // Inside a test, refer to TypeParam to get the type parameter.
+ TypeParam n = 0;
+ ...
+}
+
+TYPED_TEST_P(FooTest, HasPropertyA) { ... }
+
+// Now the tricky part: you need to register all test patterns before
+// you can instantiate them. The first argument of the macro is the
+// test case name; the rest are the names of the tests in this test
+// case.
+REGISTER_TYPED_TEST_CASE_P(FooTest,
+ DoesBlah, HasPropertyA);
+
+// Finally, you are free to instantiate the pattern with the types you
+// want. If you put the above code in a header file, you can #include
+// it in multiple C++ source files and instantiate it multiple times.
+//
+// To distinguish different instances of the pattern, the first
+// argument to the INSTANTIATE_* macro is a prefix that will be added
+// to the actual test case name. Remember to pick unique prefixes for
+// different instances.
+typedef testing::Types<char, int, unsigned int> MyTypes;
+INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes);
+
+// If the type list contains only one type, you can write that type
+// directly without Types<...>:
+// INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, int);
+
+#endif // 0
+
+
+// Implements typed tests.
+
+#if GTEST_HAS_TYPED_TEST
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the name of the typedef for the type parameters of the
+// given test case.
+# define GTEST_TYPE_PARAMS_(TestCaseName) gtest_type_params_##TestCaseName##_
+
+// The 'Types' template argument below must have spaces around it
+// since some compilers may choke on '>>' when passing a template
+// instance (e.g. Types<int>)
+# define TYPED_TEST_CASE(CaseName, Types) \
+ typedef ::testing::internal::TypeList< Types >::type \
+ GTEST_TYPE_PARAMS_(CaseName)
+
+# define TYPED_TEST(CaseName, TestName) \
+ template <typename gtest_TypeParam_> \
+ class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \
+ : public CaseName<gtest_TypeParam_> { \
+ private: \
+ typedef CaseName<gtest_TypeParam_> TestFixture; \
+ typedef gtest_TypeParam_ TypeParam; \
+ virtual void TestBody(); \
+ }; \
+ bool gtest_##CaseName##_##TestName##_registered_ GTEST_ATTRIBUTE_UNUSED_ = \
+ ::testing::internal::TypeParameterizedTest< \
+ CaseName, \
+ ::testing::internal::TemplateSel< \
+ GTEST_TEST_CLASS_NAME_(CaseName, TestName)>, \
+ GTEST_TYPE_PARAMS_(CaseName)>::Register(\
+ "", #CaseName, #TestName, 0); \
+ template <typename gtest_TypeParam_> \
+ void GTEST_TEST_CLASS_NAME_(CaseName, TestName)<gtest_TypeParam_>::TestBody()
+
+#endif // GTEST_HAS_TYPED_TEST
+
+// Implements type-parameterized tests.
+
+#if GTEST_HAS_TYPED_TEST_P
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the namespace name that the type-parameterized tests for
+// the given type-parameterized test case are defined in. The exact
+// name of the namespace is subject to change without notice.
+# define GTEST_CASE_NAMESPACE_(TestCaseName) \
+ gtest_case_##TestCaseName##_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the name of the variable used to remember the names of
+// the defined tests in the given test case.
+# define GTEST_TYPED_TEST_CASE_P_STATE_(TestCaseName) \
+ gtest_typed_test_case_p_state_##TestCaseName##_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY.
+//
+// Expands to the name of the variable used to remember the names of
+// the registered tests in the given test case.
+# define GTEST_REGISTERED_TEST_NAMES_(TestCaseName) \
+ gtest_registered_test_names_##TestCaseName##_
+
+// The variables defined in the type-parameterized test macros are
+// static as typically these macros are used in a .h file that can be
+// #included in multiple translation units linked together.
+# define TYPED_TEST_CASE_P(CaseName) \
+ static ::testing::internal::TypedTestCasePState \
+ GTEST_TYPED_TEST_CASE_P_STATE_(CaseName)
+
+# define TYPED_TEST_P(CaseName, TestName) \
+ namespace GTEST_CASE_NAMESPACE_(CaseName) { \
+ template <typename gtest_TypeParam_> \
+ class TestName : public CaseName<gtest_TypeParam_> { \
+ private: \
+ typedef CaseName<gtest_TypeParam_> TestFixture; \
+ typedef gtest_TypeParam_ TypeParam; \
+ virtual void TestBody(); \
+ }; \
+ static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \
+ GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).AddTestName(\
+ __FILE__, __LINE__, #CaseName, #TestName); \
+ } \
+ template <typename gtest_TypeParam_> \
+ void GTEST_CASE_NAMESPACE_(CaseName)::TestName<gtest_TypeParam_>::TestBody()
+
+# define REGISTER_TYPED_TEST_CASE_P(CaseName, ...) \
+ namespace GTEST_CASE_NAMESPACE_(CaseName) { \
+ typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \
+ } \
+ static const char* const GTEST_REGISTERED_TEST_NAMES_(CaseName) = \
+ GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).VerifyRegisteredTestNames(\
+ __FILE__, __LINE__, #__VA_ARGS__)
+
+// The 'Types' template argument below must have spaces around it
+// since some compilers may choke on '>>' when passing a template
+// instance (e.g. Types<int>)
+# define INSTANTIATE_TYPED_TEST_CASE_P(Prefix, CaseName, Types) \
+ bool gtest_##Prefix##_##CaseName GTEST_ATTRIBUTE_UNUSED_ = \
+ ::testing::internal::TypeParameterizedTestCase<CaseName, \
+ GTEST_CASE_NAMESPACE_(CaseName)::gtest_AllTests_, \
+ ::testing::internal::TypeList< Types >::type>::Register(\
+ #Prefix, #CaseName, GTEST_REGISTERED_TEST_NAMES_(CaseName))
+
+#endif // GTEST_HAS_TYPED_TEST_P
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+
+// Depending on the platform, different string classes are available.
+// On Linux, in addition to ::std::string, Google also makes use of
+// class ::string, which has the same interface as ::std::string, but
+// has a different implementation.
+//
+// The user can define GTEST_HAS_GLOBAL_STRING to 1 to indicate that
+// ::string is available AND is a distinct type from ::std::string, or
+// define it to 0 to indicate otherwise.
+//
+// If the user's ::std::string and ::string are the same class due to
+// aliasing, he should define GTEST_HAS_GLOBAL_STRING to 0.
+//
+// If the user doesn't define GTEST_HAS_GLOBAL_STRING, it is defined
+// heuristically.
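+//
+// Illustrative sketch (an assumption about a typical build, not part of this
+// header): the macro is normally set on the compiler command line, e.g.
+//
+//   g++ -DGTEST_HAS_GLOBAL_STRING=0 -c my_test.cc
+//
+// or defined before the gtest headers are included:
+//
+//   #define GTEST_HAS_GLOBAL_STRING 0
+//   #include "gtest/gtest.h"   // hypothetical include path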
+
+namespace testing {
+
+// Declares the flags.
+
+// This flag temporarily enables the disabled tests.
+GTEST_DECLARE_bool_(also_run_disabled_tests);
+
+// This flag causes a debugger break on an assertion failure.
+GTEST_DECLARE_bool_(break_on_failure);
+
+// This flag controls whether Google Test catches all test-thrown exceptions
+// and logs them as failures.
+GTEST_DECLARE_bool_(catch_exceptions);
+
+// This flag enables using colors in terminal output. Available values are
+// "yes" (enable colors), "no" (disable colors), or "auto" (the default),
+// which lets Google Test decide.
+GTEST_DECLARE_string_(color);
+
+// This flag sets up a glob-pattern filter that selects, by full name, the
+// tests to run. If the filter is not given, all tests are executed.
+GTEST_DECLARE_string_(filter);
+
+// This flag causes Google Test to list tests. None of the tests listed
+// are actually run if the flag is provided.
+GTEST_DECLARE_bool_(list_tests);
+
+// This flag controls whether Google Test emits a detailed XML report to a file
+// in addition to its normal textual output.
+GTEST_DECLARE_string_(output);
+
+// This flag controls whether Google Test prints the elapsed time for each
+// test.
+GTEST_DECLARE_bool_(print_time);
+
+// This flag specifies the random number seed.
+GTEST_DECLARE_int32_(random_seed);
+
+// This flag sets how many times the tests are repeated. The default value
+// is 1. If the value is -1, the tests are repeated forever.
+GTEST_DECLARE_int32_(repeat);
+
+// This flag controls whether Google Test includes Google Test internal
+// stack frames in failure stack traces.
+GTEST_DECLARE_bool_(show_internal_stack_frames);
+
+// When this flag is specified, tests' order is randomized on every iteration.
+GTEST_DECLARE_bool_(shuffle);
+
+// This flag specifies the maximum number of stack frames to be
+// printed in a failure message.
+GTEST_DECLARE_int32_(stack_trace_depth);
+
+// When this flag is specified, a failed assertion will throw an
+// exception if exceptions are enabled, or exit the program with a
+// non-zero code otherwise.
+GTEST_DECLARE_bool_(throw_on_failure);
+
+// When this flag is set with a "host:port" string, on supported
+// platforms test results are streamed to the specified port on
+// the specified host machine.
+GTEST_DECLARE_string_(stream_result_to);
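+
+// Illustrative sketch (not part of this header): each flag declared above is
+// normally set via the matching --gtest_* command-line option (or a GTEST_*
+// environment variable). The binary name and values below are hypothetical.
+//
+//   ./my_tests --gtest_filter=FooTest.*-FooTest.Slow \
+//              --gtest_repeat=10 --gtest_shuffle \
+//              --gtest_output=xml:results.xml --gtest_color=yes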
+
+// The upper limit for valid stack trace depths.
+const int kMaxStackTraceDepth = 100;
+
+namespace internal {
+
+class AssertHelper;
+class DefaultGlobalTestPartResultReporter;
+class ExecDeathTest;
+class NoExecDeathTest;
+class FinalSuccessChecker;
+class GTestFlagSaver;
+class TestResultAccessor;
+class TestEventListenersAccessor;
+class TestEventRepeater;
+class WindowsDeathTest;
+class UnitTestImpl* GetUnitTestImpl();
+void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
+ const String& message);
+
+// Converts a streamable value to a String. A NULL pointer is
+// converted to "(null)". When the input value is a ::string,
+// ::std::string, ::wstring, or ::std::wstring object, each NUL
+// character in it is replaced with "\\0".
+// Declared in gtest-internal.h but defined here, so that it has access
+// to the definition of the Message class, required by the ARM
+// compiler.
+template <typename T>
+String StreamableToString(const T& streamable) {
+ return (Message() << streamable).GetString();
+}
+
+} // namespace internal
+
+// The friend relationship of some of these classes is cyclic.
+// If we don't forward declare them, the compiler might confuse the classes
+// in friendship clauses with same-named classes in the enclosing scope.
+class Test;
+class TestCase;
+class TestInfo;
+class UnitTest;
+
+// A class for indicating whether an assertion was successful. When
+// the assertion wasn't successful, the AssertionResult object
+// remembers a non-empty message that describes how it failed.
+//
+// To create an instance of this class, use one of the factory functions
+// (AssertionSuccess() and AssertionFailure()).
+//
+// This class is useful for two purposes:
+// 1. Defining predicate functions to be used with Boolean test assertions
+// EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts
+// 2. Defining predicate-format functions to be
+// used with predicate assertions (ASSERT_PRED_FORMAT*, etc).
+//
+// For example, if you define IsEven predicate:
+//
+// testing::AssertionResult IsEven(int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess();
+// else
+// return testing::AssertionFailure() << n << " is odd";
+// }
+//
+// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5)))
+// will print the message
+//
+// Value of: IsEven(Fib(5))
+// Actual: false (5 is odd)
+// Expected: true
+//
+// instead of a more opaque
+//
+// Value of: IsEven(Fib(5))
+// Actual: false
+// Expected: true
+//
+// in case IsEven is a simple Boolean predicate.
+//
+// If you expect your predicate to be reused and want to support informative
+// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up
+// about half as often as positive ones in our tests), supply messages for
+// both success and failure cases:
+//
+// testing::AssertionResult IsEven(int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess() << n << " is even";
+// else
+// return testing::AssertionFailure() << n << " is odd";
+// }
+//
+// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print
+//
+// Value of: IsEven(Fib(6))
+// Actual: true (8 is even)
+// Expected: false
+//
+// NB: Predicates that support negative Boolean assertions perform worse in
+// positive ones, so be careful not to use them in tests that have lots
+// (tens of thousands) of positive Boolean assertions.
+//
+// To use this class with EXPECT_PRED_FORMAT assertions such as:
+//
+// // Verifies that Foo() returns an even number.
+// EXPECT_PRED_FORMAT1(IsEven, Foo());
+//
+// you need to define:
+//
+// testing::AssertionResult IsEven(const char* expr, int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess();
+// else
+// return testing::AssertionFailure()
+// << "Expected: " << expr << " is even\n Actual: it's " << n;
+// }
+//
+// If Foo() returns 5, you will see the following message:
+//
+// Expected: Foo() is even
+// Actual: it's 5
+//
+class GTEST_API_ AssertionResult {
+ public:
+ // Copy constructor.
+ // Used in EXPECT_TRUE/FALSE(assertion_result).
+ AssertionResult(const AssertionResult& other);
+ // Used in the EXPECT_TRUE/FALSE(bool_expression).
+ explicit AssertionResult(bool success) : success_(success) {}
+
+ // Returns true iff the assertion succeeded.
+ operator bool() const { return success_; } // NOLINT
+
+ // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
+ AssertionResult operator!() const;
+
+ // Returns the text streamed into this AssertionResult. Test assertions
+ // use it when they fail (i.e., the predicate's outcome doesn't match the
+ // assertion's expectation). When nothing has been streamed into the
+ // object, returns an empty string.
+ const char* message() const {
+ return message_.get() != NULL ? message_->c_str() : "";
+ }
+ // TODO(vladl@google.com): Remove this after making sure no clients use it.
+ // Deprecated; please use message() instead.
+ const char* failure_message() const { return message(); }
+
+ // Streams a custom failure message into this object.
+ template <typename T> AssertionResult& operator<<(const T& value) {
+ AppendMessage(Message() << value);
+ return *this;
+ }
+
+ // Allows streaming basic output manipulators such as endl or flush into
+ // this object.
+ AssertionResult& operator<<(
+ ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) {
+ AppendMessage(Message() << basic_manipulator);
+ return *this;
+ }
+
+ private:
+ // Appends the contents of message to message_.
+ void AppendMessage(const Message& a_message) {
+ if (message_.get() == NULL)
+ message_.reset(new ::std::string);
+ message_->append(a_message.GetString().c_str());
+ }
+
+ // Stores result of the assertion predicate.
+ bool success_;
+ // Stores the message describing the condition in case the expectation
+ // construct is not satisfied with the predicate's outcome.
+ // Referenced via a pointer to avoid taking too much stack frame space
+ // with test assertions.
+ internal::scoped_ptr< ::std::string> message_;
+
+ GTEST_DISALLOW_ASSIGN_(AssertionResult);
+};
+
+// Makes a successful assertion result.
+GTEST_API_ AssertionResult AssertionSuccess();
+
+// Makes a failed assertion result.
+GTEST_API_ AssertionResult AssertionFailure();
+
+// Makes a failed assertion result with the given failure message.
+// Deprecated; use AssertionFailure() << msg.
+GTEST_API_ AssertionResult AssertionFailure(const Message& msg);
+
+// The abstract class that all tests inherit from.
+//
+// In Google Test, a unit test program contains one or many TestCases, and
+// each TestCase contains one or many Tests.
+//
+// When you define a test using the TEST macro, you don't need to
+// explicitly derive from Test - the TEST macro automatically does
+// this for you.
+//
+// The only time you derive from Test is when defining a test fixture
+// to be used in a TEST_F. For example:
+//
+// class FooTest : public testing::Test {
+// protected:
+// virtual void SetUp() { ... }
+// virtual void TearDown() { ... }
+// ...
+// };
+//
+// TEST_F(FooTest, Bar) { ... }
+// TEST_F(FooTest, Baz) { ... }
+//
+// Test is not copyable.
+class GTEST_API_ Test {
+ public:
+ friend class TestInfo;
+
+ // Defines types for pointers to functions that set up and tear down
+ // a test case.
+ typedef internal::SetUpTestCaseFunc SetUpTestCaseFunc;
+ typedef internal::TearDownTestCaseFunc TearDownTestCaseFunc;
+
+ // The d'tor is virtual as we intend to inherit from Test.
+ virtual ~Test();
+
+ // Sets up the stuff shared by all tests in this test case.
+ //
+ // Google Test will call Foo::SetUpTestCase() before running the first
+ // test in test case Foo. Hence a sub-class can define its own
+ // SetUpTestCase() method to shadow the one defined in the super
+ // class.
+ static void SetUpTestCase() {}
+
+ // Tears down the stuff shared by all tests in this test case.
+ //
+ // Google Test will call Foo::TearDownTestCase() after running the last
+ // test in test case Foo. Hence a sub-class can define its own
+ // TearDownTestCase() method to shadow the one defined in the super
+ // class.
+ static void TearDownTestCase() {}
+
+ // Returns true iff the current test has a fatal failure.
+ static bool HasFatalFailure();
+
+ // Returns true iff the current test has a non-fatal failure.
+ static bool HasNonfatalFailure();
+
+ // Returns true iff the current test has a (either fatal or
+ // non-fatal) failure.
+ static bool HasFailure() { return HasFatalFailure() || HasNonfatalFailure(); }
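+
+  // Illustrative sketch (not part of this header): because ASSERT_* only
+  // aborts the current function, a common pattern is to check
+  // HasFatalFailure() after calling a helper that may ASSERT, and bail out
+  // of the test body. The names below are hypothetical.
+  //
+  //   void CheckInvariants(const Widget& w) {
+  //     ASSERT_TRUE(w.IsValid());
+  //   }
+  //
+  //   TEST_F(WidgetTest, Invariants) {
+  //     CheckInvariants(widget_);
+  //     if (HasFatalFailure()) return;
+  //     // Further checks that assume the invariants hold.
+  //   }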
+
+ // Logs a property for the current test. Only the last value for a given
+ // key is remembered.
+ // These are public static so they can be called from utility functions
+ // that are not members of the test fixture.
+  // The arguments are const char* instead of strings, as Google Test is used
+ // on platforms where string doesn't compile.
+ //
+ // Note that a driving consideration for these RecordProperty methods
+ // was to produce xml output suited to the Greenspan charting utility,
+ // which at present will only chart values that fit in a 32-bit int. It
+ // is the user's responsibility to restrict their values to 32-bit ints
+ // if they intend them to be used with Greenspan.
+ static void RecordProperty(const char* key, const char* value);
+ static void RecordProperty(const char* key, int value);
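+
+  // Illustrative sketch (not part of this header): recording a key/value
+  // pair from inside a test body. The fixture and key names are
+  // hypothetical; the recorded value shows up as an attribute of the
+  // corresponding <testcase> element in the XML report.
+  //
+  //   TEST_F(WidgetTest, Throughput) {
+  //     RecordProperty("WidgetsPerSecond", 42);
+  //   }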
+
+ protected:
+ // Creates a Test object.
+ Test();
+
+ // Sets up the test fixture.
+ virtual void SetUp();
+
+ // Tears down the test fixture.
+ virtual void TearDown();
+
+ private:
+ // Returns true iff the current test has the same fixture class as
+ // the first test in the current test case.
+ static bool HasSameFixtureClass();
+
+ // Runs the test after the test fixture has been set up.
+ //
+ // A sub-class must implement this to define the test logic.
+ //
+ // DO NOT OVERRIDE THIS FUNCTION DIRECTLY IN A USER PROGRAM.
+ // Instead, use the TEST or TEST_F macro.
+ virtual void TestBody() = 0;
+
+ // Sets up, executes, and tears down the test.
+ void Run();
+
+ // Deletes self. We deliberately pick an unusual name for this
+ // internal method to avoid clashing with names used in user TESTs.
+ void DeleteSelf_() { delete this; }
+
+ // Uses a GTestFlagSaver to save and restore all Google Test flags.
+ const internal::GTestFlagSaver* const gtest_flag_saver_;
+
+ // Often a user mis-spells SetUp() as Setup() and spends a long time
+ // wondering why it is never called by Google Test. The declaration of
+ // the following method is solely for catching such an error at
+ // compile time:
+ //
+ // - The return type is deliberately chosen to be not void, so it
+ // will be a conflict if a user declares void Setup() in his test
+ // fixture.
+ //
+ // - This method is private, so it will be another compiler error
+ // if a user calls it from his test fixture.
+ //
+ // DO NOT OVERRIDE THIS FUNCTION.
+ //
+ // If you see an error about overriding the following function or
+ // about it being private, you have mis-spelled SetUp() as Setup().
+ struct Setup_should_be_spelled_SetUp {};
+ virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
+
+ // We disallow copying Tests.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Test);
+};
+
+typedef internal::TimeInMillis TimeInMillis;
+
+// A copyable object representing a user specified test property which can be
+// output as a key/value string pair.
+//
+// Don't inherit from TestProperty as its destructor is not virtual.
+class TestProperty {
+ public:
+ // C'tor. TestProperty does NOT have a default constructor.
+ // Always use this constructor (with parameters) to create a
+ // TestProperty object.
+ TestProperty(const char* a_key, const char* a_value) :
+ key_(a_key), value_(a_value) {
+ }
+
+ // Gets the user supplied key.
+ const char* key() const {
+ return key_.c_str();
+ }
+
+ // Gets the user supplied value.
+ const char* value() const {
+ return value_.c_str();
+ }
+
+ // Sets a new value, overriding the one supplied in the constructor.
+ void SetValue(const char* new_value) {
+ value_ = new_value;
+ }
+
+ private:
+ // The key supplied by the user.
+ internal::String key_;
+ // The value supplied by the user.
+ internal::String value_;
+};
+
+// The result of a single Test. This includes a list of
+// TestPartResults, a list of TestProperties, a count of how many
+// death tests there are in the Test, and how much time it took to run
+// the Test.
+//
+// TestResult is not copyable.
+class GTEST_API_ TestResult {
+ public:
+ // Creates an empty TestResult.
+ TestResult();
+
+ // D'tor. Do not inherit from TestResult.
+ ~TestResult();
+
+ // Gets the number of all test parts. This is the sum of the number
+ // of successful test parts and the number of failed test parts.
+ int total_part_count() const;
+
+ // Returns the number of the test properties.
+ int test_property_count() const;
+
+ // Returns true iff the test passed (i.e. no test part failed).
+ bool Passed() const { return !Failed(); }
+
+ // Returns true iff the test failed.
+ bool Failed() const;
+
+ // Returns true iff the test fatally failed.
+ bool HasFatalFailure() const;
+
+ // Returns true iff the test has a non-fatal failure.
+ bool HasNonfatalFailure() const;
+
+ // Returns the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+ // Returns the i-th test part result among all the results. i can range
+  // from 0 to total_part_count() - 1. If i is not in that range, aborts
+ // the program.
+ const TestPartResult& GetTestPartResult(int i) const;
+
+ // Returns the i-th test property. i can range from 0 to
+ // test_property_count() - 1. If i is not in that range, aborts the
+ // program.
+ const TestProperty& GetTestProperty(int i) const;
+
+ private:
+ friend class TestInfo;
+ friend class UnitTest;
+ friend class internal::DefaultGlobalTestPartResultReporter;
+ friend class internal::ExecDeathTest;
+ friend class internal::TestResultAccessor;
+ friend class internal::UnitTestImpl;
+ friend class internal::WindowsDeathTest;
+
+ // Gets the vector of TestPartResults.
+ const std::vector<TestPartResult>& test_part_results() const {
+ return test_part_results_;
+ }
+
+ // Gets the vector of TestProperties.
+ const std::vector<TestProperty>& test_properties() const {
+ return test_properties_;
+ }
+
+ // Sets the elapsed time.
+ void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; }
+
+ // Adds a test property to the list. The property is validated and may add
+ // a non-fatal failure if invalid (e.g., if it conflicts with reserved
+ // key names). If a property is already recorded for the same key, the
+ // value will be updated, rather than storing multiple values for the same
+ // key.
+ void RecordProperty(const TestProperty& test_property);
+
+ // Adds a failure if the key is a reserved attribute of Google Test
+ // testcase tags. Returns true if the property is valid.
+ // TODO(russr): Validate attribute names are legal and human readable.
+ static bool ValidateTestProperty(const TestProperty& test_property);
+
+ // Adds a test part result to the list.
+ void AddTestPartResult(const TestPartResult& test_part_result);
+
+ // Returns the death test count.
+ int death_test_count() const { return death_test_count_; }
+
+ // Increments the death test count, returning the new count.
+ int increment_death_test_count() { return ++death_test_count_; }
+
+ // Clears the test part results.
+ void ClearTestPartResults();
+
+ // Clears the object.
+ void Clear();
+
+ // Protects mutable state of the property vector and of owned
+ // properties, whose values may be updated.
+ internal::Mutex test_properites_mutex_;
+
+ // The vector of TestPartResults
+ std::vector<TestPartResult> test_part_results_;
+ // The vector of TestProperties
+ std::vector<TestProperty> test_properties_;
+ // Running count of death tests.
+ int death_test_count_;
+ // The elapsed time, in milliseconds.
+ TimeInMillis elapsed_time_;
+
+ // We disallow copying TestResult.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult);
+}; // class TestResult
+
+// A TestInfo object stores the following information about a test:
+//
+// Test case name
+// Test name
+// Whether the test should be run
+// A function pointer that creates the test object when invoked
+// Test result
+//
+// The constructor of TestInfo registers itself with the UnitTest
+// singleton such that the RUN_ALL_TESTS() macro knows which tests to
+// run.
+class GTEST_API_ TestInfo {
+ public:
+ // Destructs a TestInfo object. This function is not virtual, so
+ // don't inherit from TestInfo.
+ ~TestInfo();
+
+ // Returns the test case name.
+ const char* test_case_name() const { return test_case_name_.c_str(); }
+
+ // Returns the test name.
+ const char* name() const { return name_.c_str(); }
+
+ // Returns the name of the parameter type, or NULL if this is not a typed
+ // or a type-parameterized test.
+ const char* type_param() const {
+ if (type_param_.get() != NULL)
+ return type_param_->c_str();
+ return NULL;
+ }
+
+ // Returns the text representation of the value parameter, or NULL if this
+ // is not a value-parameterized test.
+ const char* value_param() const {
+ if (value_param_.get() != NULL)
+ return value_param_->c_str();
+ return NULL;
+ }
+
+ // Returns true if this test should run, that is if the test is not disabled
+ // (or it is disabled but the also_run_disabled_tests flag has been specified)
+ // and its full name matches the user-specified filter.
+ //
+ // Google Test allows the user to filter the tests by their full names.
+ // The full name of a test Bar in test case Foo is defined as
+ // "Foo.Bar". Only the tests that match the filter will run.
+ //
+ // A filter is a colon-separated list of glob (not regex) patterns,
+ // optionally followed by a '-' and a colon-separated list of
+ // negative patterns (tests to exclude). A test is run if it
+ // matches one of the positive patterns and does not match any of
+ // the negative patterns.
+ //
+ // For example, *A*:Foo.* is a filter that matches any string that
+ // contains the character 'A' or starts with "Foo.".
+ bool should_run() const { return should_run_; }
+
+ // Returns the result of the test.
+ const TestResult* result() const { return &result_; }
+
+ private:
+
+#if GTEST_HAS_DEATH_TEST
+ friend class internal::DefaultDeathTestFactory;
+#endif // GTEST_HAS_DEATH_TEST
+ friend class Test;
+ friend class TestCase;
+ friend class internal::UnitTestImpl;
+ friend TestInfo* internal::MakeAndRegisterTestInfo(
+ const char* test_case_name, const char* name,
+ const char* type_param,
+ const char* value_param,
+ internal::TypeId fixture_class_id,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc,
+ internal::TestFactoryBase* factory);
+
+ // Constructs a TestInfo object. The newly constructed instance assumes
+ // ownership of the factory object.
+ TestInfo(const char* test_case_name, const char* name,
+ const char* a_type_param,
+ const char* a_value_param,
+ internal::TypeId fixture_class_id,
+ internal::TestFactoryBase* factory);
+
+ // Increments the number of death tests encountered in this test so
+ // far.
+ int increment_death_test_count() {
+ return result_.increment_death_test_count();
+ }
+
+ // Creates the test object, runs it, records its result, and then
+ // deletes it.
+ void Run();
+
+ static void ClearTestResult(TestInfo* test_info) {
+ test_info->result_.Clear();
+ }
+
+ // These fields are immutable properties of the test.
+ const std::string test_case_name_; // Test case name
+ const std::string name_; // Test name
+ // Name of the parameter type, or NULL if this is not a typed or a
+ // type-parameterized test.
+ const internal::scoped_ptr<const ::std::string> type_param_;
+ // Text representation of the value parameter, or NULL if this is not a
+ // value-parameterized test.
+ const internal::scoped_ptr<const ::std::string> value_param_;
+ const internal::TypeId fixture_class_id_; // ID of the test fixture class
+ bool should_run_; // True iff this test should run
+ bool is_disabled_; // True iff this test is disabled
+ bool matches_filter_; // True if this test matches the
+ // user-specified filter.
+ internal::TestFactoryBase* const factory_; // The factory that creates
+ // the test object
+
+ // This field is mutable and needs to be reset before running the
+ // test for the second time.
+ TestResult result_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo);
+};
+
+// A test case, which consists of a vector of TestInfos.
+//
+// TestCase is not copyable.
+class GTEST_API_ TestCase {
+ public:
+ // Creates a TestCase with the given name.
+ //
+ // TestCase does NOT have a default constructor. Always use this
+ // constructor to create a TestCase object.
+ //
+ // Arguments:
+ //
+ // name: name of the test case
+ // a_type_param: the name of the test's type parameter, or NULL if
+ // this is not a type-parameterized test.
+ // set_up_tc: pointer to the function that sets up the test case
+ // tear_down_tc: pointer to the function that tears down the test case
+ TestCase(const char* name, const char* a_type_param,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc);
+
+ // Destructor of TestCase.
+ virtual ~TestCase();
+
+ // Gets the name of the TestCase.
+ const char* name() const { return name_.c_str(); }
+
+ // Returns the name of the parameter type, or NULL if this is not a
+ // type-parameterized test case.
+ const char* type_param() const {
+ if (type_param_.get() != NULL)
+ return type_param_->c_str();
+ return NULL;
+ }
+
+ // Returns true if any test in this test case should run.
+ bool should_run() const { return should_run_; }
+
+ // Gets the number of successful tests in this test case.
+ int successful_test_count() const;
+
+ // Gets the number of failed tests in this test case.
+ int failed_test_count() const;
+
+ // Gets the number of disabled tests in this test case.
+ int disabled_test_count() const;
+
+ // Get the number of tests in this test case that should run.
+ int test_to_run_count() const;
+
+ // Gets the number of all tests in this test case.
+ int total_test_count() const;
+
+ // Returns true iff the test case passed.
+ bool Passed() const { return !Failed(); }
+
+ // Returns true iff the test case failed.
+ bool Failed() const { return failed_test_count() > 0; }
+
+ // Returns the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+ // Returns the i-th test among all the tests. i can range from 0 to
+ // total_test_count() - 1. If i is not in that range, returns NULL.
+ const TestInfo* GetTestInfo(int i) const;
+
+ private:
+ friend class Test;
+ friend class internal::UnitTestImpl;
+
+ // Gets the (mutable) vector of TestInfos in this TestCase.
+ std::vector<TestInfo*>& test_info_list() { return test_info_list_; }
+
+ // Gets the (immutable) vector of TestInfos in this TestCase.
+ const std::vector<TestInfo*>& test_info_list() const {
+ return test_info_list_;
+ }
+
+ // Returns the i-th test among all the tests. i can range from 0 to
+ // total_test_count() - 1. If i is not in that range, returns NULL.
+ TestInfo* GetMutableTestInfo(int i);
+
+ // Sets the should_run member.
+ void set_should_run(bool should) { should_run_ = should; }
+
+ // Adds a TestInfo to this test case. Will delete the TestInfo upon
+ // destruction of the TestCase object.
+ void AddTestInfo(TestInfo * test_info);
+
+ // Clears the results of all tests in this test case.
+ void ClearResult();
+
+ // Clears the results of all tests in the given test case.
+ static void ClearTestCaseResult(TestCase* test_case) {
+ test_case->ClearResult();
+ }
+
+ // Runs every test in this TestCase.
+ void Run();
+
+ // Runs SetUpTestCase() for this TestCase. This wrapper is needed
+ // for catching exceptions thrown from SetUpTestCase().
+ void RunSetUpTestCase() { (*set_up_tc_)(); }
+
+ // Runs TearDownTestCase() for this TestCase. This wrapper is
+ // needed for catching exceptions thrown from TearDownTestCase().
+ void RunTearDownTestCase() { (*tear_down_tc_)(); }
+
+ // Returns true iff test passed.
+ static bool TestPassed(const TestInfo* test_info) {
+ return test_info->should_run() && test_info->result()->Passed();
+ }
+
+ // Returns true iff test failed.
+ static bool TestFailed(const TestInfo* test_info) {
+ return test_info->should_run() && test_info->result()->Failed();
+ }
+
+ // Returns true iff test is disabled.
+ static bool TestDisabled(const TestInfo* test_info) {
+ return test_info->is_disabled_;
+ }
+
+ // Returns true if the given test should run.
+ static bool ShouldRunTest(const TestInfo* test_info) {
+ return test_info->should_run();
+ }
+
+ // Shuffles the tests in this test case.
+ void ShuffleTests(internal::Random* random);
+
+ // Restores the test order to before the first shuffle.
+ void UnshuffleTests();
+
+ // Name of the test case.
+ internal::String name_;
+ // Name of the parameter type, or NULL if this is not a typed or a
+ // type-parameterized test.
+ const internal::scoped_ptr<const ::std::string> type_param_;
+ // The vector of TestInfos in their original order. It owns the
+ // elements in the vector.
+ std::vector<TestInfo*> test_info_list_;
+ // Provides a level of indirection for the test list to allow easy
+ // shuffling and restoring the test order. The i-th element in this
+ // vector is the index of the i-th test in the shuffled test list.
+ std::vector<int> test_indices_;
+ // Pointer to the function that sets up the test case.
+ Test::SetUpTestCaseFunc set_up_tc_;
+ // Pointer to the function that tears down the test case.
+ Test::TearDownTestCaseFunc tear_down_tc_;
+ // True iff any test in this test case should run.
+ bool should_run_;
+ // Elapsed time, in milliseconds.
+ TimeInMillis elapsed_time_;
+
+ // We disallow copying TestCases.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestCase);
+};
+
+// An Environment object is capable of setting up and tearing down an
+// environment. The user should subclass this to define their own
+// environment(s).
+//
+// An Environment object does the set-up and tear-down in virtual
+// methods SetUp() and TearDown() instead of the constructor and the
+// destructor, as:
+//
+// 1. You cannot safely throw from a destructor. This is a problem
+// as in some cases Google Test is used where exceptions are enabled, and
+// we may want to implement ASSERT_* using exceptions where they are
+// available.
+// 2. You cannot use ASSERT_* directly in a constructor or
+// destructor.
+class Environment {
+ public:
+ // The d'tor is virtual as we need to subclass Environment.
+ virtual ~Environment() {}
+
+ // Override this to define how to set up the environment.
+ virtual void SetUp() {}
+
+ // Override this to define how to tear down the environment.
+ virtual void TearDown() {}
+ private:
+ // If you see an error about overriding the following function or
+ // about it being private, you have mis-spelled SetUp() as Setup().
+ struct Setup_should_be_spelled_SetUp {};
+ virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
+};
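+
+// A minimal sketch of a custom environment (FooEnvironment is a
+// hypothetical name, not something defined by Google Test):
+//
+//   class FooEnvironment : public testing::Environment {
+//    public:
+//     virtual void SetUp() {
+//       // Acquire resources shared by all tests, e.g. start a fake
+//       // server or create a scratch directory.
+//     }
+//     virtual void TearDown() {
+//       // Release whatever SetUp() acquired.
+//     }
+//   };
+//
+// See AddGlobalTestEnvironment() below for how to register it.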
+
+// The interface for tracing execution of tests. The methods are organized in
+// the order the corresponding events are fired.
+class TestEventListener {
+ public:
+ virtual ~TestEventListener() {}
+
+ // Fired before any test activity starts.
+ virtual void OnTestProgramStart(const UnitTest& unit_test) = 0;
+
+ // Fired before each iteration of tests starts. There may be more than
+ // one iteration if GTEST_FLAG(repeat) is set. iteration is the iteration
+ // index, starting from 0.
+ virtual void OnTestIterationStart(const UnitTest& unit_test,
+ int iteration) = 0;
+
+ // Fired before environment set-up for each iteration of tests starts.
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) = 0;
+
+ // Fired after environment set-up for each iteration of tests ends.
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) = 0;
+
+ // Fired before the test case starts.
+ virtual void OnTestCaseStart(const TestCase& test_case) = 0;
+
+ // Fired before the test starts.
+ virtual void OnTestStart(const TestInfo& test_info) = 0;
+
+ // Fired after a failed assertion or a SUCCEED() invocation.
+ virtual void OnTestPartResult(const TestPartResult& test_part_result) = 0;
+
+ // Fired after the test ends.
+ virtual void OnTestEnd(const TestInfo& test_info) = 0;
+
+ // Fired after the test case ends.
+ virtual void OnTestCaseEnd(const TestCase& test_case) = 0;
+
+ // Fired before environment tear-down for each iteration of tests starts.
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) = 0;
+
+ // Fired after environment tear-down for each iteration of tests ends.
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) = 0;
+
+ // Fired after each iteration of tests finishes.
+ virtual void OnTestIterationEnd(const UnitTest& unit_test,
+ int iteration) = 0;
+
+ // Fired after all test activities have ended.
+ virtual void OnTestProgramEnd(const UnitTest& unit_test) = 0;
+};
+
+// The convenience class for users who need to override just one or two
+// methods and do not mind that a change to the signature of the methods
+// they override would go uncaught at build time. For comments about each
+// method please see the definition of TestEventListener above.
+class EmptyTestEventListener : public TestEventListener {
+ public:
+ virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationStart(const UnitTest& /*unit_test*/,
+ int /*iteration*/) {}
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestCaseStart(const TestCase& /*test_case*/) {}
+ virtual void OnTestStart(const TestInfo& /*test_info*/) {}
+ virtual void OnTestPartResult(const TestPartResult& /*test_part_result*/) {}
+ virtual void OnTestEnd(const TestInfo& /*test_info*/) {}
+ virtual void OnTestCaseEnd(const TestCase& /*test_case*/) {}
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/,
+ int /*iteration*/) {}
+ virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {}
+};
+
+// TestEventListeners lets users add listeners to track events in Google Test.
+class GTEST_API_ TestEventListeners {
+ public:
+ TestEventListeners();
+ ~TestEventListeners();
+
+ // Appends an event listener to the end of the list. Google Test assumes
+ // the ownership of the listener (i.e. it will delete the listener when
+ // the test program finishes).
+ void Append(TestEventListener* listener);
+
+ // Removes the given event listener from the list and returns it. It then
+ // becomes the caller's responsibility to delete the listener. Returns
+ // NULL if the listener is not found in the list.
+ TestEventListener* Release(TestEventListener* listener);
+
+ // Returns the standard listener responsible for the default console
+ // output. Can be removed from the listeners list to shut down default
+ // console output. Note that removing this object from the listener list
+ // with Release transfers its ownership to the caller and makes this
+ // function return NULL the next time.
+ TestEventListener* default_result_printer() const {
+ return default_result_printer_;
+ }
+
+ // Returns the standard listener responsible for the default XML output
+ // controlled by the --gtest_output=xml flag. Can be removed from the
+ // listeners list by users who want to shut down the default XML output
+ // controlled by this flag and substitute it with custom one. Note that
+ // removing this object from the listener list with Release transfers its
+ // ownership to the caller and makes this function return NULL the next
+ // time.
+ TestEventListener* default_xml_generator() const {
+ return default_xml_generator_;
+ }
+
+ private:
+ friend class TestCase;
+ friend class TestInfo;
+ friend class internal::DefaultGlobalTestPartResultReporter;
+ friend class internal::NoExecDeathTest;
+ friend class internal::TestEventListenersAccessor;
+ friend class internal::UnitTestImpl;
+
+ // Returns repeater that broadcasts the TestEventListener events to all
+ // subscribers.
+ TestEventListener* repeater();
+
+ // Sets the default_result_printer attribute to the provided listener.
+ // The listener is also added to the listener list and previous
+ // default_result_printer is removed from it and deleted. The listener can
+ // also be NULL in which case it will not be added to the list. Does
+ // nothing if the previous and the current listener objects are the same.
+ void SetDefaultResultPrinter(TestEventListener* listener);
+
+ // Sets the default_xml_generator attribute to the provided listener. The
+ // listener is also added to the listener list and previous
+ // default_xml_generator is removed from it and deleted. The listener can
+ // also be NULL in which case it will not be added to the list. Does
+ // nothing if the previous and the current listener objects are the same.
+ void SetDefaultXmlGenerator(TestEventListener* listener);
+
+ // Controls whether events will be forwarded by the repeater to the
+ // listeners in the list.
+ bool EventForwardingEnabled() const;
+ void SuppressEventForwarding();
+
+ // The actual list of listeners.
+ internal::TestEventRepeater* repeater_;
+ // Listener responsible for the standard result output.
+ TestEventListener* default_result_printer_;
+ // Listener responsible for the creation of the XML output file.
+ TestEventListener* default_xml_generator_;
+
+ // We disallow copying TestEventListeners.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventListeners);
+};
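+
+// A sketch of how these classes are typically used together
+// (MinimalistPrinter is a hypothetical listener, not part of Google Test):
+//
+//   class MinimalistPrinter : public testing::EmptyTestEventListener {
+//     // Called after a test ends; the only event we care about here.
+//     virtual void OnTestEnd(const testing::TestInfo& test_info) {
+//       printf("%s.%s %s\n",
+//              test_info.test_case_name(), test_info.name(),
+//              test_info.result()->Passed() ? "passed" : "failed");
+//     }
+//   };
+//
+//   int main(int argc, char** argv) {
+//     testing::InitGoogleTest(&argc, argv);
+//     testing::TestEventListeners& listeners =
+//         testing::UnitTest::GetInstance()->listeners();
+//     listeners.Append(new MinimalistPrinter);  // Google Test owns it now.
+//     return RUN_ALL_TESTS();
+//   }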
+
+// A UnitTest consists of a vector of TestCases.
+//
+// This is a singleton class. The only instance of UnitTest is
+// created when UnitTest::GetInstance() is first called. This
+// instance is never deleted.
+//
+// UnitTest is not copyable.
+//
+// This class is thread-safe as long as the methods are called
+// according to their specification.
+class GTEST_API_ UnitTest {
+ public:
+ // Gets the singleton UnitTest object. The first time this method
+ // is called, a UnitTest object is constructed and returned.
+ // Consecutive calls will return the same object.
+ static UnitTest* GetInstance();
+
+ // Runs all tests in this UnitTest object and prints the result.
+ // Returns 0 if successful, or 1 otherwise.
+ //
+ // This method can only be called from the main thread.
+ //
+ // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+ int Run() GTEST_MUST_USE_RESULT_;
+
+ // Returns the working directory when the first TEST() or TEST_F()
+ // was executed. The UnitTest object owns the string.
+ const char* original_working_dir() const;
+
+ // Returns the TestCase object for the test that's currently running,
+ // or NULL if no test is running.
+ const TestCase* current_test_case() const;
+
+ // Returns the TestInfo object for the test that's currently running,
+ // or NULL if no test is running.
+ const TestInfo* current_test_info() const;
+
+ // Returns the random seed used at the start of the current test run.
+ int random_seed() const;
+
+#if GTEST_HAS_PARAM_TEST
+ // Returns the ParameterizedTestCaseRegistry object used to keep track of
+ // value-parameterized tests and instantiate and register them.
+ //
+ // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+ internal::ParameterizedTestCaseRegistry& parameterized_test_registry();
+#endif // GTEST_HAS_PARAM_TEST
+
+ // Gets the number of successful test cases.
+ int successful_test_case_count() const;
+
+ // Gets the number of failed test cases.
+ int failed_test_case_count() const;
+
+ // Gets the number of all test cases.
+ int total_test_case_count() const;
+
+ // Gets the number of all test cases that contain at least one test
+ // that should run.
+ int test_case_to_run_count() const;
+
+ // Gets the number of successful tests.
+ int successful_test_count() const;
+
+ // Gets the number of failed tests.
+ int failed_test_count() const;
+
+ // Gets the number of disabled tests.
+ int disabled_test_count() const;
+
+ // Gets the number of all tests.
+ int total_test_count() const;
+
+ // Gets the number of tests that should run.
+ int test_to_run_count() const;
+
+ // Gets the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const;
+
+ // Returns true iff the unit test passed (i.e. all test cases passed).
+ bool Passed() const;
+
+ // Returns true iff the unit test failed (i.e. some test case failed
+ // or something outside of all tests failed).
+ bool Failed() const;
+
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ const TestCase* GetTestCase(int i) const;
+
+ // Returns the list of event listeners that can be used to track events
+ // inside Google Test.
+ TestEventListeners& listeners();
+
+ private:
+ // Registers and returns a global test environment. When a test
+ // program is run, all global test environments will be set-up in
+ // the order they were registered. After all tests in the program
+ // have finished, all global test environments will be torn-down in
+ // the *reverse* order they were registered.
+ //
+ // The UnitTest object takes ownership of the given environment.
+ //
+ // This method can only be called from the main thread.
+ Environment* AddEnvironment(Environment* env);
+
+ // Adds a TestPartResult to the current TestResult object. All
+ // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc)
+ // eventually call this to report their results. The user code
+ // should use the assertion macros instead of calling this directly.
+ void AddTestPartResult(TestPartResult::Type result_type,
+ const char* file_name,
+ int line_number,
+ const internal::String& message,
+ const internal::String& os_stack_trace);
+
+ // Adds a TestProperty to the current TestResult object. If the result already
+ // contains a property with the same key, the value will be updated.
+ void RecordPropertyForCurrentTest(const char* key, const char* value);
+
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ TestCase* GetMutableTestCase(int i);
+
+ // Accessors for the implementation object.
+ internal::UnitTestImpl* impl() { return impl_; }
+ const internal::UnitTestImpl* impl() const { return impl_; }
+
+ // These classes and functions are friends as they need to access private
+ // members of UnitTest.
+ friend class Test;
+ friend class internal::AssertHelper;
+ friend class internal::ScopedTrace;
+ friend Environment* AddGlobalTestEnvironment(Environment* env);
+ friend internal::UnitTestImpl* internal::GetUnitTestImpl();
+ friend void internal::ReportFailureInUnknownLocation(
+ TestPartResult::Type result_type,
+ const internal::String& message);
+
+ // Creates an empty UnitTest.
+ UnitTest();
+
+ // D'tor
+ virtual ~UnitTest();
+
+ // Pushes a trace defined by SCOPED_TRACE() on to the per-thread
+ // Google Test trace stack.
+ void PushGTestTrace(const internal::TraceInfo& trace);
+
+ // Pops a trace from the per-thread Google Test trace stack.
+ void PopGTestTrace();
+
+ // Protects mutable state in *impl_. This is mutable as some const
+ // methods need to lock it too.
+ mutable internal::Mutex mutex_;
+
+ // Opaque implementation object. This field is never changed once
+ // the object is constructed. We don't mark it as const here, as
+ // doing so will cause a warning in the constructor of UnitTest.
+ // Mutable state in *impl_ is protected by mutex_.
+ internal::UnitTestImpl* impl_;
+
+ // We disallow copying UnitTest.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTest);
+};
+
+// A convenient wrapper for adding an environment for the test
+// program.
+//
+// You should call this before RUN_ALL_TESTS() is called, probably in
+// main(). If you use gtest_main, you need to call this before main()
+// starts for it to take effect. For example, you can define a global
+// variable like this:
+//
+// testing::Environment* const foo_env =
+// testing::AddGlobalTestEnvironment(new FooEnvironment);
+//
+// However, we strongly recommend you to write your own main() and
+// call AddGlobalTestEnvironment() there, as relying on initialization
+// of global variables makes the code harder to read and may cause
+// problems when you register multiple environments from different
+// translation units and the environments have dependencies among them
+// (remember that the compiler doesn't guarantee the order in which
+// global variables from different translation units are initialized).
+inline Environment* AddGlobalTestEnvironment(Environment* env) {
+ return UnitTest::GetInstance()->AddEnvironment(env);
+}
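+
+// A sketch of the recommended pattern, writing your own main()
+// (FooEnvironment stands for any Environment subclass you define):
+//
+//   int main(int argc, char** argv) {
+//     testing::InitGoogleTest(&argc, argv);
+//     // Google Test takes ownership of the registered environment.
+//     testing::AddGlobalTestEnvironment(new FooEnvironment);
+//     return RUN_ALL_TESTS();
+//   }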
+
+// Initializes Google Test. This must be called before calling
+// RUN_ALL_TESTS(). In particular, it parses a command line for the
+// flags that Google Test recognizes. Whenever a Google Test flag is
+// seen, it is removed from argv, and *argc is decremented.
+//
+// No value is returned. Instead, the Google Test flag variables are
+// updated.
+//
+// Calling the function for the second time has no user-visible effect.
+GTEST_API_ void InitGoogleTest(int* argc, char** argv);
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+GTEST_API_ void InitGoogleTest(int* argc, wchar_t** argv);
+
+namespace internal {
+
+// Formats a comparison assertion (e.g. ASSERT_EQ, EXPECT_LT, and etc)
+// operand to be used in a failure message. The type (but not value)
+// of the other operand may affect the format. This allows us to
+// print a char* as a raw pointer when it is compared against another
+// char*, and print it as a C string when it is compared against an
+// std::string object, for example.
+//
+// The default implementation ignores the type of the other operand.
+// Some specialized versions are used to handle formatting wide or
+// narrow C strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+template <typename T1, typename T2>
+String FormatForComparisonFailureMessage(const T1& value,
+ const T2& /* other_operand */) {
+ // C++Builder compiles this incorrectly if the namespace isn't explicitly
+ // given.
+ return ::testing::PrintToString(value);
+}
+
+// The helper function for {ASSERT|EXPECT}_EQ.
+template <typename T1, typename T2>
+AssertionResult CmpHelperEQ(const char* expected_expression,
+ const char* actual_expression,
+ const T1& expected,
+ const T2& actual) {
+#ifdef _MSC_VER
+# pragma warning(push) // Saves the current warning state.
+# pragma warning(disable:4389) // Temporarily disables warning on
+ // signed/unsigned mismatch.
+#endif
+
+ if (expected == actual) {
+ return AssertionSuccess();
+ }
+
+#ifdef _MSC_VER
+# pragma warning(pop) // Restores the warning state.
+#endif
+
+ return EqFailure(expected_expression,
+ actual_expression,
+ FormatForComparisonFailureMessage(expected, actual),
+ FormatForComparisonFailureMessage(actual, expected),
+ false);
+}
+
+// With this overloaded version, we allow anonymous enums to be used
+// in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous enums
+// can be implicitly cast to BiggestInt.
+GTEST_API_ AssertionResult CmpHelperEQ(const char* expected_expression,
+ const char* actual_expression,
+ BiggestInt expected,
+ BiggestInt actual);
+
+// The helper class for {ASSERT|EXPECT}_EQ. The template argument
+// lhs_is_null_literal is true iff the first argument to ASSERT_EQ()
+// is a null pointer literal. The following default implementation is
+// for lhs_is_null_literal being false.
+template <bool lhs_is_null_literal>
+class EqHelper {
+ public:
+ // This templatized version is for the general case.
+ template <typename T1, typename T2>
+ static AssertionResult Compare(const char* expected_expression,
+ const char* actual_expression,
+ const T1& expected,
+ const T2& actual) {
+ return CmpHelperEQ(expected_expression, actual_expression, expected,
+ actual);
+ }
+
+ // With this overloaded version, we allow anonymous enums to be used
+ // in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous
+ // enums can be implicitly cast to BiggestInt.
+ //
+ // Even though its body looks the same as the above version, we
+ // cannot merge the two, as it will make anonymous enums unhappy.
+ static AssertionResult Compare(const char* expected_expression,
+ const char* actual_expression,
+ BiggestInt expected,
+ BiggestInt actual) {
+ return CmpHelperEQ(expected_expression, actual_expression, expected,
+ actual);
+ }
+};
+
+// This specialization is used when the first argument to ASSERT_EQ()
+// is a null pointer literal, like NULL, false, or 0.
+template <>
+class EqHelper<true> {
+ public:
+ // We define two overloaded versions of Compare(). The first
+ // version will be picked when the second argument to ASSERT_EQ() is
+ // NOT a pointer, e.g. ASSERT_EQ(0, AnIntFunction()) or
+ // EXPECT_EQ(false, a_bool).
+ template <typename T1, typename T2>
+ static AssertionResult Compare(
+ const char* expected_expression,
+ const char* actual_expression,
+ const T1& expected,
+ const T2& actual,
+ // The following line prevents this overload from being considered if T2
+ // is not a pointer type. We need this because ASSERT_EQ(NULL, my_ptr)
+ // expands to Compare("", "", NULL, my_ptr), which requires a conversion
+ // to match the Secret* in the other overload, which would otherwise make
+ // this template match better.
+ typename EnableIf<!is_pointer<T2>::value>::type* = 0) {
+ return CmpHelperEQ(expected_expression, actual_expression, expected,
+ actual);
+ }
+
+ // This version will be picked when the second argument to ASSERT_EQ() is a
+ // pointer, e.g. ASSERT_EQ(NULL, a_pointer).
+ template <typename T>
+ static AssertionResult Compare(
+ const char* expected_expression,
+ const char* actual_expression,
+ // We used to have a second template parameter instead of Secret*. That
+ // template parameter would deduce to 'long', making this a better match
+ // than the first overload even without the first overload's EnableIf.
+ // Unfortunately, gcc with -Wconversion-null warns when "passing NULL to
+ // non-pointer argument" (even a deduced integral argument), so the old
+ // implementation caused warnings in user code.
+ Secret* /* expected (NULL) */,
+ T* actual) {
+ // We already know that 'expected' is a null pointer.
+ return CmpHelperEQ(expected_expression, actual_expression,
+ static_cast<T*>(NULL), actual);
+ }
+};
+
+// A macro for implementing the helper functions needed to implement
+// ASSERT_?? and EXPECT_??. It is here just to avoid copy-and-paste
+// of similar code.
+//
+// For each templatized helper function, we also define an overloaded
+// version for BiggestInt in order to reduce code bloat and allow
+// anonymous enums to be used with {ASSERT|EXPECT}_?? when compiled
+// with gcc 4.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
+template <typename T1, typename T2>\
+AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
+ const T1& val1, const T2& val2) {\
+ if (val1 op val2) {\
+ return AssertionSuccess();\
+ } else {\
+ return AssertionFailure() \
+ << "Expected: (" << expr1 << ") " #op " (" << expr2\
+ << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\
+ << " vs " << FormatForComparisonFailureMessage(val2, val1);\
+ }\
+}\
+GTEST_API_ AssertionResult CmpHelper##op_name(\
+ const char* expr1, const char* expr2, BiggestInt val1, BiggestInt val2)
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+// Implements the helper function for {ASSERT|EXPECT}_NE
+GTEST_IMPL_CMP_HELPER_(NE, !=);
+// Implements the helper function for {ASSERT|EXPECT}_LE
+GTEST_IMPL_CMP_HELPER_(LE, <=);
+// Implements the helper function for {ASSERT|EXPECT}_LT
+GTEST_IMPL_CMP_HELPER_(LT, < );
+// Implements the helper function for {ASSERT|EXPECT}_GE
+GTEST_IMPL_CMP_HELPER_(GE, >=);
+// Implements the helper function for {ASSERT|EXPECT}_GT
+GTEST_IMPL_CMP_HELPER_(GT, > );
+
+#undef GTEST_IMPL_CMP_HELPER_
+
+// The helper function for {ASSERT|EXPECT}_STREQ.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression,
+ const char* actual_expression,
+ const char* expected,
+ const char* actual);
+
+// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression,
+ const char* actual_expression,
+ const char* expected,
+ const char* actual);
+
+// The helper function for {ASSERT|EXPECT}_STRNE.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
+
+// The helper function for {ASSERT|EXPECT}_STRCASENE.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
+
+
+// Helper function for *_STREQ on wide strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression,
+ const char* actual_expression,
+ const wchar_t* expected,
+ const wchar_t* actual);
+
+// Helper function for *_STRNE on wide strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const wchar_t* s1,
+ const wchar_t* s2);
+
+} // namespace internal
+
+// IsSubstring() and IsNotSubstring() are intended to be used as the
+// first argument to {EXPECT,ASSERT}_PRED_FORMAT2(), not by
+// themselves. They check whether needle is a substring of haystack
+// (NULL is considered a substring of itself only), and return an
+// appropriate error message when they fail.
+//
+// The {needle,haystack}_expr arguments are the stringified
+// expressions that generated the two real arguments.
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack);
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack);
+#endif // GTEST_HAS_STD_WSTRING
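+
+// For example (some_string and log_output stand for arbitrary values of a
+// supported string type):
+//
+//   EXPECT_PRED_FORMAT2(testing::IsSubstring, "needle", some_string);
+//   ASSERT_PRED_FORMAT2(testing::IsNotSubstring, "ERROR", log_output);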
+
+namespace internal {
+
+// Helper template function for comparing floating-points.
+//
+// Template parameter:
+//
+// RawType: the raw floating-point type (either float or double)
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+template <typename RawType>
+AssertionResult CmpHelperFloatingPointEQ(const char* expected_expression,
+ const char* actual_expression,
+ RawType expected,
+ RawType actual) {
+ const FloatingPoint<RawType> lhs(expected), rhs(actual);
+
+ if (lhs.AlmostEquals(rhs)) {
+ return AssertionSuccess();
+ }
+
+ ::std::stringstream expected_ss;
+ expected_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << expected;
+
+ ::std::stringstream actual_ss;
+ actual_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << actual;
+
+ return EqFailure(expected_expression,
+ actual_expression,
+ StringStreamToString(&expected_ss),
+ StringStreamToString(&actual_ss),
+ false);
+}
+
+// Helper function for implementing ASSERT_NEAR.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1,
+ const char* expr2,
+ const char* abs_error_expr,
+ double val1,
+ double val2,
+ double abs_error);
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+// A class that enables one to stream messages to assertion macros
+class GTEST_API_ AssertHelper {
+ public:
+ // Constructor.
+ AssertHelper(TestPartResult::Type type,
+ const char* file,
+ int line,
+ const char* message);
+ ~AssertHelper();
+
+ // Message assignment is a semantic trick to enable assertion
+ // streaming; see the GTEST_MESSAGE_ macro below.
+ void operator=(const Message& message) const;
+
+ private:
+ // We put our data in a struct so that the size of the AssertHelper class can
+ // be as small as possible. This is important because gcc is incapable of
+ // re-using stack space even for temporary variables, so every EXPECT_EQ
+ // reserves stack space for another AssertHelper.
+ struct AssertHelperData {
+ AssertHelperData(TestPartResult::Type t,
+ const char* srcfile,
+ int line_num,
+ const char* msg)
+ : type(t), file(srcfile), line(line_num), message(msg) { }
+
+ TestPartResult::Type const type;
+ const char* const file;
+ int const line;
+ String const message;
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelperData);
+ };
+
+ AssertHelperData* const data_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper);
+};
+
+} // namespace internal
+
+#if GTEST_HAS_PARAM_TEST
+// The pure interface class that all value-parameterized tests inherit from.
+// A value-parameterized class must inherit from both ::testing::Test and
+// ::testing::WithParamInterface. In most cases that just means inheriting
+// from ::testing::TestWithParam, but more complicated test hierarchies
+// may need to inherit from Test and WithParamInterface at different levels.
+//
+// This interface has support for accessing the test parameter value via
+// the GetParam() method.
+//
+// Use it with one of the parameter generator defining functions, like Range(),
+// Values(), ValuesIn(), Bool(), and Combine().
+//
+// class FooTest : public ::testing::TestWithParam<int> {
+// protected:
+// FooTest() {
+// // Can use GetParam() here.
+// }
+// virtual ~FooTest() {
+// // Can use GetParam() here.
+// }
+// virtual void SetUp() {
+// // Can use GetParam() here.
+// }
+// virtual void TearDown() {
+// // Can use GetParam() here.
+// }
+// };
+// TEST_P(FooTest, DoesBar) {
+// // Can use GetParam() method here.
+// Foo foo;
+// ASSERT_TRUE(foo.DoesBar(GetParam()));
+// }
+// INSTANTIATE_TEST_CASE_P(OneToTenRange, FooTest, ::testing::Range(1, 10));
+
+template <typename T>
+class WithParamInterface {
+ public:
+ typedef T ParamType;
+ virtual ~WithParamInterface() {}
+
+ // The current parameter value. Is also available in the test fixture's
+ // constructor. This member function is non-static, even though it only
+ // references static data, to reduce the opportunity for incorrect uses
+ // like writing 'WithParamInterface<bool>::GetParam()' for a test that
+ // uses a fixture whose parameter type is int.
+ const ParamType& GetParam() const { return *parameter_; }
+
+ private:
+ // Sets parameter value. The caller is responsible for making sure the value
+ // remains alive and unchanged throughout the current test.
+ static void SetParam(const ParamType* parameter) {
+ parameter_ = parameter;
+ }
+
+ // Static value used for accessing parameter during a test lifetime.
+ static const ParamType* parameter_;
+
+ // TestClass must be a subclass of WithParamInterface<T> and Test.
+ template <class TestClass> friend class internal::ParameterizedTestFactory;
+};
+
+template <typename T>
+const T* WithParamInterface<T>::parameter_ = NULL;
+
+// Most value-parameterized classes can ignore the existence of
+// WithParamInterface, and can just inherit from ::testing::TestWithParam.
+
+template <typename T>
+class TestWithParam : public Test, public WithParamInterface<T> {
+};
+
+#endif // GTEST_HAS_PARAM_TEST
+
+// Macros for indicating success/failure in test code.
+
+// ADD_FAILURE unconditionally adds a failure to the current test.
+// SUCCEED generates a success - it doesn't automatically make the
+// current test successful, as a test is only successful when it has
+// no failure.
+//
+// EXPECT_* verifies that a certain condition is satisfied. If not,
+// it behaves like ADD_FAILURE. In particular:
+//
+// EXPECT_TRUE verifies that a Boolean condition is true.
+// EXPECT_FALSE verifies that a Boolean condition is false.
+//
+// FAIL and ASSERT_* are similar to ADD_FAILURE and EXPECT_*, except
+// that they will also abort the current function on failure. People
+// usually want the fail-fast behavior of FAIL and ASSERT_*, but those
+// writing data-driven tests often find themselves using ADD_FAILURE
+// and EXPECT_* more.
+//
+// Examples:
+//
+// EXPECT_TRUE(server.StatusIsOK());
+// ASSERT_FALSE(server.HasPendingRequest(port))
+// << "There are still pending requests " << "on port " << port;
+
+// Generates a nonfatal failure with a generic message.
+#define ADD_FAILURE() GTEST_NONFATAL_FAILURE_("Failed")
+
+// Generates a nonfatal failure at the given source file location with
+// a generic message.
+#define ADD_FAILURE_AT(file, line) \
+ GTEST_MESSAGE_AT_(file, line, "Failed", \
+ ::testing::TestPartResult::kNonFatalFailure)
+
+// Generates a fatal failure with a generic message.
+#define GTEST_FAIL() GTEST_FATAL_FAILURE_("Failed")
+
+// Define this macro to 1 to omit the definition of FAIL(), which is a
+// generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_FAIL
+# define FAIL() GTEST_FAIL()
+#endif
+
+// Generates a success with a generic message.
+#define GTEST_SUCCEED() GTEST_SUCCESS_("Succeeded")
+
+// Define this macro to 1 to omit the definition of SUCCEED(), which
+// is a generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_SUCCEED
+# define SUCCEED() GTEST_SUCCEED()
+#endif
+
+// Macros for testing exceptions.
+//
+// * {ASSERT|EXPECT}_THROW(statement, expected_exception):
+// Tests that the statement throws the expected exception.
+// * {ASSERT|EXPECT}_NO_THROW(statement):
+// Tests that the statement doesn't throw any exception.
+// * {ASSERT|EXPECT}_ANY_THROW(statement):
+// Tests that the statement throws an exception.
+
+#define EXPECT_THROW(statement, expected_exception) \
+ GTEST_TEST_THROW_(statement, expected_exception, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_NO_THROW(statement) \
+ GTEST_TEST_NO_THROW_(statement, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_ANY_THROW(statement) \
+ GTEST_TEST_ANY_THROW_(statement, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_THROW(statement, expected_exception) \
+ GTEST_TEST_THROW_(statement, expected_exception, GTEST_FATAL_FAILURE_)
+#define ASSERT_NO_THROW(statement) \
+ GTEST_TEST_NO_THROW_(statement, GTEST_FATAL_FAILURE_)
+#define ASSERT_ANY_THROW(statement) \
+ GTEST_TEST_ANY_THROW_(statement, GTEST_FATAL_FAILURE_)
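+
+// For example (Foo() and the exception type are illustrative only):
+//
+//   EXPECT_THROW(Foo(-1), std::out_of_range);
+//   EXPECT_NO_THROW(Foo(0));
+//   ASSERT_ANY_THROW(Foo(42));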
+
+// Boolean assertions. Condition can be either a Boolean expression or an
+// AssertionResult. For more information on how to use AssertionResult with
+// these macros see comments on that class.
+#define EXPECT_TRUE(condition) \
+ GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \
+ GTEST_NONFATAL_FAILURE_)
+#define EXPECT_FALSE(condition) \
+ GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \
+ GTEST_NONFATAL_FAILURE_)
+#define ASSERT_TRUE(condition) \
+ GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \
+ GTEST_FATAL_FAILURE_)
+#define ASSERT_FALSE(condition) \
+ GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \
+ GTEST_FATAL_FAILURE_)
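+
+// For example (server and IsPositive() are illustrative placeholders;
+// IsPositive() returns a testing::AssertionResult):
+//
+//   EXPECT_TRUE(server.IsRunning());
+//   ASSERT_TRUE(IsPositive(x)) << "where x = " << x;
+//   EXPECT_FALSE(queue.IsEmpty());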
+
+// Includes the auto-generated header that implements a family of
+// generic predicate assertion macros.
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is AUTOMATICALLY GENERATED on 09/24/2010 by command
+// 'gen_gtest_pred_impl.py 5'. DO NOT EDIT BY HAND!
+//
+// Implements a family of generic predicate assertion macros.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
+
+// Makes sure this header is not included before gtest.h.
+#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
+# error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
+#endif // GTEST_INCLUDE_GTEST_GTEST_H_
+
+// This header implements a family of generic predicate assertion
+// macros:
+//
+// ASSERT_PRED_FORMAT1(pred_format, v1)
+// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
+// ...
+//
+// where pred_format is a function or functor that takes n (in the
+// case of ASSERT_PRED_FORMATn) values and their source expression
+// text, and returns a testing::AssertionResult. See the definition
+// of ASSERT_EQ in gtest.h for an example.
+//
+// If you don't care about formatting, you can use the more
+// restrictive version:
+//
+// ASSERT_PRED1(pred, v1)
+// ASSERT_PRED2(pred, v1, v2)
+// ...
+//
+// where pred is an n-ary function or functor that returns bool,
+// and the values v1, v2, ..., must support the << operator for
+// streaming to std::ostream.
+//
+// We also define the EXPECT_* variations.
+//
+// For now we only support predicates whose arity is at most 5.
+// Please email googletestframework@googlegroups.com if you need
+// support for higher arities.
+
+// GTEST_ASSERT_ is the basic statement to which all of the assertions
+// in this file reduce. Don't use this in your code.
+
+#define GTEST_ASSERT_(expression, on_failure) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (const ::testing::AssertionResult gtest_ar = (expression)) \
+ ; \
+ else \
+ on_failure(gtest_ar.failure_message())
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED1. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1>
+AssertionResult AssertPred1Helper(const char* pred_text,
+ const char* e1,
+ Pred pred,
+ const T1& v1) {
+ if (pred(v1)) return AssertionSuccess();
+
+ return AssertionFailure() << pred_text << "("
+ << e1 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, v1),\
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED1. Don't use
+// this in your code.
+#define GTEST_PRED1_(pred, v1, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \
+ #v1, \
+ pred, \
+ v1), on_failure)
+
+// Unary predicate assertion macros.
+#define EXPECT_PRED_FORMAT1(pred_format, v1) \
+ GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED1(pred, v1) \
+ GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT1(pred_format, v1) \
+ GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED1(pred, v1) \
+ GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)
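+
+// For example, given a hypothetical predicate
+//
+//   bool IsEven(int n) { return (n % 2) == 0; }
+//
+// the following assertion
+//
+//   EXPECT_PRED1(IsEven, n);
+//
+// fails with a message naming both the expression and the value of n
+// whenever n is odd.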
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED2. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1,
+ typename T2>
+AssertionResult AssertPred2Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ Pred pred,
+ const T1& v1,
+ const T2& v2) {
+ if (pred(v1, v2)) return AssertionSuccess();
+
+ return AssertionFailure() << pred_text << "("
+ << e1 << ", "
+ << e2 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1
+ << "\n" << e2 << " evaluates to " << v2;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2),\
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED2. Don't use
+// this in your code.
+#define GTEST_PRED2_(pred, v1, v2, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \
+ #v1, \
+ #v2, \
+ pred, \
+ v1, \
+ v2), on_failure)
+
+// Binary predicate assertion macros.
+#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \
+ GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED2(pred, v1, v2) \
+ GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \
+ GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED2(pred, v1, v2) \
+ GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED3. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1,
+ typename T2,
+ typename T3>
+AssertionResult AssertPred3Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ const char* e3,
+ Pred pred,
+ const T1& v1,
+ const T2& v2,
+ const T3& v3) {
+ if (pred(v1, v2, v3)) return AssertionSuccess();
+
+ return AssertionFailure() << pred_text << "("
+ << e1 << ", "
+ << e2 << ", "
+ << e3 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1
+ << "\n" << e2 << " evaluates to " << v2
+ << "\n" << e3 << " evaluates to " << v3;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3),\
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED3. Don't use
+// this in your code.
+#define GTEST_PRED3_(pred, v1, v2, v3, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \
+ #v1, \
+ #v2, \
+ #v3, \
+ pred, \
+ v1, \
+ v2, \
+ v3), on_failure)
+
+// Ternary predicate assertion macros.
+#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+ GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED3(pred, v1, v2, v3) \
+ GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+ GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED3(pred, v1, v2, v3) \
+ GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED4. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4>
+AssertionResult AssertPred4Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ const char* e3,
+ const char* e4,
+ Pred pred,
+ const T1& v1,
+ const T2& v2,
+ const T3& v3,
+ const T4& v4) {
+ if (pred(v1, v2, v3, v4)) return AssertionSuccess();
+
+ return AssertionFailure() << pred_text << "("
+ << e1 << ", "
+ << e2 << ", "
+ << e3 << ", "
+ << e4 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1
+ << "\n" << e2 << " evaluates to " << v2
+ << "\n" << e3 << " evaluates to " << v3
+ << "\n" << e4 << " evaluates to " << v4;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4),\
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED4. Don't use
+// this in your code.
+#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \
+ #v1, \
+ #v2, \
+ #v3, \
+ #v4, \
+ pred, \
+ v1, \
+ v2, \
+ v3, \
+ v4), on_failure)
+
+// 4-ary predicate assertion macros.
+#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+ GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED4(pred, v1, v2, v3, v4) \
+ GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+ GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED4(pred, v1, v2, v3, v4) \
+ GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED5. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5>
+AssertionResult AssertPred5Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ const char* e3,
+ const char* e4,
+ const char* e5,
+ Pred pred,
+ const T1& v1,
+ const T2& v2,
+ const T3& v3,
+ const T4& v4,
+ const T5& v5) {
+ if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess();
+
+ return AssertionFailure() << pred_text << "("
+ << e1 << ", "
+ << e2 << ", "
+ << e3 << ", "
+ << e4 << ", "
+ << e5 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1
+ << "\n" << e2 << " evaluates to " << v2
+ << "\n" << e3 << " evaluates to " << v3
+ << "\n" << e4 << " evaluates to " << v4
+ << "\n" << e5 << " evaluates to " << v5;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5),\
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED5. Don't use
+// this in your code.
+#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \
+ #v1, \
+ #v2, \
+ #v3, \
+ #v4, \
+ #v5, \
+ pred, \
+ v1, \
+ v2, \
+ v3, \
+ v4, \
+ v5), on_failure)
+
+// 5-ary predicate assertion macros.
+#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
+ GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \
+ GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
+ GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \
+ GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
+
+
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
+
+// Macros for testing equalities and inequalities.
+//
+// * {ASSERT|EXPECT}_EQ(expected, actual): Tests that expected == actual
+// * {ASSERT|EXPECT}_NE(v1, v2): Tests that v1 != v2
+// * {ASSERT|EXPECT}_LT(v1, v2): Tests that v1 < v2
+// * {ASSERT|EXPECT}_LE(v1, v2): Tests that v1 <= v2
+// * {ASSERT|EXPECT}_GT(v1, v2): Tests that v1 > v2
+// * {ASSERT|EXPECT}_GE(v1, v2): Tests that v1 >= v2
+//
+// When they are not, Google Test prints both the tested expressions and
+// their actual values. The values must be compatible built-in types,
+// or you will get a compiler error. By "compatible" we mean that the
+// values can be compared by the respective operator.
+//
+// Note:
+//
+// 1. It is possible to make a user-defined type work with
+// {ASSERT|EXPECT}_??(), but that requires overloading the
+// comparison operators and is thus discouraged by the Google C++
+// Usage Guide. Therefore, you are advised to use the
+// {ASSERT|EXPECT}_TRUE() macro to assert that two objects are
+// equal.
+//
+// 2. The {ASSERT|EXPECT}_??() macros do pointer comparisons on
+// pointers (in particular, C strings). Therefore, if you use it
+// with two C strings, you are testing how their locations in memory
+// are related, not how their content is related. To compare two C
+// strings by content, use {ASSERT|EXPECT}_STR*().
+//
+// 3. {ASSERT|EXPECT}_EQ(expected, actual) is preferred to
+// {ASSERT|EXPECT}_TRUE(expected == actual), as the former tells you
+// what the actual value is when it fails, and similarly for the
+// other comparisons.
+//
+// 4. Do not depend on the order in which {ASSERT|EXPECT}_??()
+// evaluate their arguments, which is undefined.
+//
+// 5. These macros evaluate their arguments exactly once.
+//
+// Examples:
+//
+// EXPECT_NE(5, Foo());
+// EXPECT_EQ(NULL, a_pointer);
+// ASSERT_LT(i, array_size);
+// ASSERT_GT(records.size(), 0) << "There is no record left.";
+
+#define EXPECT_EQ(expected, actual) \
+ EXPECT_PRED_FORMAT2(::testing::internal:: \
+ EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \
+ expected, actual)
+#define EXPECT_NE(expected, actual) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNE, expected, actual)
+#define EXPECT_LE(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define EXPECT_LT(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define EXPECT_GE(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define EXPECT_GT(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
+#define GTEST_ASSERT_EQ(expected, actual) \
+ ASSERT_PRED_FORMAT2(::testing::internal:: \
+ EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \
+ expected, actual)
+#define GTEST_ASSERT_NE(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2)
+#define GTEST_ASSERT_LE(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define GTEST_ASSERT_LT(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define GTEST_ASSERT_GE(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define GTEST_ASSERT_GT(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
+// Define macro GTEST_DONT_DEFINE_ASSERT_XY to 1 to omit the definition of
+// ASSERT_XY(), which clashes with some users' own code.
+
+#if !GTEST_DONT_DEFINE_ASSERT_EQ
+# define ASSERT_EQ(val1, val2) GTEST_ASSERT_EQ(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_NE
+# define ASSERT_NE(val1, val2) GTEST_ASSERT_NE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_LE
+# define ASSERT_LE(val1, val2) GTEST_ASSERT_LE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_LT
+# define ASSERT_LT(val1, val2) GTEST_ASSERT_LT(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_GE
+# define ASSERT_GE(val1, val2) GTEST_ASSERT_GE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_GT
+# define ASSERT_GT(val1, val2) GTEST_ASSERT_GT(val1, val2)
+#endif
+
+// C String Comparisons. All tests treat NULL and any non-NULL string
+// as different. Two NULLs are equal.
+//
+// * {ASSERT|EXPECT}_STREQ(s1, s2): Tests that s1 == s2
+// * {ASSERT|EXPECT}_STRNE(s1, s2): Tests that s1 != s2
+// * {ASSERT|EXPECT}_STRCASEEQ(s1, s2): Tests that s1 == s2, ignoring case
+// * {ASSERT|EXPECT}_STRCASENE(s1, s2): Tests that s1 != s2, ignoring case
+//
+// For wide or narrow string objects, you can use the
+// {ASSERT|EXPECT}_??() macros.
+//
+// Don't depend on the order in which the arguments are evaluated,
+// which is undefined.
+//
+// These macros evaluate their arguments exactly once.
+
+#define EXPECT_STREQ(expected, actual) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
+#define EXPECT_STRNE(s1, s2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define EXPECT_STRCASEEQ(expected, actual) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
+#define EXPECT_STRCASENE(s1, s2)\
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
+
+#define ASSERT_STREQ(expected, actual) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
+#define ASSERT_STRNE(s1, s2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define ASSERT_STRCASEEQ(expected, actual) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
+#define ASSERT_STRCASENE(s1, s2)\
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
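+
+// For example (c_str1 and c_str2 stand for arbitrary const char* values):
+//
+//   EXPECT_STREQ("hello", c_str1);       // Compares the characters.
+//   EXPECT_EQ(c_str1, c_str2);           // Compares only the pointer values.
+//   EXPECT_STRCASEEQ("Hello", "HELLO");  // Succeeds; case is ignored.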
+
+// Macros for comparing floating-point numbers.
+//
+// * {ASSERT|EXPECT}_FLOAT_EQ(expected, actual):
+// Tests that two float values are almost equal.
+// * {ASSERT|EXPECT}_DOUBLE_EQ(expected, actual):
+// Tests that two double values are almost equal.
+// * {ASSERT|EXPECT}_NEAR(v1, v2, abs_error):
+// Tests that v1 and v2 are within the given distance to each other.
+//
+// Google Test uses ULP-based comparison to automatically pick a default
+// error bound that is appropriate for the operands. See the
+// FloatingPoint template class in gtest-internal.h if you are
+// interested in the implementation details.
+
+#define EXPECT_FLOAT_EQ(expected, actual)\
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+ expected, actual)
+
+#define EXPECT_DOUBLE_EQ(expected, actual)\
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+ expected, actual)
+
+#define ASSERT_FLOAT_EQ(expected, actual)\
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+ expected, actual)
+
+#define ASSERT_DOUBLE_EQ(expected, actual)\
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+ expected, actual)
+
+#define EXPECT_NEAR(val1, val2, abs_error)\
+ EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+ val1, val2, abs_error)
+
+#define ASSERT_NEAR(val1, val2, abs_error)\
+ ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+ val1, val2, abs_error)
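+
+// For example:
+//
+//   EXPECT_DOUBLE_EQ(0.3, 0.1 + 0.2);      // Tolerates a few ULPs of rounding.
+//   EXPECT_NEAR(3.14159, 3.1416, 1e-4);    // Requires |val1 - val2| <= 1e-4.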
+
+// These predicate format functions work on floating-point values, and
+// can be used in {ASSERT|EXPECT}_PRED_FORMAT2*(), e.g.
+//
+// EXPECT_PRED_FORMAT2(testing::DoubleLE, Foo(), 5.0);
+
+// Asserts that val1 is less than, or almost equal to, val2. Fails
+// otherwise. In particular, it fails if either val1 or val2 is NaN.
+GTEST_API_ AssertionResult FloatLE(const char* expr1, const char* expr2,
+ float val1, float val2);
+GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2,
+ double val1, double val2);
+
+
+#if GTEST_OS_WINDOWS
+
+// Macros that test for HRESULT failure and success. These are only useful
+// on Windows, and rely on Windows SDK macros and APIs to compile.
+//
+// * {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}(expr)
+//
+// When expr unexpectedly fails or succeeds, Google Test prints the
+// expected result and the actual result with both a human-readable
+// string representation of the error, if available, as well as the
+// hex result code.
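+//
+// A brief usage sketch (::CoInitialize is a Windows SDK call that returns an
+// HRESULT):
+//
+//   EXPECT_HRESULT_SUCCEEDED(::CoInitialize(NULL));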
+# define EXPECT_HRESULT_SUCCEEDED(expr) \
+ EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define ASSERT_HRESULT_SUCCEEDED(expr) \
+ ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define EXPECT_HRESULT_FAILED(expr) \
+ EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+# define ASSERT_HRESULT_FAILED(expr) \
+ ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+#endif // GTEST_OS_WINDOWS
+
+// Macros that execute statement and check that it doesn't generate new fatal
+// failures in the current thread.
+//
+// * {ASSERT|EXPECT}_NO_FATAL_FAILURE(statement);
+//
+// Examples:
+//
+// EXPECT_NO_FATAL_FAILURE(Process());
+// ASSERT_NO_FATAL_FAILURE(Process()) << "Process() failed";
+//
+#define ASSERT_NO_FATAL_FAILURE(statement) \
+ GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_)
+#define EXPECT_NO_FATAL_FAILURE(statement) \
+ GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_)
+
+// Causes a trace (including the source file path, the current line
+// number, and the given message) to be included in every test failure
+// message generated by code in the current scope. The effect is
+// undone when the control leaves the current scope.
+//
+// The message argument can be anything streamable to std::ostream.
+//
+// In the implementation, we include the current line number as part
+// of the dummy variable name, thus allowing multiple SCOPED_TRACE()s
+// to appear in the same block - as long as they are on different
+// lines.
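+//
+// A brief usage sketch (Sub1() is a hypothetical helper that contains its
+// own EXPECT_* checks):
+//
+//   TEST(FooTest, Bar) {
+//     {
+//       SCOPED_TRACE("first call");  // failures inside this block carry the trace
+//       Sub1(1);
+//     }
+//     Sub1(2);  // failures here do not
+//   }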
+#define SCOPED_TRACE(message) \
+ ::testing::internal::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\
+ __FILE__, __LINE__, ::testing::Message() << (message))
+
+// Compile-time assertion for type equality.
+// StaticAssertTypeEq<type1, type2>() compiles iff type1 and type2 are
+// the same type. The value it returns is not interesting.
+//
+// Instead of making StaticAssertTypeEq a class template, we make it a
+// function template that invokes a helper class template. This
+// prevents a user from misusing StaticAssertTypeEq<T1, T2> by
+// defining objects of that type.
+//
+// CAVEAT:
+//
+// When used inside a method of a class template,
+// StaticAssertTypeEq<T1, T2>() is effective ONLY IF the method is
+// instantiated. For example, given:
+//
+// template <typename T> class Foo {
+// public:
+// void Bar() { testing::StaticAssertTypeEq<int, T>(); }
+// };
+//
+// the code:
+//
+// void Test1() { Foo<bool> foo; }
+//
+// will NOT generate a compiler error, as Foo<bool>::Bar() is never
+// actually instantiated. Instead, you need:
+//
+// void Test2() { Foo<bool> foo; foo.Bar(); }
+//
+// to cause a compiler error.
+template <typename T1, typename T2>
+bool StaticAssertTypeEq() {
+ (void)internal::StaticAssertTypeEqHelper<T1, T2>();
+ return true;
+}
+
+// Defines a test.
+//
+// The first parameter is the name of the test case, and the second
+// parameter is the name of the test within the test case.
+//
+// The convention is to end the test case name with "Test". For
+// example, a test case for the Foo class can be named FooTest.
+//
+// The user should put his test code between braces after using this
+// macro. Example:
+//
+// TEST(FooTest, InitializesCorrectly) {
+// Foo foo;
+// EXPECT_TRUE(foo.StatusIsOK());
+// }
+
+// Note that we call GetTestTypeId() instead of GetTypeId<
+// ::testing::Test>() here to get the type ID of testing::Test. This
+// is to work around a suspected linker bug when using Google Test as
+// a framework on Mac OS X. The bug causes GetTypeId<
+// ::testing::Test>() to return different values depending on whether
+// the call is from the Google Test framework itself or from user test
+// code. GetTestTypeId() is guaranteed to always return the same
+// value, as it always calls GetTypeId<>() from the Google Test
+// framework.
+#define GTEST_TEST(test_case_name, test_name)\
+ GTEST_TEST_(test_case_name, test_name, \
+ ::testing::Test, ::testing::internal::GetTestTypeId())
+
+// Define this macro to 1 to omit the definition of TEST(), which
+// is a generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_TEST
+# define TEST(test_case_name, test_name) GTEST_TEST(test_case_name, test_name)
+#endif
+
+// Defines a test that uses a test fixture.
+//
+// The first parameter is the name of the test fixture class, which
+// also doubles as the test case name. The second parameter is the
+// name of the test within the test case.
+//
+// A test fixture class must be declared earlier. The user should put
+// his test code between braces after using this macro. Example:
+//
+// class FooTest : public testing::Test {
+// protected:
+// virtual void SetUp() { b_.AddElement(3); }
+//
+// Foo a_;
+// Foo b_;
+// };
+//
+// TEST_F(FooTest, InitializesCorrectly) {
+// EXPECT_TRUE(a_.StatusIsOK());
+// }
+//
+// TEST_F(FooTest, ReturnsElementCountCorrectly) {
+// EXPECT_EQ(0, a_.size());
+// EXPECT_EQ(1, b_.size());
+// }
+
+#define TEST_F(test_fixture, test_name)\
+ GTEST_TEST_(test_fixture, test_name, test_fixture, \
+ ::testing::internal::GetTypeId<test_fixture>())
+
+// Use this macro in main() to run all tests. It returns 0 if all
+// tests are successful, or 1 otherwise.
+//
+// RUN_ALL_TESTS() should be invoked after the command line has been
+// parsed by InitGoogleTest().
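+//
+// A typical main() is then a short sketch like:
+//
+//   int main(int argc, char** argv) {
+//     ::testing::InitGoogleTest(&argc, argv);
+//     return RUN_ALL_TESTS();
+//   }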
+
+#define RUN_ALL_TESTS()\
+ (::testing::UnitTest::GetInstance()->Run())
+
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_H_
diff --git a/internal/ceres/implicit_schur_complement.cc b/internal/ceres/implicit_schur_complement.cc
new file mode 100644
index 0000000..4af030a
--- /dev/null
+++ b/internal/ceres/implicit_schur_complement.cc
@@ -0,0 +1,228 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/implicit_schur_complement.h"
+
+#include "Eigen/Dense"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+ImplicitSchurComplement::ImplicitSchurComplement(int num_eliminate_blocks,
+ bool preconditioner)
+ : num_eliminate_blocks_(num_eliminate_blocks),
+ preconditioner_(preconditioner),
+ A_(NULL),
+ D_(NULL),
+ b_(NULL),
+ block_diagonal_EtE_inverse_(NULL),
+ block_diagonal_FtF_inverse_(NULL) {
+}
+
+ImplicitSchurComplement::~ImplicitSchurComplement() {
+}
+
+void ImplicitSchurComplement::Init(const BlockSparseMatrixBase& A,
+ const double* D,
+ const double* b) {
+ // Since initialization is reasonably heavy, perhaps we can save on
+  // constructing a new object every time.
+ if (A_ == NULL) {
+ A_.reset(new PartitionedMatrixView(A, num_eliminate_blocks_));
+ }
+
+ D_ = D;
+ b_ = b;
+
+ // Initialize temporary storage and compute the block diagonals of
+  // E'E and, if the preconditioner is requested, F'F.
+ if (block_diagonal_EtE_inverse_ == NULL) {
+ block_diagonal_EtE_inverse_.reset(A_->CreateBlockDiagonalEtE());
+ if (preconditioner_) {
+ block_diagonal_FtF_inverse_.reset(A_->CreateBlockDiagonalFtF());
+ }
+ rhs_.resize(A_->num_cols_f());
+ rhs_.setZero();
+ tmp_rows_.resize(A_->num_rows());
+ tmp_e_cols_.resize(A_->num_cols_e());
+ tmp_e_cols_2_.resize(A_->num_cols_e());
+ tmp_f_cols_.resize(A_->num_cols_f());
+ } else {
+ A_->UpdateBlockDiagonalEtE(block_diagonal_EtE_inverse_.get());
+ if (preconditioner_) {
+ A_->UpdateBlockDiagonalFtF(block_diagonal_FtF_inverse_.get());
+ }
+ }
+
+ // The block diagonals of the augmented linear system contain
+ // contributions from the diagonal D if it is non-null. Add that to
+ // the block diagonals and invert them.
+ AddDiagonalAndInvert(D_, block_diagonal_EtE_inverse_.get());
+ if (preconditioner_) {
+ AddDiagonalAndInvert((D_ == NULL) ? NULL : D_ + A_->num_cols_e(),
+ block_diagonal_FtF_inverse_.get());
+ }
+
+ // Compute the RHS of the Schur complement system.
+ UpdateRhs();
+}
+
+// Evaluate the product
+//
+// Sx = [F'F - F'E (E'E)^-1 E'F]x
+//
+// By breaking it down into individual matrix vector products
+// involving the matrices E and F. This is implemented using a
+// PartitionedMatrixView of the input matrix A.
+void ImplicitSchurComplement::RightMultiply(const double* x, double* y) const {
+ // y1 = F x
+ tmp_rows_.setZero();
+ A_->RightMultiplyF(x, tmp_rows_.data());
+
+ // y2 = E' y1
+ tmp_e_cols_.setZero();
+ A_->LeftMultiplyE(tmp_rows_.data(), tmp_e_cols_.data());
+
+ // y3 = -(E'E)^-1 y2
+ tmp_e_cols_2_.setZero();
+ block_diagonal_EtE_inverse_->RightMultiply(tmp_e_cols_.data(),
+ tmp_e_cols_2_.data());
+ tmp_e_cols_2_ *= -1.0;
+
+ // y1 = y1 + E y3
+ A_->RightMultiplyE(tmp_e_cols_2_.data(), tmp_rows_.data());
+
+ // y5 = D * x
+ if (D_ != NULL) {
+ ConstVectorRef Dref(D_ + A_->num_cols_e(), num_cols());
+ VectorRef(y, num_cols()) =
+ (Dref.array().square() *
+ ConstVectorRef(x, num_cols()).array()).matrix();
+ } else {
+ VectorRef(y, num_cols()).setZero();
+ }
+
+ // y = y5 + F' y1
+ A_->LeftMultiplyF(tmp_rows_.data(), y);
+}
+
+// Given a block diagonal matrix and an optional array of diagonal
+// entries D, add them to the diagonal of the matrix and compute the
+// inverse of each diagonal block.
+void ImplicitSchurComplement::AddDiagonalAndInvert(
+ const double* D,
+ BlockSparseMatrix* block_diagonal) {
+ const CompressedRowBlockStructure* block_diagonal_structure =
+ block_diagonal->block_structure();
+ for (int r = 0; r < block_diagonal_structure->rows.size(); ++r) {
+ const int row_block_pos = block_diagonal_structure->rows[r].block.position;
+ const int row_block_size = block_diagonal_structure->rows[r].block.size;
+ const Cell& cell = block_diagonal_structure->rows[r].cells[0];
+ MatrixRef m(block_diagonal->mutable_values() + cell.position,
+ row_block_size, row_block_size);
+
+ if (D != NULL) {
+ ConstVectorRef d(D + row_block_pos, row_block_size);
+ m += d.array().square().matrix().asDiagonal();
+ }
+
+ m = m
+ .selfadjointView<Eigen::Upper>()
+ .ldlt()
+ .solve(Matrix::Identity(row_block_size, row_block_size));
+ }
+}
+
+// Similar to RightMultiply, use the block structure of the matrix A
+// to compute y = (E'E)^-1 (E'b - E'F x).
+void ImplicitSchurComplement::BackSubstitute(const double* x, double* y) {
+ const int num_cols_e = A_->num_cols_e();
+ const int num_cols_f = A_->num_cols_f();
+ const int num_cols = A_->num_cols();
+ const int num_rows = A_->num_rows();
+
+ // y1 = F x
+ tmp_rows_.setZero();
+ A_->RightMultiplyF(x, tmp_rows_.data());
+
+ // y2 = b - y1
+ tmp_rows_ = ConstVectorRef(b_, num_rows) - tmp_rows_;
+
+ // y3 = E' y2
+ tmp_e_cols_.setZero();
+ A_->LeftMultiplyE(tmp_rows_.data(), tmp_e_cols_.data());
+
+ // y = (E'E)^-1 y3
+ VectorRef(y, num_cols).setZero();
+ block_diagonal_EtE_inverse_->RightMultiply(tmp_e_cols_.data(), y);
+
+ // The full solution vector y has two blocks. The first block of
+ // variables corresponds to the eliminated variables, which we just
+ // computed via back substitution. The second block of variables
+ // corresponds to the Schur complement system, so we just copy those
+ // values from the solution to the Schur complement.
+ VectorRef(y + num_cols_e, num_cols_f) = ConstVectorRef(x, num_cols_f);
+}
+
+// Compute the RHS of the Schur complement system.
+//
+// rhs = F'b - F'E (E'E)^-1 E'b
+//
+// Like BackSubstitute, we use the block structure of A to implement
+// this using a series of matrix vector products.
+void ImplicitSchurComplement::UpdateRhs() {
+ // y1 = E'b
+ tmp_e_cols_.setZero();
+ A_->LeftMultiplyE(b_, tmp_e_cols_.data());
+
+ // y2 = (E'E)^-1 y1
+ Vector y2 = Vector::Zero(A_->num_cols_e());
+ block_diagonal_EtE_inverse_->RightMultiply(tmp_e_cols_.data(), y2.data());
+
+ // y3 = E y2
+ tmp_rows_.setZero();
+ A_->RightMultiplyE(y2.data(), tmp_rows_.data());
+
+ // y3 = b - y3
+ tmp_rows_ = ConstVectorRef(b_, A_->num_rows()) - tmp_rows_;
+
+ // rhs = F' y3
+ rhs_.setZero();
+ A_->LeftMultiplyF(tmp_rows_.data(), rhs_.data());
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/implicit_schur_complement.h b/internal/ceres/implicit_schur_complement.h
new file mode 100644
index 0000000..b9ebaa4
--- /dev/null
+++ b/internal/ceres/implicit_schur_complement.h
@@ -0,0 +1,168 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// An iterative solver for solving the Schur complement/reduced camera
+// linear system that arise in SfM problems.
+
+#ifndef CERES_INTERNAL_IMPLICIT_SCHUR_COMPLEMENT_H_
+#define CERES_INTERNAL_IMPLICIT_SCHUR_COMPLEMENT_H_
+
+#include "ceres/linear_operator.h"
+#include "ceres/partitioned_matrix_view.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockSparseMatrix;
+class BlockSparseMatrixBase;
+
+// This class implements various linear algebraic operations related
+// to the Schur complement without explicitly forming it.
+//
+//
+// Given a rectangular linear system Ax = b, where
+//
+// A = [E F]
+//
+// The normal equations are given by
+//
+// A'Ax = A'b
+//
+// |E'E E'F||y| = |E'b|
+// |F'E F'F||z| |F'b|
+//
+// and the Schur complement system is given by
+//
+// [F'F - F'E (E'E)^-1 E'F] z = F'b - F'E (E'E)^-1 E'b
+//
+// Now if we wish to solve Ax = b in the least squares sense, one way
+// is to form this Schur complement system and solve it using
+// Preconditioned Conjugate Gradients.
+//
+// The key operation in a conjugate gradient solver is the evaluation of the
+// matrix vector product with the Schur complement
+//
+// S = F'F - F'E (E'E)^-1 E'F
+//
+// It is straightforward to see that matrix vector products with S can
+// be evaluated without storing S in memory. Instead, given (E'E)^-1
+// (which for our purposes is an easily inverted block diagonal
+// matrix), it can be done in terms of matrix vector products with E,
+// F and (E'E)^-1. This class implements this functionality and other
+// auxiliary bits needed to implement a CG solver on the Schur
+// complement using the PartitionedMatrixView object.
+//
+// THREAD SAFETY: This class is not thread safe. In particular, the
+// RightMultiply (and the LeftMultiply) methods are not thread safe as
+// they depend on mutable arrays used for the temporaries needed to
+// compute the product y += Sx.
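+//
+// Example usage (a sketch; A, D, b, x, y, z and x_full are assumed to be
+// set up by the caller, with A a BlockSparseMatrixBase):
+//
+//   ImplicitSchurComplement isc(num_eliminate_blocks, true);
+//   isc.Init(A, D, b);             // precompute block diagonals and the rhs
+//   isc.RightMultiply(x, y);       // y += S x, without ever forming S
+//   isc.BackSubstitute(z, x_full); // given the f-block estimate z, fill in
+//                                  // the eliminated e-block of x_full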
+class ImplicitSchurComplement : public LinearOperator {
+ public:
+ // num_eliminate_blocks is the number of E blocks in the matrix
+ // A.
+ //
+ // preconditioner indicates whether the inverse of the matrix F'F
+ // should be computed or not as a preconditioner for the Schur
+ // Complement.
+ //
+ // TODO(sameeragarwal): Get rid of the two bools below and replace
+ // them with enums.
+ ImplicitSchurComplement(int num_eliminate_blocks, bool preconditioner);
+ virtual ~ImplicitSchurComplement();
+
+ // Initialize the Schur complement for a linear least squares
+ // problem of the form
+ //
+ // |A | x = |b|
+ // |diag(D)| |0|
+ //
+ // If D is null, then it is treated as a zero dimensional matrix. It
+ // is important that the matrix A have a BlockStructure object
+  // associated with it and that its block structure is compatible
+ // with the SchurComplement solver.
+ void Init(const BlockSparseMatrixBase& A, const double* D, const double* b);
+
+ // y += Sx, where S is the Schur complement.
+ virtual void RightMultiply(const double* x, double* y) const;
+
+ // The Schur complement is a symmetric positive definite matrix,
+ // thus the left and right multiply operators are the same.
+ virtual void LeftMultiply(const double* x, double* y) const {
+ RightMultiply(x, y);
+ }
+
+ // y = (E'E)^-1 (E'b - E'F x). Given an estimate of the solution to
+ // the Schur complement system, this method computes the value of
+ // the e_block variables that were eliminated to form the Schur
+ // complement.
+ void BackSubstitute(const double* x, double* y);
+
+ virtual int num_rows() const { return A_->num_cols_f(); }
+ virtual int num_cols() const { return A_->num_cols_f(); }
+ const Vector& rhs() const { return rhs_; }
+
+ const BlockSparseMatrix* block_diagonal_EtE_inverse() const {
+ return block_diagonal_EtE_inverse_.get();
+ }
+
+ const BlockSparseMatrix* block_diagonal_FtF_inverse() const {
+ return block_diagonal_FtF_inverse_.get();
+ }
+
+ private:
+ void AddDiagonalAndInvert(const double* D, BlockSparseMatrix* matrix);
+ void UpdateRhs();
+
+ int num_eliminate_blocks_;
+ bool preconditioner_;
+
+ scoped_ptr<PartitionedMatrixView> A_;
+ const double* D_;
+ const double* b_;
+
+ scoped_ptr<BlockSparseMatrix> block_diagonal_EtE_inverse_;
+ scoped_ptr<BlockSparseMatrix> block_diagonal_FtF_inverse_;
+
+ Vector rhs_;
+
+ // Temporary storage vectors used to implement RightMultiply.
+ mutable Vector tmp_rows_;
+ mutable Vector tmp_e_cols_;
+ mutable Vector tmp_e_cols_2_;
+ mutable Vector tmp_f_cols_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_IMPLICIT_SCHUR_COMPLEMENT_H_
diff --git a/internal/ceres/implicit_schur_complement_test.cc b/internal/ceres/implicit_schur_complement_test.cc
new file mode 100644
index 0000000..bd36672
--- /dev/null
+++ b/internal/ceres/implicit_schur_complement_test.cc
@@ -0,0 +1,196 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/implicit_schur_complement.h"
+
+#include <cstddef>
+#include "Eigen/Dense"
+#include "ceres/block_random_access_dense_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/casts.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/linear_solver.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using testing::AssertionResult;
+
+const double kEpsilon = 1e-14;
+
+class ImplicitSchurComplementTest : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ scoped_ptr<LinearLeastSquaresProblem> problem(
+ CreateLinearLeastSquaresProblemFromId(2));
+
+ CHECK_NOTNULL(problem.get());
+ A_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
+ b_.reset(problem->b.release());
+ D_.reset(problem->D.release());
+
+ num_cols_ = A_->num_cols();
+ num_rows_ = A_->num_rows();
+ num_eliminate_blocks_ = problem->num_eliminate_blocks;
+ }
+
+ void ReducedLinearSystemAndSolution(double* D,
+ Matrix* lhs,
+ Vector* rhs,
+ Vector* solution) {
+ const CompressedRowBlockStructure* bs = A_->block_structure();
+ const int num_col_blocks = bs->cols.size();
+ vector<int> blocks(num_col_blocks - num_eliminate_blocks_, 0);
+ for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
+ blocks[i - num_eliminate_blocks_] = bs->cols[i].size;
+ }
+
+ BlockRandomAccessDenseMatrix blhs(blocks);
+ const int num_schur_rows = blhs.num_rows();
+
+ LinearSolver::Options options;
+ options.elimination_groups.push_back(num_eliminate_blocks_);
+ options.type = DENSE_SCHUR;
+
+ scoped_ptr<SchurEliminatorBase> eliminator(
+ SchurEliminatorBase::Create(options));
+ CHECK_NOTNULL(eliminator.get());
+ eliminator->Init(num_eliminate_blocks_, bs);
+
+ lhs->resize(num_schur_rows, num_schur_rows);
+ rhs->resize(num_schur_rows);
+
+ eliminator->Eliminate(A_.get(), b_.get(), D, &blhs, rhs->data());
+
+ MatrixRef lhs_ref(blhs.mutable_values(), num_schur_rows, num_schur_rows);
+
+ // lhs_ref is an upper triangular matrix. Construct a full version
+ // of lhs_ref in lhs by transposing lhs_ref, choosing the strictly
+ // lower triangular part of the matrix and adding it to lhs_ref.
+ *lhs = lhs_ref;
+ lhs->triangularView<Eigen::StrictlyLower>() =
+ lhs_ref.triangularView<Eigen::StrictlyUpper>().transpose();
+
+ solution->resize(num_cols_);
+ solution->setZero();
+ VectorRef schur_solution(solution->data() + num_cols_ - num_schur_rows,
+ num_schur_rows);
+ schur_solution = lhs->selfadjointView<Eigen::Upper>().ldlt().solve(*rhs);
+ eliminator->BackSubstitute(A_.get(), b_.get(), D,
+ schur_solution.data(), solution->data());
+ }
+
+ AssertionResult TestImplicitSchurComplement(double* D) {
+ Matrix lhs;
+ Vector rhs;
+ Vector reference_solution;
+ ReducedLinearSystemAndSolution(D, &lhs, &rhs, &reference_solution);
+
+ ImplicitSchurComplement isc(num_eliminate_blocks_, true);
+ isc.Init(*A_, D, b_.get());
+
+ int num_sc_cols = lhs.cols();
+
+ for (int i = 0; i < num_sc_cols; ++i) {
+ Vector x(num_sc_cols);
+ x.setZero();
+ x(i) = 1.0;
+
+ Vector y(num_sc_cols);
+ y = lhs * x;
+
+ Vector z(num_sc_cols);
+ isc.RightMultiply(x.data(), z.data());
+
+      // The i^th column of the implicit Schur complement should match the
+      // i^th column of the explicit Schur complement.
+ if ((y - z).norm() > kEpsilon) {
+ return testing::AssertionFailure()
+ << "Explicit and Implicit SchurComplements differ in "
+ << "column " << i << ". explicit: " << y.transpose()
+ << " implicit: " << z.transpose();
+ }
+ }
+
+ // Compare the rhs of the reduced linear system
+ if ((isc.rhs() - rhs).norm() > kEpsilon) {
+ return testing::AssertionFailure()
+ << "Explicit and Implicit SchurComplements differ in "
+ << "rhs. explicit: " << rhs.transpose()
+ << " implicit: " << isc.rhs().transpose();
+ }
+
+ // Reference solution to the f_block.
+ const Vector reference_f_sol =
+ lhs.selfadjointView<Eigen::Upper>().ldlt().solve(rhs);
+
+ // Backsubstituted solution from the implicit schur solver using the
+ // reference solution to the f_block.
+ Vector sol(num_cols_);
+ isc.BackSubstitute(reference_f_sol.data(), sol.data());
+ if ((sol - reference_solution).norm() > kEpsilon) {
+ return testing::AssertionFailure()
+ << "Explicit and Implicit SchurComplements solutions differ. "
+ << "explicit: " << reference_solution.transpose()
+ << " implicit: " << sol.transpose();
+ }
+
+ return testing::AssertionSuccess();
+ }
+
+ int num_rows_;
+ int num_cols_;
+ int num_eliminate_blocks_;
+
+ scoped_ptr<BlockSparseMatrix> A_;
+ scoped_array<double> b_;
+ scoped_array<double> D_;
+};
+
+// Verify that the Schur Complement matrix implied by the
+// ImplicitSchurComplement class matches the one explicitly computed
+// by the SchurComplement solver.
+//
+// We do this with and without regularization to check that the
+// support for the LM diagonal is correct.
+TEST_F(ImplicitSchurComplementTest, SchurMatrixValuesTest) {
+ EXPECT_TRUE(TestImplicitSchurComplement(NULL));
+ EXPECT_TRUE(TestImplicitSchurComplement(D_.get()));
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/integral_types.h b/internal/ceres/integral_types.h
new file mode 100644
index 0000000..01e0493
--- /dev/null
+++ b/internal/ceres/integral_types.h
@@ -0,0 +1,92 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Portable typedefs for various fixed-size integers. Uses template
+// metaprogramming instead of fragile compiler defines.
+
+#ifndef CERES_INTERNAL_INTEGRAL_TYPES_H_
+#define CERES_INTERNAL_INTEGRAL_TYPES_H_
+
+namespace ceres {
+namespace internal {
+
+// Compile time ternary on types.
+template<bool kCondition, typename kTrueType, typename kFalseType>
+struct Ternary {
+ typedef kTrueType type;
+};
+template<typename kTrueType, typename kFalseType>
+struct Ternary<false, kTrueType, kFalseType> {
+ typedef kFalseType type;
+};
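+// For example, Ternary<true, int, char>::type is int, while
+// Ternary<false, int, char>::type is char. The Integer and UnsignedInteger
+// templates below chain such ternaries to select the first builtin type
+// whose size in bits equals kBits.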
+
+#define CERES_INTSIZE(TYPE) \
+ typename Ternary<sizeof(TYPE) * 8 == kBits, TYPE,
+
+template<int kBits>
+struct Integer {
+ typedef
+ CERES_INTSIZE(char)
+ CERES_INTSIZE(short)
+ CERES_INTSIZE(int)
+ CERES_INTSIZE(long int)
+ CERES_INTSIZE(long long)
+ void>::type >::type >::type >::type >::type
+ type;
+};
+
+template<int kBits>
+struct UnsignedInteger {
+ typedef
+ CERES_INTSIZE(unsigned char)
+ CERES_INTSIZE(unsigned short)
+ CERES_INTSIZE(unsigned int)
+ CERES_INTSIZE(unsigned long int)
+ CERES_INTSIZE(unsigned long long)
+ void>::type >::type >::type >::type >::type
+ type;
+};
+
+#undef CERES_INTSIZE
+
+typedef Integer< 8>::type int8;
+typedef Integer<16>::type int16;
+typedef Integer<32>::type int32;
+typedef Integer<64>::type int64;
+
+typedef UnsignedInteger< 8>::type uint8;
+typedef UnsignedInteger<16>::type uint16;
+typedef UnsignedInteger<32>::type uint32;
+typedef UnsignedInteger<64>::type uint64;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_INTEGRAL_TYPES_H_
diff --git a/internal/ceres/iterative_schur_complement_solver.cc b/internal/ceres/iterative_schur_complement_solver.cc
new file mode 100644
index 0000000..376a586
--- /dev/null
+++ b/internal/ceres/iterative_schur_complement_solver.cc
@@ -0,0 +1,145 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/iterative_schur_complement_solver.h"
+
+#include <algorithm>
+#include <cstring>
+#include <vector>
+#include "Eigen/Dense"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/conjugate_gradients_solver.h"
+#include "ceres/implicit_schur_complement.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_solver.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "ceres/visibility_based_preconditioner.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+IterativeSchurComplementSolver::IterativeSchurComplementSolver(
+ const LinearSolver::Options& options)
+ : options_(options) {
+}
+
+IterativeSchurComplementSolver::~IterativeSchurComplementSolver() {
+}
+
+LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl(
+ BlockSparseMatrixBase* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x) {
+ CHECK_NOTNULL(A->block_structure());
+
+ // Initialize a ImplicitSchurComplement object.
+ if (schur_complement_ == NULL) {
+ schur_complement_.reset(
+ new ImplicitSchurComplement(options_.elimination_groups[0],
+ options_.preconditioner_type == JACOBI));
+ }
+ schur_complement_->Init(*A, per_solve_options.D, b);
+
+ // Initialize the solution to the Schur complement system to zero.
+ //
+  // TODO(sameeragarwal): There may be a better initialization than an
+ // all zeros solution. Explore other cheap starting points.
+ reduced_linear_system_solution_.resize(schur_complement_->num_rows());
+ reduced_linear_system_solution_.setZero();
+
+ // Instantiate a conjugate gradient solver that runs on the Schur complement
+ // matrix with the block diagonal of the matrix F'F as the preconditioner.
+ LinearSolver::Options cg_options;
+ cg_options.max_num_iterations = options_.max_num_iterations;
+ ConjugateGradientsSolver cg_solver(cg_options);
+ LinearSolver::PerSolveOptions cg_per_solve_options;
+
+ cg_per_solve_options.r_tolerance = per_solve_options.r_tolerance;
+ cg_per_solve_options.q_tolerance = per_solve_options.q_tolerance;
+
+ bool is_preconditioner_good = false;
+ switch (options_.preconditioner_type) {
+ case IDENTITY:
+ is_preconditioner_good = true;
+ break;
+ case JACOBI:
+ // We need to strip the constness of the block_diagonal_FtF_inverse
+ // matrix here because the only other way to initialize the struct
+ // cg_solve_options would be to add a constructor to it. We know
+ // that the only method ever called on the preconditioner is the
+ // RightMultiply which is a const method so we don't need to worry
+ // about the object getting modified.
+ cg_per_solve_options.preconditioner =
+ const_cast<BlockSparseMatrix*>(
+ schur_complement_->block_diagonal_FtF_inverse());
+ is_preconditioner_good = true;
+ break;
+ case SCHUR_JACOBI:
+ case CLUSTER_JACOBI:
+ case CLUSTER_TRIDIAGONAL:
+ if (visibility_based_preconditioner_.get() == NULL) {
+ visibility_based_preconditioner_.reset(
+ new VisibilityBasedPreconditioner(*A->block_structure(), options_));
+ }
+ is_preconditioner_good =
+ visibility_based_preconditioner_->Update(*A, per_solve_options.D);
+ cg_per_solve_options.preconditioner =
+ visibility_based_preconditioner_.get();
+ break;
+ default:
+ LOG(FATAL) << "Unknown Preconditioner Type";
+ }
+
+ LinearSolver::Summary cg_summary;
+ cg_summary.num_iterations = 0;
+ cg_summary.termination_type = FAILURE;
+
+ if (is_preconditioner_good) {
+ cg_summary = cg_solver.Solve(schur_complement_.get(),
+ schur_complement_->rhs().data(),
+ cg_per_solve_options,
+ reduced_linear_system_solution_.data());
+ if (cg_summary.termination_type != FAILURE) {
+ schur_complement_->BackSubstitute(
+ reduced_linear_system_solution_.data(), x);
+ }
+ }
+
+ VLOG(2) << "CG Iterations : " << cg_summary.num_iterations;
+ return cg_summary;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/iterative_schur_complement_solver.h b/internal/ceres/iterative_schur_complement_solver.h
new file mode 100644
index 0000000..cfeb65e
--- /dev/null
+++ b/internal/ceres/iterative_schur_complement_solver.h
@@ -0,0 +1,94 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_ITERATIVE_SCHUR_COMPLEMENT_SOLVER_H_
+#define CERES_INTERNAL_ITERATIVE_SCHUR_COMPLEMENT_SOLVER_H_
+
+#include "ceres/linear_solver.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockSparseMatrix;
+class BlockSparseMatrixBase;
+class ImplicitSchurComplement;
+class VisibilityBasedPreconditioner;
+
+// This class implements an iterative solver for the linear least
+// squares problems that have a bipartite sparsity structure common
+// to Structure from Motion problems.
+//
+// The algorithm used by this solver was developed in a series of
+// papers - "Agarwal et al, Bundle Adjustment in the Large, ECCV 2010"
+// and "Wu et al, Multicore Bundle Adjustment, submitted to CVPR
+// 2011" at the University of Washington.
+//
+// The key idea is that one can run Conjugate Gradients on the Schur
+// Complement system without explicitly forming the Schur Complement
+// in memory. The heavy lifting for this is done by the
+// ImplicitSchurComplement class. Not forming the Schur complement in
+// memory and factoring it results in substantial savings in time and
+// memory. Further, iterative solvers like this open up the
+// possibility of solving the Newton equations in a non-linear solver
+// only approximately and terminating early, thereby saving even more
+// time.
+//
+// For the curious, running CG on the Schur complement is the same as
+// running CG on the Normal Equations with an SSOR preconditioner. For
+// a proof of this fact and others related to this solver please see
+// the section on Domain Decomposition Methods in Saad's book
+// "Iterative Methods for Sparse Linear Systems".
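+//
+// Example usage (a sketch mirroring this solver's tests; A, b, x and
+// num_eliminate_blocks are assumed to be set up by the caller):
+//
+//   LinearSolver::Options options;
+//   options.elimination_groups.push_back(num_eliminate_blocks);
+//   options.max_num_iterations = 100;
+//   IterativeSchurComplementSolver solver(options);
+//   LinearSolver::PerSolveOptions per_solve_options;
+//   per_solve_options.r_tolerance = 1e-9;
+//   solver.Solve(&A, b, per_solve_options, x);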
+class IterativeSchurComplementSolver : public BlockSparseMatrixBaseSolver {
+ public:
+ explicit IterativeSchurComplementSolver(
+ const LinearSolver::Options& options);
+
+ virtual ~IterativeSchurComplementSolver();
+
+ private:
+ virtual LinearSolver::Summary SolveImpl(
+ BlockSparseMatrixBase* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& options,
+ double* x);
+
+ LinearSolver::Options options_;
+ scoped_ptr<internal::ImplicitSchurComplement> schur_complement_;
+ scoped_ptr<VisibilityBasedPreconditioner> visibility_based_preconditioner_;
+ Vector reduced_linear_system_solution_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_ITERATIVE_SCHUR_COMPLEMENT_SOLVER_H_
diff --git a/internal/ceres/iterative_schur_complement_solver_test.cc b/internal/ceres/iterative_schur_complement_solver_test.cc
new file mode 100644
index 0000000..86e7825
--- /dev/null
+++ b/internal/ceres/iterative_schur_complement_solver_test.cc
@@ -0,0 +1,123 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// TODO(sameeragarwal): Add support for larger, more complicated and
+// poorly conditioned problems, for both correctness testing and
+// benchmarking.
+
+#include "ceres/iterative_schur_complement_solver.h"
+
+#include <cstddef>
+#include "Eigen/Dense"
+#include "ceres/block_random_access_dense_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/casts.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/linear_solver.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using testing::AssertionResult;
+
+const double kEpsilon = 1e-14;
+
+class IterativeSchurComplementSolverTest : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ scoped_ptr<LinearLeastSquaresProblem> problem(
+ CreateLinearLeastSquaresProblemFromId(2));
+
+ CHECK_NOTNULL(problem.get());
+ A_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
+ b_.reset(problem->b.release());
+ D_.reset(problem->D.release());
+
+ num_cols_ = A_->num_cols();
+ num_rows_ = A_->num_rows();
+ num_eliminate_blocks_ = problem->num_eliminate_blocks;
+ }
+
+ AssertionResult TestSolver(double* D) {
+ TripletSparseMatrix triplet_A(A_->num_rows(),
+ A_->num_cols(),
+ A_->num_nonzeros());
+ A_->ToTripletSparseMatrix(&triplet_A);
+
+ DenseSparseMatrix dense_A(triplet_A);
+
+ LinearSolver::Options options;
+ options.type = DENSE_QR;
+ scoped_ptr<LinearSolver> qr(LinearSolver::Create(options));
+
+ LinearSolver::PerSolveOptions per_solve_options;
+ per_solve_options.D = D;
+ Vector reference_solution(num_cols_);
+ qr->Solve(&dense_A, b_.get(), per_solve_options, reference_solution.data());
+
+ options.elimination_groups.push_back(num_eliminate_blocks_);
+ options.max_num_iterations = num_cols_;
+ IterativeSchurComplementSolver isc(options);
+
+ Vector isc_sol(num_cols_);
+ per_solve_options.r_tolerance = 1e-12;
+ isc.Solve(A_.get(), b_.get(), per_solve_options, isc_sol.data());
+ double diff = (isc_sol - reference_solution).norm();
+ if (diff < kEpsilon) {
+ return testing::AssertionSuccess();
+ } else {
+ return testing::AssertionFailure()
+ << "The reference solution differs from the ITERATIVE_SCHUR"
+ << " solution by " << diff << " which is more than " << kEpsilon;
+ }
+ }
+
+ int num_rows_;
+ int num_cols_;
+ int num_eliminate_blocks_;
+ scoped_ptr<BlockSparseMatrix> A_;
+ scoped_array<double> b_;
+ scoped_array<double> D_;
+};
+
+TEST_F(IterativeSchurComplementSolverTest, SolverTest) {
+ EXPECT_TRUE(TestSolver(NULL));
+ EXPECT_TRUE(TestSolver(D_.get()));
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/jet_test.cc b/internal/ceres/jet_test.cc
new file mode 100644
index 0000000..0dd4336
--- /dev/null
+++ b/internal/ceres/jet_test.cc
@@ -0,0 +1,335 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/jet.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+#include "ceres/fpclassify.h"
+#include "ceres/stringprintf.h"
+#include "ceres/test_util.h"
+
+#define VL VLOG(1)
+
+namespace ceres {
+namespace internal {
+
+const double kE = 2.71828182845904523536;
+
+typedef Jet<double, 2> J;
+
+// Convenient shorthand for making a jet.
+J MakeJet(double a, double v0, double v1) {
+ J z;
+ z.a = a;
+ z.v[0] = v0;
+ z.v[1] = v1;
+ return z;
+}
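+// For example, MakeJet(2.0, 1.0, 0.0) is the value 2.0 carrying the partial
+// derivatives (1, 0); squaring it gives a == 4.0 and v == (4, 0), which is
+// d(x^2)/dx evaluated at x == 2 for the first variable.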
+
+// On a 32-bit optimized build, the mismatch is about 1.4e-14.
+double const kTolerance = 1e-13;
+
+void ExpectJetsClose(const J &x, const J &y) {
+ ExpectClose(x.a, y.a, kTolerance);
+ ExpectClose(x.v[0], y.v[0], kTolerance);
+ ExpectClose(x.v[1], y.v[1], kTolerance);
+}
+
+TEST(Jet, Jet) {
+ // Pick arbitrary values for x and y.
+ J x = MakeJet(2.3, -2.7, 1e-3);
+ J y = MakeJet(1.7, 0.5, 1e+2);
+
+ VL << "x = " << x;
+ VL << "y = " << y;
+
+ { // Check that log(exp(x)) == x.
+ J z = exp(x);
+ J w = log(z);
+ VL << "z = " << z;
+ VL << "w = " << w;
+ ExpectJetsClose(w, x);
+ }
+
+ { // Check that (x * y) / x == y.
+ J z = x * y;
+ J w = z / x;
+ VL << "z = " << z;
+ VL << "w = " << w;
+ ExpectJetsClose(w, y);
+ }
+
+ { // Check that sqrt(x * x) == x.
+ J z = x * x;
+ J w = sqrt(z);
+ VL << "z = " << z;
+ VL << "w = " << w;
+ ExpectJetsClose(w, x);
+ }
+
+ { // Check that sqrt(y) * sqrt(y) == y.
+ J z = sqrt(y);
+ J w = z * z;
+ VL << "z = " << z;
+ VL << "w = " << w;
+ ExpectJetsClose(w, y);
+ }
+
+ { // Check that cos(2*x) = cos(x)^2 - sin(x)^2
+ J z = cos(J(2.0) * x);
+ J w = cos(x)*cos(x) - sin(x)*sin(x);
+ VL << "z = " << z;
+ VL << "w = " << w;
+ ExpectJetsClose(w, z);
+ }
+
+ { // Check that sin(2*x) = 2*cos(x)*sin(x)
+ J z = sin(J(2.0) * x);
+ J w = J(2.0)*cos(x)*sin(x);
+ VL << "z = " << z;
+ VL << "w = " << w;
+ ExpectJetsClose(w, z);
+ }
+
+ { // Check that cos(x)*cos(x) + sin(x)*sin(x) = 1
+ J z = cos(x) * cos(x);
+ J w = sin(x) * sin(x);
+ VL << "z = " << z;
+ VL << "w = " << w;
+ ExpectJetsClose(z + w, J(1.0));
+ }
+
+ { // Check that atan2(r*sin(t), r*cos(t)) = t.
+ J t = MakeJet(0.7, -0.3, +1.5);
+ J r = MakeJet(2.3, 0.13, -2.4);
+ VL << "t = " << t;
+ VL << "r = " << r;
+
+ J u = atan2(r * sin(t), r * cos(t));
+ VL << "u = " << u;
+
+ ExpectJetsClose(u, t);
+ }
+
+ { // Check that pow(x, 1) == x.
+ VL << "x = " << x;
+
+ J u = pow(x, 1.);
+ VL << "u = " << u;
+
+ ExpectJetsClose(x, u);
+ }
+
+  { // Check that pow(x, y) == x, where y is the jet constant 1.
+ J y = MakeJet(1, 0.0, 0.0);
+ VL << "x = " << x;
+ VL << "y = " << y;
+
+ J u = pow(x, y);
+ VL << "u = " << u;
+
+ ExpectJetsClose(x, u);
+ }
+
+ { // Check that pow(e, log(x)) == x.
+ J logx = log(x);
+
+ VL << "x = " << x;
+ VL << "y = " << y;
+
+ J u = pow(kE, logx);
+ VL << "u = " << u;
+
+ ExpectJetsClose(x, u);
+ }
+
+ { // Check that pow(e, log(x)) == x.
+ J logx = log(x);
+ J e = MakeJet(kE, 0., 0.);
+ VL << "x = " << x;
+ VL << "log(x) = " << logx;
+
+ J u = pow(e, logx);
+ VL << "u = " << u;
+
+ ExpectJetsClose(x, u);
+ }
+
+ { // Check that pow(e, log(x)) == x.
+ J logx = log(x);
+ J e = MakeJet(kE, 0., 0.);
+ VL << "x = " << x;
+ VL << "logx = " << logx;
+
+ J u = pow(e, logx);
+ VL << "u = " << u;
+
+ ExpectJetsClose(x, u);
+ }
+
+ { // Check that pow(x,y) = exp(y*log(x)).
+ J logx = log(x);
+ J e = MakeJet(kE, 0., 0.);
+ VL << "x = " << x;
+ VL << "logx = " << logx;
+
+ J u = pow(e, y*logx);
+ J v = pow(x, y);
+ VL << "u = " << u;
+ VL << "v = " << v;
+
+ ExpectJetsClose(v, u);
+ }
+
+ { // Check that 1 + x == x + 1.
+ J a = x + 1.0;
+ J b = 1.0 + x;
+
+ ExpectJetsClose(a, b);
+ }
+
+ { // Check that 1 - x == -(x - 1).
+ J a = 1.0 - x;
+ J b = -(x - 1.0);
+
+ ExpectJetsClose(a, b);
+ }
+
+ { // Check that x/s == x*s.
+ J a = x / 5.0;
+ J b = x * 5.0;
+
+ ExpectJetsClose(5.0 * a, b / 5.0);
+ }
+
+ { // Check that x / y == 1 / (y / x).
+ J a = x / y;
+ J b = 1.0 / (y / x);
+ VL << "a = " << a;
+ VL << "b = " << b;
+
+ ExpectJetsClose(a, b);
+ }
+
+ { // Check that abs(-x * x) == sqrt(x * x).
+ ExpectJetsClose(abs(-x), sqrt(x * x));
+ }
+
+ { // Check that cos(acos(x)) == x.
+ J a = MakeJet(0.1, -2.7, 1e-3);
+ ExpectJetsClose(cos(acos(a)), a);
+ ExpectJetsClose(acos(cos(a)), a);
+
+ J b = MakeJet(0.6, 0.5, 1e+2);
+ ExpectJetsClose(cos(acos(b)), b);
+ ExpectJetsClose(acos(cos(b)), b);
+ }
+
+ { // Check that sin(asin(x)) == x.
+ J a = MakeJet(0.1, -2.7, 1e-3);
+ ExpectJetsClose(sin(asin(a)), a);
+ ExpectJetsClose(asin(sin(a)), a);
+
+ J b = MakeJet(0.4, 0.5, 1e+2);
+ ExpectJetsClose(sin(asin(b)), b);
+ ExpectJetsClose(asin(sin(b)), b);
+ }
+}
+
+TEST(Jet, JetsInEigenMatrices) {
+ J x = MakeJet(2.3, -2.7, 1e-3);
+ J y = MakeJet(1.7, 0.5, 1e+2);
+ J z = MakeJet(5.3, -4.7, 1e-3);
+ J w = MakeJet(9.7, 1.5, 10.1);
+
+ Eigen::Matrix<J, 2, 2> M;
+ Eigen::Matrix<J, 2, 1> v, r1, r2;
+
+ M << x, y, z, w;
+ v << x, z;
+
+ // Check that M * v == (v^T * M^T)^T
+ r1 = M * v;
+ r2 = (v.transpose() * M.transpose()).transpose();
+
+ ExpectJetsClose(r1(0), r2(0));
+ ExpectJetsClose(r1(1), r2(1));
+}
+
+TEST(JetTraitsTest, ClassificationMixed) {
+ Jet<double, 3> a(5.5, 0);
+ a.v[0] = std::numeric_limits<double>::quiet_NaN();
+ a.v[1] = std::numeric_limits<double>::infinity();
+ a.v[2] = -std::numeric_limits<double>::infinity();
+ EXPECT_FALSE(IsFinite(a));
+ EXPECT_FALSE(IsNormal(a));
+ EXPECT_TRUE(IsInfinite(a));
+ EXPECT_TRUE(IsNaN(a));
+}
+
+TEST(JetTraitsTest, ClassificationNaN) {
+ Jet<double, 3> a(5.5, 0);
+ a.v[0] = std::numeric_limits<double>::quiet_NaN();
+ a.v[1] = 0.0;
+ a.v[2] = 0.0;
+ EXPECT_FALSE(IsFinite(a));
+ EXPECT_FALSE(IsNormal(a));
+ EXPECT_FALSE(IsInfinite(a));
+ EXPECT_TRUE(IsNaN(a));
+}
+
+TEST(JetTraitsTest, ClassificationInf) {
+ Jet<double, 3> a(5.5, 0);
+ a.v[0] = std::numeric_limits<double>::infinity();
+ a.v[1] = 0.0;
+ a.v[2] = 0.0;
+ EXPECT_FALSE(IsFinite(a));
+ EXPECT_FALSE(IsNormal(a));
+ EXPECT_TRUE(IsInfinite(a));
+ EXPECT_FALSE(IsNaN(a));
+}
+
+TEST(JetTraitsTest, ClassificationFinite) {
+ Jet<double, 3> a(5.5, 0);
+ a.v[0] = 100.0;
+ a.v[1] = 1.0;
+ a.v[2] = 3.14159;
+ EXPECT_TRUE(IsFinite(a));
+ EXPECT_TRUE(IsNormal(a));
+ EXPECT_FALSE(IsInfinite(a));
+ EXPECT_FALSE(IsNaN(a));
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/levenberg_marquardt_strategy.cc b/internal/ceres/levenberg_marquardt_strategy.cc
new file mode 100644
index 0000000..9e6a59e
--- /dev/null
+++ b/internal/ceres/levenberg_marquardt_strategy.cc
@@ -0,0 +1,144 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/levenberg_marquardt_strategy.h"
+
+#include <cmath>
+#include "Eigen/Core"
+#include "ceres/array_utils.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_solver.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/trust_region_strategy.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+LevenbergMarquardtStrategy::LevenbergMarquardtStrategy(
+ const TrustRegionStrategy::Options& options)
+ : linear_solver_(options.linear_solver),
+ radius_(options.initial_radius),
+ max_radius_(options.max_radius),
+ min_diagonal_(options.lm_min_diagonal),
+ max_diagonal_(options.lm_max_diagonal),
+ decrease_factor_(2.0),
+ reuse_diagonal_(false) {
+ CHECK_NOTNULL(linear_solver_);
+ CHECK_GT(min_diagonal_, 0.0);
+ CHECK_LE(min_diagonal_, max_diagonal_);
+ CHECK_GT(max_radius_, 0.0);
+}
+
+LevenbergMarquardtStrategy::~LevenbergMarquardtStrategy() {
+}
+
+TrustRegionStrategy::Summary LevenbergMarquardtStrategy::ComputeStep(
+ const TrustRegionStrategy::PerSolveOptions& per_solve_options,
+ SparseMatrix* jacobian,
+ const double* residuals,
+ double* step) {
+ CHECK_NOTNULL(jacobian);
+ CHECK_NOTNULL(residuals);
+ CHECK_NOTNULL(step);
+
+ const int num_parameters = jacobian->num_cols();
+ if (!reuse_diagonal_) {
+ if (diagonal_.rows() != num_parameters) {
+ diagonal_.resize(num_parameters, 1);
+ }
+
+ jacobian->SquaredColumnNorm(diagonal_.data());
+ for (int i = 0; i < num_parameters; ++i) {
+ diagonal_[i] = min(max(diagonal_[i], min_diagonal_), max_diagonal_);
+ }
+ }
+
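+ // The linear solver is handed D = sqrt(diag(J'J) / radius), so the
+ // regularization term it adds, |D * step|^2, equals
+ // (1 / radius) * step' * diag(J'J) * step, i.e. the classic
+ // Levenberg-Marquardt damping of the normal equations.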
+ lm_diagonal_ = (diagonal_ / radius_).array().sqrt();
+
+ LinearSolver::PerSolveOptions solve_options;
+ solve_options.D = lm_diagonal_.data();
+ solve_options.q_tolerance = per_solve_options.eta;
+ // Disable r_tolerance checking, since we only care about
+ // termination via the q_tolerance. As Nash and Sofer show,
+ // r_tolerance based termination is essentially useless in
+ // Truncated Newton methods.
+ solve_options.r_tolerance = -1.0;
+
+ // Invalidate the output array lm_step, so that we can detect if
+ // the linear solver generated numerical garbage. This is known
+ // to happen for the DENSE_QR and DENSE_SCHUR solvers when
+ // the Jacobian is severely rank deficient and mu is too small.
+ InvalidateArray(num_parameters, step);
+
+ // Instead of solving Jx = -r, solve Jy = r.
+ // Then x can be found as x = -y, but the inputs jacobian and residuals
+ // do not need to be modified.
+ LinearSolver::Summary linear_solver_summary =
+ linear_solver_->Solve(jacobian, residuals, solve_options, step);
+ if (linear_solver_summary.termination_type == FAILURE ||
+ !IsArrayValid(num_parameters, step)) {
+ LOG(WARNING) << "Linear solver failure. Failed to compute a finite step.";
+ linear_solver_summary.termination_type = FAILURE;
+ } else {
+ VectorRef(step, num_parameters) *= -1.0;
+ }
+
+ reuse_diagonal_ = true;
+
+ TrustRegionStrategy::Summary summary;
+ summary.residual_norm = linear_solver_summary.residual_norm;
+ summary.num_iterations = linear_solver_summary.num_iterations;
+ summary.termination_type = linear_solver_summary.termination_type;
+ return summary;
+}
+
+void LevenbergMarquardtStrategy::StepAccepted(double step_quality) {
+ CHECK_GT(step_quality, 0.0);
+ radius_ = radius_ / std::max(1.0 / 3.0,
+ 1.0 - pow(2.0 * step_quality - 1.0, 3));
+ radius_ = std::min(max_radius_, radius_);
+ decrease_factor_ = 2.0;
+ reuse_diagonal_ = false;
+}
+
+void LevenbergMarquardtStrategy::StepRejected(double step_quality) {
+ radius_ = radius_ / decrease_factor_;
+ decrease_factor_ *= 2.0;
+ reuse_diagonal_ = true;
+}
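+
+// Worked example of the two update rules above, starting from
+// radius = 2.0 and decrease_factor = 2.0 (see also
+// levenberg_marquardt_strategy_test.cc):
+//
+//   StepRejected(0.0)  : radius = 2.0 / 2 = 1.0,  decrease_factor = 4
+//   StepRejected(-1.0) : radius = 1.0 / 4 = 0.25, decrease_factor = 8
+//   StepAccepted(1.0)  : radius = 0.25 / max(1/3, 1 - (2*1.0 - 1)^3)
+//                               = 0.25 * 3 = 0.75, decrease_factor = 2
+//   StepAccepted(0.25) : radius = 0.75 / max(1/3, 1 - (2*0.25 - 1)^3)
+//                               = 0.75 / 1.125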
+
+double LevenbergMarquardtStrategy::Radius() const {
+ return radius_;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/levenberg_marquardt_strategy.h b/internal/ceres/levenberg_marquardt_strategy.h
new file mode 100644
index 0000000..90c2178
--- /dev/null
+++ b/internal/ceres/levenberg_marquardt_strategy.h
@@ -0,0 +1,86 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_LEVENBERG_MARQUARDT_STRATEGY_H_
+#define CERES_INTERNAL_LEVENBERG_MARQUARDT_STRATEGY_H_
+
+#include "ceres/internal/eigen.h"
+#include "ceres/trust_region_strategy.h"
+
+namespace ceres {
+namespace internal {
+
+// Levenberg-Marquardt step computation and trust region sizing
+// strategy based on "Methods for Nonlinear Least Squares" by
+// K. Madsen, H.B. Nielsen and O. Tingleff. Available to download from
+//
+// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
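+//
+// Illustrative use (a sketch of how a trust region loop might drive
+// this class; the jacobian, residuals and step arrays are assumed to
+// exist, and the accept/reject decision is made by the caller):
+//
+//   LevenbergMarquardtStrategy strategy(options);
+//   TrustRegionStrategy::PerSolveOptions pso;
+//   pso.eta = 1e-1;
+//   TrustRegionStrategy::Summary summary =
+//       strategy.ComputeStep(pso, jacobian, residuals, step);
+//   if (step_reduced_the_cost) {
+//     strategy.StepAccepted(relative_decrease);
+//   } else {
+//     strategy.StepRejected(relative_decrease);
+//   }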
+class LevenbergMarquardtStrategy : public TrustRegionStrategy {
+ public:
+ LevenbergMarquardtStrategy(const TrustRegionStrategy::Options& options);
+ virtual ~LevenbergMarquardtStrategy();
+
+ // TrustRegionStrategy interface
+ virtual TrustRegionStrategy::Summary ComputeStep(
+ const TrustRegionStrategy::PerSolveOptions& per_solve_options,
+ SparseMatrix* jacobian,
+ const double* residuals,
+ double* step);
+ virtual void StepAccepted(double step_quality);
+ virtual void StepRejected(double step_quality);
+ virtual void StepIsInvalid() {
+ // Treat the current step as a rejected step with no increase in
+ // solution quality. Since rejected steps lead to decrease in the
+ // size of the trust region, the next time ComputeStep is called,
+ // this will lead to a better conditioned system.
+ StepRejected(0.0);
+ }
+
+ virtual double Radius() const;
+
+ private:
+ LinearSolver* linear_solver_;
+ double radius_;
+ double max_radius_;
+ const double min_diagonal_;
+ const double max_diagonal_;
+ double decrease_factor_;
+ bool reuse_diagonal_;
+ Vector diagonal_; // diagonal_ = diag(J'J)
+ // Scaled copy of diagonal_. Stored here as optimization to prevent
+ // allocations in every iteration and reuse when a step fails and
+ // ComputeStep is called again.
+ Vector lm_diagonal_; // lm_diagonal_ = diagonal_ / radius_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_LEVENBERG_MARQUARDT_STRATEGY_H_
diff --git a/internal/ceres/levenberg_marquardt_strategy_test.cc b/internal/ceres/levenberg_marquardt_strategy_test.cc
new file mode 100644
index 0000000..0ce44c4
--- /dev/null
+++ b/internal/ceres/levenberg_marquardt_strategy_test.cc
@@ -0,0 +1,157 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/levenberg_marquardt_strategy.h"
+#include "ceres/linear_solver.h"
+#include "ceres/trust_region_strategy.h"
+#include "glog/logging.h"
+#include "gmock/gmock.h"
+#include "gmock/mock-log.h"
+#include "gtest/gtest.h"
+
+using testing::AllOf;
+using testing::AnyNumber;
+using testing::HasSubstr;
+using testing::ScopedMockLog;
+using testing::_;
+
+namespace ceres {
+namespace internal {
+
+const double kTolerance = 1e-16;
+
+// Linear solver that takes as input a vector and checks that the
+// caller passes the same vector as LinearSolver::PerSolveOptions.D.
+class RegularizationCheckingLinearSolver : public DenseSparseMatrixSolver {
+ public:
+ RegularizationCheckingLinearSolver(const int num_cols, const double* diagonal)
+ : num_cols_(num_cols),
+ diagonal_(diagonal) {
+ }
+
+ virtual ~RegularizationCheckingLinearSolver() {}
+
+ private:
+ virtual LinearSolver::Summary SolveImpl(
+ DenseSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x) {
+ CHECK_NOTNULL(per_solve_options.D);
+ for (int i = 0; i < num_cols_; ++i) {
+ EXPECT_NEAR(per_solve_options.D[i], diagonal_[i], kTolerance)
+ << i << " " << per_solve_options.D[i] << " " << diagonal_[i];
+ }
+ return LinearSolver::Summary();
+ }
+
+ const int num_cols_;
+ const double* diagonal_;
+};
+
+TEST(LevenbergMarquardtStrategy, AcceptRejectStepRadiusScaling) {
+ TrustRegionStrategy::Options options;
+ options.initial_radius = 2.0;
+ options.max_radius = 20.0;
+ options.lm_min_diagonal = 1e-8;
+ options.lm_max_diagonal = 1e8;
+
+ // We need a non-null pointer here, so anything should do.
+ scoped_ptr<LinearSolver> linear_solver(
+ new RegularizationCheckingLinearSolver(0, NULL));
+ options.linear_solver = linear_solver.get();
+
+ LevenbergMarquardtStrategy lms(options);
+ EXPECT_EQ(lms.Radius(), options.initial_radius);
+ lms.StepRejected(0.0);
+ EXPECT_EQ(lms.Radius(), 1.0);
+ lms.StepRejected(-1.0);
+ EXPECT_EQ(lms.Radius(), 0.25);
+ lms.StepAccepted(1.0);
+ EXPECT_EQ(lms.Radius(), 0.25 * 3.0);
+ lms.StepAccepted(1.0);
+ EXPECT_EQ(lms.Radius(), 0.25 * 3.0 * 3.0);
+ lms.StepAccepted(0.25);
+ EXPECT_EQ(lms.Radius(), 0.25 * 3.0 * 3.0 / 1.125);
+ lms.StepAccepted(1.0);
+ EXPECT_EQ(lms.Radius(), 0.25 * 3.0 * 3.0 / 1.125 * 3.0);
+ lms.StepAccepted(1.0);
+ EXPECT_EQ(lms.Radius(), 0.25 * 3.0 * 3.0 / 1.125 * 3.0 * 3.0);
+ lms.StepAccepted(1.0);
+ EXPECT_EQ(lms.Radius(), options.max_radius);
+}
+
+TEST(LevenbergMarquardtStrategy, CorrectDiagonalToLinearSolver) {
+ Matrix jacobian(2,3);
+ jacobian.setZero();
+ jacobian(0,0) = 0.0;
+ jacobian(0,1) = 1.0;
+ jacobian(1,1) = 1.0;
+ jacobian(0,2) = 100.0;
+
+ double residual = 1.0;
+ double x[3];
+ DenseSparseMatrix dsm(jacobian);
+
+ TrustRegionStrategy::Options options;
+ options.initial_radius = 2.0;
+ options.max_radius = 20.0;
+ options.lm_min_diagonal = 1e-2;
+ options.lm_max_diagonal = 1e2;
+
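+ // The strategy clamps each entry of diag(J'J) to
+ // [lm_min_diagonal, lm_max_diagonal] and scales it by
+ // 1 / initial_radius before taking the square root, so this is the
+ // diagonal we expect the linear solver to be handed as D.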
+ double diagonal[3];
+ diagonal[0] = options.lm_min_diagonal;
+ diagonal[1] = 2.0;
+ diagonal[2] = options.lm_max_diagonal;
+ for (int i = 0; i < 3; ++i) {
+ diagonal[i] = sqrt(diagonal[i] / options.initial_radius);
+ }
+
+ RegularizationCheckingLinearSolver linear_solver(3, diagonal);
+ options.linear_solver = &linear_solver;
+
+ LevenbergMarquardtStrategy lms(options);
+ TrustRegionStrategy::PerSolveOptions pso;
+
+ {
+ ScopedMockLog log;
+ EXPECT_CALL(log, Log(_, _, _)).Times(AnyNumber());
+ EXPECT_CALL(log, Log(WARNING, _,
+ HasSubstr("Failed to compute a finite step.")));
+
+ TrustRegionStrategy::Summary summary = lms.ComputeStep(pso, &dsm, &residual, x);
+ EXPECT_EQ(summary.termination_type, FAILURE);
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/linear_least_squares_problems.cc b/internal/ceres/linear_least_squares_problems.cc
new file mode 100644
index 0000000..3e3bcd0
--- /dev/null
+++ b/internal/ceres/linear_least_squares_problems.cc
@@ -0,0 +1,769 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/linear_least_squares_problems.h"
+
+#include <cstdio>
+#include <string>
+#include <vector>
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/casts.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/file.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/matrix_proto.h"
+#include "ceres/stringprintf.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+LinearLeastSquaresProblem* CreateLinearLeastSquaresProblemFromId(int id) {
+ switch (id) {
+ case 0:
+ return LinearLeastSquaresProblem0();
+ case 1:
+ return LinearLeastSquaresProblem1();
+ case 2:
+ return LinearLeastSquaresProblem2();
+ case 3:
+ return LinearLeastSquaresProblem3();
+ default:
+ LOG(FATAL) << "Unknown problem id requested " << id;
+ }
+ return NULL;
+}
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+LinearLeastSquaresProblem* CreateLinearLeastSquaresProblemFromFile(
+ const string& filename) {
+ LinearLeastSquaresProblemProto problem_proto;
+ {
+ string serialized_proto;
+ ReadFileToStringOrDie(filename, &serialized_proto);
+ CHECK(problem_proto.ParseFromString(serialized_proto));
+ }
+
+ LinearLeastSquaresProblem* problem = new LinearLeastSquaresProblem;
+ const SparseMatrixProto& A = problem_proto.a();
+
+ if (A.has_block_matrix()) {
+ problem->A.reset(new BlockSparseMatrix(A));
+ } else if (A.has_triplet_matrix()) {
+ problem->A.reset(new TripletSparseMatrix(A));
+ } else {
+ problem->A.reset(new CompressedRowSparseMatrix(A));
+ }
+
+ if (problem_proto.b_size() > 0) {
+ problem->b.reset(new double[problem_proto.b_size()]);
+ for (int i = 0; i < problem_proto.b_size(); ++i) {
+ problem->b[i] = problem_proto.b(i);
+ }
+ }
+
+ if (problem_proto.d_size() > 0) {
+ problem->D.reset(new double[problem_proto.d_size()]);
+ for (int i = 0; i < problem_proto.d_size(); ++i) {
+ problem->D[i] = problem_proto.d(i);
+ }
+ }
+
+ if (problem_proto.d_size() > 0) {
+ if (problem_proto.x_size() > 0) {
+ problem->x_D.reset(new double[problem_proto.x_size()]);
+ for (int i = 0; i < problem_proto.x_size(); ++i) {
+ problem->x_D[i] = problem_proto.x(i);
+ }
+ }
+ } else {
+ if (problem_proto.x_size() > 0) {
+ problem->x.reset(new double[problem_proto.x_size()]);
+ for (int i = 0; i < problem_proto.x_size(); ++i) {
+ problem->x[i] = problem_proto.x(i);
+ }
+ }
+ }
+
+ problem->num_eliminate_blocks = 0;
+ if (problem_proto.has_num_eliminate_blocks()) {
+ problem->num_eliminate_blocks = problem_proto.num_eliminate_blocks();
+ }
+
+ return problem;
+}
+#else
+LinearLeastSquaresProblem* CreateLinearLeastSquaresProblemFromFile(
+ const string& filename) {
+ LOG(FATAL)
+ << "Loading a least squares problem from disk requires "
+ << "Ceres to be built with Protocol Buffers support.";
+ return NULL;
+}
+#endif // CERES_NO_PROTOCOL_BUFFERS
+
+/*
+A = [1 2]
+ [3 4]
+ [6 -10]
+
+b = [ 8
+ 18
+ -18]
+
+x = [2
+ 3]
+
+D = [1
+ 2]
+
+x_D = [1.78448275;
+ 2.82327586;]
+ */
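+// Note: x above satisfies the normal equations A'A x = A'b exactly
+// (in fact Ax = b), and x_D is the solution of the regularized system
+// (A'A + D'D) x_D = A'b, which is how the values listed were obtained.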
+LinearLeastSquaresProblem* LinearLeastSquaresProblem0() {
+ LinearLeastSquaresProblem* problem = new LinearLeastSquaresProblem;
+
+ TripletSparseMatrix* A = new TripletSparseMatrix(3, 2, 6);
+ problem->b.reset(new double[3]);
+ problem->D.reset(new double[2]);
+
+ problem->x.reset(new double[2]);
+ problem->x_D.reset(new double[2]);
+
+ int* Ai = A->mutable_rows();
+ int* Aj = A->mutable_cols();
+ double* Ax = A->mutable_values();
+
+ int counter = 0;
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ Ai[counter] = i;
+ Aj[counter] = j;
+ ++counter;
+ }
+ }
+
+ Ax[0] = 1.;
+ Ax[1] = 2.;
+ Ax[2] = 3.;
+ Ax[3] = 4.;
+ Ax[4] = 6;
+ Ax[5] = -10;
+ A->set_num_nonzeros(6);
+ problem->A.reset(A);
+
+ problem->b[0] = 8;
+ problem->b[1] = 18;
+ problem->b[2] = -18;
+
+ problem->x[0] = 2.0;
+ problem->x[1] = 3.0;
+
+ problem->D[0] = 1;
+ problem->D[1] = 2;
+
+ problem->x_D[0] = 1.78448275;
+ problem->x_D[1] = 2.82327586;
+ return problem;
+}
+
+
+/*
+ A = [1 0 | 2 0 0
+ 3 0 | 0 4 0
+ 0 5 | 0 0 6
+ 0 7 | 8 0 0
+ 0 9 | 1 0 0
+ 0 0 | 1 1 1]
+
+ b = [0
+ 1
+ 2
+ 3
+ 4
+ 5]
+
+ c = A'* b = [ 3
+ 67
+ 33
+ 9
+ 17]
+
+ A'A = [10 0 2 12 0
+ 0 155 65 0 30
+ 2 65 70 1 1
+ 12 0 1 17 1
+ 0 30 1 1 37]
+
+ S = [ 42.3419 -1.4000 -11.5806
+ -1.4000 2.6000 1.0000
+ 11.5806 1.0000 31.1935]
+
+ r = [ 4.3032
+ 5.4000
+ 5.0323]
+
+ S\r = [ 0.2102
+ 2.1367
+ 0.1388]
+
+ A\b = [-2.3061
+ 0.3172
+ 0.2102
+ 2.1367
+ 0.1388]
+*/
+// The following two functions create a TripletSparseMatrix and a
+// BlockSparseMatrix version of this problem.
+
+// TripletSparseMatrix version.
+LinearLeastSquaresProblem* LinearLeastSquaresProblem1() {
+ int num_rows = 6;
+ int num_cols = 5;
+
+ LinearLeastSquaresProblem* problem = new LinearLeastSquaresProblem;
+ TripletSparseMatrix* A = new TripletSparseMatrix(num_rows,
+ num_cols,
+ num_rows * num_cols);
+ problem->b.reset(new double[num_rows]);
+ problem->D.reset(new double[num_cols]);
+ problem->num_eliminate_blocks = 2;
+
+ int* rows = A->mutable_rows();
+ int* cols = A->mutable_cols();
+ double* values = A->mutable_values();
+
+ int nnz = 0;
+
+ // Row 1
+ {
+ rows[nnz] = 0;
+ cols[nnz] = 0;
+ values[nnz++] = 1;
+
+ rows[nnz] = 0;
+ cols[nnz] = 2;
+ values[nnz++] = 2;
+ }
+
+ // Row 2
+ {
+ rows[nnz] = 1;
+ cols[nnz] = 0;
+ values[nnz++] = 3;
+
+ rows[nnz] = 1;
+ cols[nnz] = 3;
+ values[nnz++] = 4;
+ }
+
+ // Row 3
+ {
+ rows[nnz] = 2;
+ cols[nnz] = 1;
+ values[nnz++] = 5;
+
+ rows[nnz] = 2;
+ cols[nnz] = 4;
+ values[nnz++] = 6;
+ }
+
+ // Row 4
+ {
+ rows[nnz] = 3;
+ cols[nnz] = 1;
+ values[nnz++] = 7;
+
+ rows[nnz] = 3;
+ cols[nnz] = 2;
+ values[nnz++] = 8;
+ }
+
+ // Row 5
+ {
+ rows[nnz] = 4;
+ cols[nnz] = 1;
+ values[nnz++] = 9;
+
+ rows[nnz] = 4;
+ cols[nnz] = 2;
+ values[nnz++] = 1;
+ }
+
+ // Row 6
+ {
+ rows[nnz] = 5;
+ cols[nnz] = 2;
+ values[nnz++] = 1;
+
+ rows[nnz] = 5;
+ cols[nnz] = 3;
+ values[nnz++] = 1;
+
+ rows[nnz] = 5;
+ cols[nnz] = 4;
+ values[nnz++] = 1;
+ }
+
+ A->set_num_nonzeros(nnz);
+ CHECK(A->IsValid());
+
+ problem->A.reset(A);
+
+ for (int i = 0; i < num_cols; ++i) {
+ problem->D.get()[i] = 1;
+ }
+
+ for (int i = 0; i < num_rows; ++i) {
+ problem->b.get()[i] = i;
+ }
+
+ return problem;
+}
+
+// BlockSparseMatrix version
+LinearLeastSquaresProblem* LinearLeastSquaresProblem2() {
+ int num_rows = 6;
+ int num_cols = 5;
+
+ LinearLeastSquaresProblem* problem = new LinearLeastSquaresProblem;
+
+ problem->b.reset(new double[num_rows]);
+ problem->D.reset(new double[num_cols]);
+ problem->num_eliminate_blocks = 2;
+
+ CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
+ scoped_array<double> values(new double[num_rows * num_cols]);
+
+ for (int c = 0; c < num_cols; ++c) {
+ bs->cols.push_back(Block());
+ bs->cols.back().size = 1;
+ bs->cols.back().position = c;
+ }
+
+ int nnz = 0;
+
+ // Row 1
+ {
+ values[nnz++] = 1;
+ values[nnz++] = 2;
+
+ bs->rows.push_back(CompressedRow());
+ CompressedRow& row = bs->rows.back();
+ row.block.size = 1;
+ row.block.position = 0;
+ row.cells.push_back(Cell(0, 0));
+ row.cells.push_back(Cell(2, 1));
+ }
+
+ // Row 2
+ {
+ values[nnz++] = 3;
+ values[nnz++] = 4;
+
+ bs->rows.push_back(CompressedRow());
+ CompressedRow& row = bs->rows.back();
+ row.block.size = 1;
+ row.block.position = 1;
+ row.cells.push_back(Cell(0, 2));
+ row.cells.push_back(Cell(3, 3));
+ }
+
+ // Row 3
+ {
+ values[nnz++] = 5;
+ values[nnz++] = 6;
+
+ bs->rows.push_back(CompressedRow());
+ CompressedRow& row = bs->rows.back();
+ row.block.size = 1;
+ row.block.position = 2;
+ row.cells.push_back(Cell(1, 4));
+ row.cells.push_back(Cell(4, 5));
+ }
+
+ // Row 4
+ {
+ values[nnz++] = 7;
+ values[nnz++] = 8;
+
+ bs->rows.push_back(CompressedRow());
+ CompressedRow& row = bs->rows.back();
+ row.block.size = 1;
+ row.block.position = 3;
+ row.cells.push_back(Cell(1, 6));
+ row.cells.push_back(Cell(2, 7));
+ }
+
+ // Row 5
+ {
+ values[nnz++] = 9;
+ values[nnz++] = 1;
+
+ bs->rows.push_back(CompressedRow());
+ CompressedRow& row = bs->rows.back();
+ row.block.size = 1;
+ row.block.position = 4;
+ row.cells.push_back(Cell(1, 8));
+ row.cells.push_back(Cell(2, 9));
+ }
+
+ // Row 6
+ {
+ values[nnz++] = 1;
+ values[nnz++] = 1;
+ values[nnz++] = 1;
+
+ bs->rows.push_back(CompressedRow());
+ CompressedRow& row = bs->rows.back();
+ row.block.size = 1;
+ row.block.position = 5;
+ row.cells.push_back(Cell(2, 10));
+ row.cells.push_back(Cell(3, 11));
+ row.cells.push_back(Cell(4, 12));
+ }
+
+ BlockSparseMatrix* A = new BlockSparseMatrix(bs);
+ memcpy(A->mutable_values(), values.get(), nnz * sizeof(*A->values()));
+
+ for (int i = 0; i < num_cols; ++i) {
+ problem->D.get()[i] = 1;
+ }
+
+ for (int i = 0; i < num_rows; ++i) {
+ problem->b.get()[i] = i;
+ }
+
+ problem->A.reset(A);
+
+ return problem;
+}
+
+
+/*
+ A = [1 0
+ 3 0
+ 0 5
+ 0 7
+ 0 9]
+
+ b = [0
+ 1
+ 2
+ 3
+ 4]
+*/
+// BlockSparseMatrix version
+LinearLeastSquaresProblem* LinearLeastSquaresProblem3() {
+ int num_rows = 5;
+ int num_cols = 2;
+
+ LinearLeastSquaresProblem* problem = new LinearLeastSquaresProblem;
+
+ problem->b.reset(new double[num_rows]);
+ problem->D.reset(new double[num_cols]);
+ problem->num_eliminate_blocks = 2;
+
+ CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
+ scoped_array<double> values(new double[num_rows * num_cols]);
+
+ for (int c = 0; c < num_cols; ++c) {
+ bs->cols.push_back(Block());
+ bs->cols.back().size = 1;
+ bs->cols.back().position = c;
+ }
+
+ int nnz = 0;
+
+ // Row 1
+ {
+ values[nnz++] = 1;
+ bs->rows.push_back(CompressedRow());
+ CompressedRow& row = bs->rows.back();
+ row.block.size = 1;
+ row.block.position = 0;
+ row.cells.push_back(Cell(0, 0));
+ }
+
+ // Row 2
+ {
+ values[nnz++] = 3;
+ bs->rows.push_back(CompressedRow());
+ CompressedRow& row = bs->rows.back();
+ row.block.size = 1;
+ row.block.position = 1;
+ row.cells.push_back(Cell(0, 1));
+ }
+
+ // Row 3
+ {
+ values[nnz++] = 5;
+ bs->rows.push_back(CompressedRow());
+ CompressedRow& row = bs->rows.back();
+ row.block.size = 1;
+ row.block.position = 2;
+ row.cells.push_back(Cell(1, 2));
+ }
+
+ // Row 4
+ {
+ values[nnz++] = 7;
+ bs->rows.push_back(CompressedRow());
+ CompressedRow& row = bs->rows.back();
+ row.block.size = 1;
+ row.block.position = 3;
+ row.cells.push_back(Cell(1, 3));
+ }
+
+ // Row 5
+ {
+ values[nnz++] = 9;
+ bs->rows.push_back(CompressedRow());
+ CompressedRow& row = bs->rows.back();
+ row.block.size = 1;
+ row.block.position = 4;
+ row.cells.push_back(Cell(1, 4));
+ }
+
+ BlockSparseMatrix* A = new BlockSparseMatrix(bs);
+ memcpy(A->mutable_values(), values.get(), nnz * sizeof(*A->values()));
+
+ for (int i = 0; i < num_cols; ++i) {
+ problem->D.get()[i] = 1;
+ }
+
+ for (int i = 0; i < num_rows; ++i) {
+ problem->b.get()[i] = i;
+ }
+
+ problem->A.reset(A);
+
+ return problem;
+}
+
+bool DumpLinearLeastSquaresProblemToConsole(const string& directory,
+ int iteration,
+ const SparseMatrix* A,
+ const double* D,
+ const double* b,
+ const double* x,
+ int num_eliminate_blocks) {
+ CHECK_NOTNULL(A);
+ Matrix AA;
+ A->ToDenseMatrix(&AA);
+ LOG(INFO) << "A^T: \n" << AA.transpose();
+
+ if (D != NULL) {
+ LOG(INFO) << "A's appended diagonal:\n"
+ << ConstVectorRef(D, A->num_cols());
+ }
+
+ if (b != NULL) {
+ LOG(INFO) << "b: \n" << ConstVectorRef(b, A->num_rows());
+ }
+
+ if (x != NULL) {
+ LOG(INFO) << "x: \n" << ConstVectorRef(x, A->num_cols());
+ }
+ return true;
+}
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory,
+ int iteration,
+ const SparseMatrix* A,
+ const double* D,
+ const double* b,
+ const double* x,
+ int num_eliminate_blocks) {
+ CHECK_NOTNULL(A);
+ LinearLeastSquaresProblemProto lsqp;
+ A->ToProto(lsqp.mutable_a());
+
+ if (D != NULL) {
+ for (int i = 0; i < A->num_cols(); ++i) {
+ lsqp.add_d(D[i]);
+ }
+ }
+
+ if (b != NULL) {
+ for (int i = 0; i < A->num_rows(); ++i) {
+ lsqp.add_b(b[i]);
+ }
+ }
+
+ if (x != NULL) {
+ for (int i = 0; i < A->num_cols(); ++i) {
+ lsqp.add_x(x[i]);
+ }
+ }
+
+ lsqp.set_num_eliminate_blocks(num_eliminate_blocks);
+ string format_string = JoinPath(directory,
+ "lm_iteration_%03d.lsqp");
+ string filename =
+ StringPrintf(format_string.c_str(), iteration);
+ LOG(INFO) << "Dumping least squares problem for iteration " << iteration
+ << " to disk. File: " << filename;
+ WriteStringToFileOrDie(lsqp.SerializeAsString(), filename);
+ return true;
+}
+#else
+bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory,
+ int iteration,
+ const SparseMatrix* A,
+ const double* D,
+ const double* b,
+ const double* x,
+ int num_eliminate_blocks) {
+ LOG(ERROR) << "Dumping least squares problems is only "
+ << "supported when Ceres is compiled with "
+ << "protocol buffer support.";
+ return false;
+}
+#endif
+
+void WriteArrayToFileOrDie(const string& filename,
+ const double* x,
+ const int size) {
+ CHECK_NOTNULL(x);
+ VLOG(2) << "Writing array to: " << filename;
+ FILE* fptr = fopen(filename.c_str(), "w");
+ CHECK_NOTNULL(fptr);
+ for (int i = 0; i < size; ++i) {
+ fprintf(fptr, "%17f\n", x[i]);
+ }
+ fclose(fptr);
+}
+
+bool DumpLinearLeastSquaresProblemToTextFile(const string& directory,
+ int iteration,
+ const SparseMatrix* A,
+ const double* D,
+ const double* b,
+ const double* x,
+ int num_eliminate_blocks) {
+ CHECK_NOTNULL(A);
+ string format_string = JoinPath(directory,
+ "lm_iteration_%03d");
+ string filename_prefix =
+ StringPrintf(format_string.c_str(), iteration);
+
+ LOG(INFO) << "writing to: " << filename_prefix << "*";
+
+ string matlab_script;
+ StringAppendF(&matlab_script,
+ "function lsqp = lm_iteration_%03d()\n", iteration);
+ StringAppendF(&matlab_script,
+ "lsqp.num_rows = %d;\n", A->num_rows());
+ StringAppendF(&matlab_script,
+ "lsqp.num_cols = %d;\n", A->num_cols());
+
+ {
+ string filename = filename_prefix + "_A.txt";
+ FILE* fptr = fopen(filename.c_str(), "w");
+ CHECK_NOTNULL(fptr);
+ A->ToTextFile(fptr);
+ fclose(fptr);
+ StringAppendF(&matlab_script,
+ "tmp = load('%s', '-ascii');\n", filename.c_str());
+ StringAppendF(
+ &matlab_script,
+ "lsqp.A = sparse(tmp(:, 1) + 1, tmp(:, 2) + 1, tmp(:, 3), %d, %d);\n",
+ A->num_rows(),
+ A->num_cols());
+ }
+
+
+ if (D != NULL) {
+ string filename = filename_prefix + "_D.txt";
+ WriteArrayToFileOrDie(filename, D, A->num_cols());
+ StringAppendF(&matlab_script,
+ "lsqp.D = load('%s', '-ascii');\n", filename.c_str());
+ }
+
+ if (b != NULL) {
+ string filename = filename_prefix + "_b.txt";
+ WriteArrayToFileOrDie(filename, b, A->num_rows());
+ StringAppendF(&matlab_script,
+ "lsqp.b = load('%s', '-ascii');\n", filename.c_str());
+ }
+
+ if (x != NULL) {
+ string filename = filename_prefix + "_x.txt";
+ WriteArrayToFileOrDie(filename, x, A->num_cols());
+ StringAppendF(&matlab_script,
+ "lsqp.x = load('%s', '-ascii');\n", filename.c_str());
+ }
+
+ string matlab_filename = filename_prefix + ".m";
+ WriteStringToFileOrDie(matlab_script, matlab_filename);
+ return true;
+}
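+
+// The generated lm_iteration_NNN.m file defines a MATLAB/Octave
+// function of the same name that returns a struct with the fields
+// num_rows, num_cols, A (as a sparse matrix) and, when present, D, b
+// and x, so a dumped problem can be inspected or re-solved offline.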
+
+bool DumpLinearLeastSquaresProblem(const string& directory,
+ int iteration,
+ DumpFormatType dump_format_type,
+ const SparseMatrix* A,
+ const double* D,
+ const double* b,
+ const double* x,
+ int num_eliminate_blocks) {
+ switch (dump_format_type) {
+ case (CONSOLE):
+ return DumpLinearLeastSquaresProblemToConsole(directory,
+ iteration,
+ A, D, b, x,
+ num_eliminate_blocks);
+ case (PROTOBUF):
+ return DumpLinearLeastSquaresProblemToProtocolBuffer(
+ directory,
+ iteration,
+ A, D, b, x,
+ num_eliminate_blocks);
+ case (TEXTFILE):
+ return DumpLinearLeastSquaresProblemToTextFile(directory,
+ iteration,
+ A, D, b, x,
+ num_eliminate_blocks);
+ default:
+ LOG(FATAL) << "Unknown DumpFormatType " << dump_format_type;
+ }
+
+ return true;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/linear_least_squares_problems.h b/internal/ceres/linear_least_squares_problems.h
new file mode 100644
index 0000000..553cc0d
--- /dev/null
+++ b/internal/ceres/linear_least_squares_problems.h
@@ -0,0 +1,87 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_LINEAR_LEAST_SQUARES_PROBLEMS_H_
+#define CERES_INTERNAL_LINEAR_LEAST_SQUARES_PROBLEMS_H_
+
+#include <string>
+#include <vector>
+#include "ceres/sparse_matrix.h"
+#include "ceres/internal/port.h"
+#include "ceres/internal/scoped_ptr.h"
+
+namespace ceres {
+namespace internal {
+
+// Structure defining a linear least squares problem and, if possible,
+// ground truth solutions. To be used by various LinearSolver tests.
+struct LinearLeastSquaresProblem {
+ LinearLeastSquaresProblem()
+ : A(NULL), b(NULL), D(NULL), num_eliminate_blocks(0),
+ x(NULL), x_D(NULL) {
+ }
+
+ scoped_ptr<SparseMatrix> A;
+ scoped_array<double> b;
+ scoped_array<double> D;
+ // If using the Schur eliminator, the number of variable
+ // blocks that are e_type blocks.
+ int num_eliminate_blocks;
+
+ // Solution to min_x |Ax - b|^2
+ scoped_array<double> x;
+ // Solution to min_x |Ax - b|^2 + |Dx|^2
+ scoped_array<double> x_D;
+};
+
+// Factories for linear least squares problem.
+LinearLeastSquaresProblem* CreateLinearLeastSquaresProblemFromId(int id);
+LinearLeastSquaresProblem* CreateLinearLeastSquaresProblemFromFile(
+ const string& filename);
+
+LinearLeastSquaresProblem* LinearLeastSquaresProblem0();
+LinearLeastSquaresProblem* LinearLeastSquaresProblem1();
+LinearLeastSquaresProblem* LinearLeastSquaresProblem2();
+LinearLeastSquaresProblem* LinearLeastSquaresProblem3();
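+
+// Illustrative use in a test (a sketch; the solver under test and its
+// options are assumed):
+//
+//   scoped_ptr<LinearLeastSquaresProblem> problem(
+//       CreateLinearLeastSquaresProblemFromId(0));
+//   SparseMatrix* A = problem->A.get();
+//   // Solve using A, problem->b and optionally problem->D, then
+//   // compare the result against problem->x or problem->x_D.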
+
+// Write the linear least squares problem to disk. The exact format
+// depends on dump_format_type.
+bool DumpLinearLeastSquaresProblem(const string& directory,
+ int iteration,
+ DumpFormatType dump_format_type,
+ const SparseMatrix* A,
+ const double* D,
+ const double* b,
+ const double* x,
+ int num_eliminate_blocks);
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_LINEAR_LEAST_SQUARES_PROBLEMS_H_
diff --git a/internal/ceres/linear_operator.cc b/internal/ceres/linear_operator.cc
new file mode 100644
index 0000000..4b59fa1
--- /dev/null
+++ b/internal/ceres/linear_operator.cc
@@ -0,0 +1,40 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/linear_operator.h"
+
+namespace ceres {
+namespace internal {
+
+LinearOperator::~LinearOperator() {
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/linear_operator.h b/internal/ceres/linear_operator.h
new file mode 100644
index 0000000..d5c15ce
--- /dev/null
+++ b/internal/ceres/linear_operator.h
@@ -0,0 +1,59 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Base class for access to a linear operator.
+
+#ifndef CERES_INTERNAL_LINEAR_OPERATOR_H_
+#define CERES_INTERNAL_LINEAR_OPERATOR_H_
+
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+// This is an abstract base class for linear operators. It supports
+// access to size information and left and right multiply operators.
+class LinearOperator {
+ public:
+ virtual ~LinearOperator();
+
+ // y = y + Ax;
+ virtual void RightMultiply(const double* x, double* y) const = 0;
+ // y = y + A'x;
+ virtual void LeftMultiply(const double* x, double* y) const = 0;
+
+ virtual int num_rows() const = 0;
+ virtual int num_cols() const = 0;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_LINEAR_OPERATOR_H_
diff --git a/internal/ceres/linear_solver.cc b/internal/ceres/linear_solver.cc
new file mode 100644
index 0000000..08c3ba1
--- /dev/null
+++ b/internal/ceres/linear_solver.cc
@@ -0,0 +1,93 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/linear_solver.h"
+
+#include "ceres/cgnr_solver.h"
+#include "ceres/dense_normal_cholesky_solver.h"
+#include "ceres/dense_qr_solver.h"
+#include "ceres/iterative_schur_complement_solver.h"
+#include "ceres/schur_complement_solver.h"
+#include "ceres/sparse_normal_cholesky_solver.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+LinearSolver::~LinearSolver() {
+}
+
+LinearSolver* LinearSolver::Create(const LinearSolver::Options& options) {
+ switch (options.type) {
+ case CGNR:
+ return new CgnrSolver(options);
+
+ case SPARSE_NORMAL_CHOLESKY:
+#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
+ LOG(WARNING) << "SPARSE_NORMAL_CHOLESKY is not available. Please "
+ << "build Ceres with SuiteSparse or CXSparse. "
+ << "Returning NULL.";
+ return NULL;
+#else
+ return new SparseNormalCholeskySolver(options);
+#endif
+
+ case SPARSE_SCHUR:
+#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
+ LOG(WARNING) << "SPARSE_SCHUR is not available. Please "
+ << "build Ceres with SuiteSparse or CXSparse. "
+ << "Returning NULL.";
+ return NULL;
+#else
+ return new SparseSchurComplementSolver(options);
+#endif
+
+ case DENSE_SCHUR:
+ return new DenseSchurComplementSolver(options);
+
+ case ITERATIVE_SCHUR:
+ return new IterativeSchurComplementSolver(options);
+
+ case DENSE_QR:
+ return new DenseQRSolver(options);
+
+ case DENSE_NORMAL_CHOLESKY:
+ return new DenseNormalCholeskySolver(options);
+
+ default:
+ LOG(FATAL) << "Unknown linear solver type :"
+ << options.type;
+ return NULL; // MSVC doesn't understand that LOG(FATAL) never returns.
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/linear_solver.h b/internal/ceres/linear_solver.h
new file mode 100644
index 0000000..ee7fce2
--- /dev/null
+++ b/internal/ceres/linear_solver.h
@@ -0,0 +1,303 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Abstract interface for objects solving linear systems of various
+// kinds.
+
+#ifndef CERES_INTERNAL_LINEAR_SOLVER_H_
+#define CERES_INTERNAL_LINEAR_SOLVER_H_
+
+#include <cstddef>
+#include <vector>
+
+#include <glog/logging.h>
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/casts.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/dense_sparse_matrix.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class LinearOperator;
+
+// Abstract base class for objects that implement algorithms for
+// solving linear systems
+//
+// Ax = b
+//
+// It is expected that a single instance of a LinearSolver object
+// may be used multiple times for solving multiple linear systems with
+// the same sparsity structure. This allows it to cache and reuse
+// information across solves. Conversely, calling Solve on the same
+// LinearSolver instance with linear systems that have different
+// sparsity structures results in undefined behaviour.
+//
+// Subclasses of LinearSolver use two structs to configure themselves.
+// The Options struct configures the LinearSolver object for its
+// lifetime. The PerSolveOptions struct is used to specify options for
+// a particular Solve call.
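+//
+// Illustrative use (a sketch; the jacobian matrix, residuals and step
+// arrays are assumed to exist with compatible sizes):
+//
+//   LinearSolver::Options options;
+//   options.type = DENSE_QR;
+//   scoped_ptr<LinearSolver> solver(LinearSolver::Create(options));
+//
+//   LinearSolver::PerSolveOptions per_solve_options;
+//   LinearSolver::Summary summary =
+//       solver->Solve(jacobian, residuals, per_solve_options, step);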
+class LinearSolver {
+ public:
+ struct Options {
+ Options()
+ : type(SPARSE_NORMAL_CHOLESKY),
+ preconditioner_type(JACOBI),
+ sparse_linear_algebra_library(SUITE_SPARSE),
+ use_block_amd(true),
+ min_num_iterations(1),
+ max_num_iterations(1),
+ num_threads(1),
+ residual_reset_period(10),
+ row_block_size(Dynamic),
+ e_block_size(Dynamic),
+ f_block_size(Dynamic) {
+ }
+
+ LinearSolverType type;
+
+ PreconditionerType preconditioner_type;
+
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library;
+
+ // See solver.h for explanation of this option.
+ bool use_block_amd;
+
+ // Number of internal iterations that the solver uses. These
+ // parameters only make sense for iterative solvers like CG.
+ int min_num_iterations;
+ int max_num_iterations;
+
+ // If possible, how many threads can the solver use.
+ int num_threads;
+
+ // Hints about the order in which the parameter blocks should be
+ // eliminated by the linear solver.
+ //
+ // For example if elimination_groups is a vector of size k, then
+ // the linear solver is informed that it should eliminate the
+ // parameter blocks 0 to elimination_groups[0] - 1 first, and then
+ // elimination_groups[0] to elimination_groups[1] and so on. Within
+ // each elimination group, the linear solver is free to choose how
+ // the parameter blocks are ordered. Different linear solvers have
+ // differing requirements on elimination_groups.
+ //
+ // The most common use is for Schur type solvers, where there
+ // should be at least two elimination groups and the first
+ // elimination group must form an independent set in the normal
+ // equations. The first elimination group corresponds to the
+ // num_eliminate_blocks in the Schur type solvers.
+ vector<int> elimination_groups;
+
+ // Iterative solvers, e.g. Preconditioned Conjugate Gradients,
+ // maintain a cheap estimate of the residual which may become
+ // inaccurate over time. Thus, for non-zero values of this
+ // parameter, the solver recomputes the residual from a |b - Ax|
+ // evaluation every residual_reset_period iterations.
+ int residual_reset_period;
+
+ // If the block sizes in a BlockSparseMatrix are fixed, then in
+ // some cases the Schur complement based solvers can detect and
+ // specialize on them.
+ //
+ // It is expected that these parameters are set programmatically
+ // rather than manually.
+ //
+ // Please see schur_complement_solver.h and schur_eliminator.h for
+ // more details.
+ int row_block_size;
+ int e_block_size;
+ int f_block_size;
+ };
+
+ // Options for the Solve method.
+ struct PerSolveOptions {
+ PerSolveOptions()
+ : D(NULL),
+ preconditioner(NULL),
+ r_tolerance(0.0),
+ q_tolerance(0.0) {
+ }
+
+ // This option only makes sense for unsymmetric linear solvers
+ // that can solve rectangular linear systems.
+ //
+ // Given a matrix A, an optional diagonal matrix D as a vector,
+ // and a vector b, the linear solver will solve for
+ //
+ // | A | x = | b |
+ // | D | | 0 |
+ //
+ // If D is null, then it is treated as zero, and the solver returns
+ // the solution to
+ //
+ // A x = b
+ //
+ // In either case, x is the vector that solves the following
+ // optimization problem.
+ //
+ // arg min_x ||Ax - b||^2 + ||Dx||^2
+ //
+ // Here A is a matrix of size m x n, with full column rank. If A
+ // does not have full column rank, the results returned by the
+ // solver cannot be relied on. D, if it is not null is an array of
+ // size n. b is an array of size m and x is an array of size n.
+ double* D;
+
+ // This option only makes sense for iterative solvers.
+ //
+ // In general the performance of an iterative linear solver
+ // depends on the condition number of the matrix A. For example
+ // the convergence rate of the conjugate gradients algorithm
+ // is proportional to the square root of the condition number.
+ //
+ // One particularly useful technique for improving the
+ // conditioning of a linear system is to precondition it. In its
+ // simplest form a preconditioner is a matrix M such that instead
+ // of solving Ax = b, we solve the linear system AM^{-1} y = b,
+ // where M is chosen so that the condition number k(AM^{-1})
+ // is smaller than the condition number k(A). Given the solution to
+ // this system, x = M^{-1} y. The iterative solver takes care of
+ // the mechanics of solving the preconditioned system and
+ // returning the corrected solution x. The user only needs to
+ // supply a linear operator.
+ //
+ // A null preconditioner is equivalent to an identity matrix being
+ // used as a preconditioner.
+ LinearOperator* preconditioner;
+
+
+ // The following tolerance-related options only make sense for
+ // iterative solvers. Direct solvers ignore them.
+
+ // Solver terminates when
+ //
+ // |Ax - b| <= r_tolerance * |b|.
+ //
+ // This is the most commonly used termination criterion for
+ // iterative solvers.
+ double r_tolerance;
+
+ // For PSD matrices A, let
+ //
+ // Q(x) = x'Ax - 2b'x
+ //
+ // be the cost of the quadratic function defined by A and b. Then,
+ // the solver terminates at iteration i if
+ //
+ // i * (Q(x_i) - Q(x_{i-1})) / Q(x_i) < q_tolerance.
+ //
+ // This termination criterion is more useful when using CG to
+ // solve the Newton step. This particular convergence test comes
+ // from Stephen Nash's work on truncated Newton
+ // methods. References:
+ //
+ // 1. Stephen G. Nash & Ariela Sofer, Assessing A Search
+ // Direction Within A Truncated Newton Method, Operations
+ // Research Letters 9 (1990), 219-221.
+ //
+ // 2. Stephen G. Nash, A Survey of Truncated Newton Methods,
+ // Journal of Computational and Applied Mathematics,
+ // 124(1-2), 45-59, 2000.
+ //
+ double q_tolerance;
+ };
+
+ // Summary of a call to the Solve method. We should move away from
+ // the true/false method for determining solver success. We should
+ // let the summary object do the talking.
+ struct Summary {
+ Summary()
+ : residual_norm(0.0),
+ num_iterations(-1),
+ termination_type(FAILURE) {
+ }
+
+ double residual_norm;
+ int num_iterations;
+ LinearSolverTerminationType termination_type;
+ };
+
+ virtual ~LinearSolver();
+
+ // Solve Ax = b.
+ virtual Summary Solve(LinearOperator* A,
+ const double* b,
+ const PerSolveOptions& per_solve_options,
+ double* x) = 0;
+
+ // Factory
+ static LinearSolver* Create(const Options& options);
+};
+
+// This templated subclass of LinearSolver serves as a base class for
+// other linear solvers that depend on the particular matrix layout of
+// the underlying linear operator. For example some linear solvers
+// need low level access to the TripletSparseMatrix implementing the
+// LinearOperator interface. This class hides those implementation
+// details behind a private virtual method, and has the Solve method
+// perform the necessary upcasting.
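+//
+// A concrete solver typically derives from one of the typedefs below
+// and implements SolveImpl for its matrix type, e.g. (sketch only;
+// MyDenseSolver is a hypothetical name):
+//
+//   class MyDenseSolver : public DenseSparseMatrixSolver {
+//    private:
+//     virtual LinearSolver::Summary SolveImpl(
+//         DenseSparseMatrix* A,
+//         const double* b,
+//         const LinearSolver::PerSolveOptions& per_solve_options,
+//         double* x);
+//   };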
+template <typename MatrixType>
+class TypedLinearSolver : public LinearSolver {
+ public:
+ virtual ~TypedLinearSolver() {}
+ virtual LinearSolver::Summary Solve(
+ LinearOperator* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x) {
+ CHECK_NOTNULL(A);
+ CHECK_NOTNULL(b);
+ CHECK_NOTNULL(x);
+ return SolveImpl(down_cast<MatrixType*>(A), b, per_solve_options, x);
+ }
+
+ private:
+ virtual LinearSolver::Summary SolveImpl(
+ MatrixType* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x) = 0;
+};
+
+// Linear solvers that depend on access to the low-level structure of
+// a SparseMatrix.
+typedef TypedLinearSolver<BlockSparseMatrix> BlockSparseMatrixSolver; // NOLINT
+typedef TypedLinearSolver<BlockSparseMatrixBase> BlockSparseMatrixBaseSolver; // NOLINT
+typedef TypedLinearSolver<CompressedRowSparseMatrix> CompressedRowSparseMatrixSolver; // NOLINT
+typedef TypedLinearSolver<DenseSparseMatrix> DenseSparseMatrixSolver; // NOLINT
+typedef TypedLinearSolver<TripletSparseMatrix> TripletSparseMatrixSolver; // NOLINT
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_LINEAR_SOLVER_H_
diff --git a/internal/ceres/local_parameterization.cc b/internal/ceres/local_parameterization.cc
new file mode 100644
index 0000000..26e7f49
--- /dev/null
+++ b/internal/ceres/local_parameterization.cc
@@ -0,0 +1,141 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/local_parameterization.h"
+
+#include "ceres/internal/eigen.h"
+#include "ceres/rotation.h"
+#include "glog/logging.h"
+
+namespace ceres {
+
+IdentityParameterization::IdentityParameterization(const int size)
+ : size_(size) {
+ CHECK_GT(size, 0);
+}
+
+bool IdentityParameterization::Plus(const double* x,
+ const double* delta,
+ double* x_plus_delta) const {
+ VectorRef(x_plus_delta, size_) =
+ ConstVectorRef(x, size_) + ConstVectorRef(delta, size_);
+ return true;
+}
+
+bool IdentityParameterization::ComputeJacobian(const double* x,
+ double* jacobian) const {
+ MatrixRef(jacobian, size_, size_) = Matrix::Identity(size_, size_);
+ return true;
+}
+
+SubsetParameterization::SubsetParameterization(
+ int size,
+ const vector<int>& constant_parameters)
+ : local_size_(size - constant_parameters.size()),
+ constancy_mask_(size, 0) {
+ CHECK_GT(constant_parameters.size(), 0)
+ << "The set of constant parameters should contain at least "
+ << "one element. If you do not wish to hold any parameters "
+ << "constant, then do not use a SubsetParameterization";
+
+ vector<int> constant = constant_parameters;
+ sort(constant.begin(), constant.end());
+ CHECK(unique(constant.begin(), constant.end()) == constant.end())
+ << "The set of constant parameters cannot contain duplicates";
+ CHECK_LT(constant_parameters.size(), size)
+ << "Number of parameters held constant should be less "
+ << "than the size of the parameter block. If you wish "
+ << "to hold the entire parameter block constant, then a "
+ << "efficient way is to directly mark it as constant "
+ << "instead of using a LocalParameterization to do so.";
+ CHECK_GE(*min_element(constant.begin(), constant.end()), 0);
+ CHECK_LT(*max_element(constant.begin(), constant.end()), size);
+
+ for (int i = 0; i < constant_parameters.size(); ++i) {
+ constancy_mask_[constant_parameters[i]] = 1;
+ }
+}
+
+bool SubsetParameterization::Plus(const double* x,
+ const double* delta,
+ double* x_plus_delta) const {
+ for (int i = 0, j = 0; i < constancy_mask_.size(); ++i) {
+ if (constancy_mask_[i]) {
+ x_plus_delta[i] = x[i];
+ } else {
+ x_plus_delta[i] = x[i] + delta[j++];
+ }
+ }
+ return true;
+}
+
+bool SubsetParameterization::ComputeJacobian(const double* x,
+ double* jacobian) const {
+ MatrixRef m(jacobian, constancy_mask_.size(), local_size_);
+ m.setZero();
+ for (int i = 0, j = 0; i < constancy_mask_.size(); ++i) {
+ if (!constancy_mask_[i]) {
+ m(i, j++) = 1.0;
+ }
+ }
+ return true;
+}
+
+bool QuaternionParameterization::Plus(const double* x,
+ const double* delta,
+ double* x_plus_delta) const {
+ const double norm_delta =
+ sqrt(delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2]);
+ if (norm_delta > 0.0) {
+ const double sin_delta_by_delta = (sin(norm_delta) / norm_delta);
+ double q_delta[4];
+ q_delta[0] = cos(norm_delta);
+ q_delta[1] = sin_delta_by_delta * delta[0];
+ q_delta[2] = sin_delta_by_delta * delta[1];
+ q_delta[3] = sin_delta_by_delta * delta[2];
+ QuaternionProduct(q_delta, x, x_plus_delta);
+ } else {
+ for (int i = 0; i < 4; ++i) {
+ x_plus_delta[i] = x[i];
+ }
+ }
+ return true;
+}
+
+bool QuaternionParameterization::ComputeJacobian(const double* x,
+ double* jacobian) const {
+ jacobian[0] = -x[1]; jacobian[1] = -x[2]; jacobian[2] = -x[3]; // NOLINT
+ jacobian[3] = x[0]; jacobian[4] = x[3]; jacobian[5] = -x[2]; // NOLINT
+ jacobian[6] = -x[3]; jacobian[7] = x[0]; jacobian[8] = x[1]; // NOLINT
+ jacobian[9] = x[2]; jacobian[10] = -x[1]; jacobian[11] = x[0]; // NOLINT
+ return true;
+}
+
+} // namespace ceres
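As a usage sketch (illustrative only, using just the classes defined above): for SubsetParameterization the delta vector lives in the reduced local space, and for QuaternionParameterization a 3-vector delta is lifted to a unit quaternion and composed with x via QuaternionProduct.

#include <vector>
#include "ceres/local_parameterization.h"

void LocalParameterizationSketch() {
  // Hold coordinate 2 of a 4-vector constant: global size 4, local size 3.
  std::vector<int> constant;
  constant.push_back(2);
  ceres::SubsetParameterization subset(4, constant);

  double x[4] = {1.0, 2.0, 3.0, 4.0};
  double delta[3] = {0.1, 0.2, 0.3};    // Lives in the 3-dimensional local space.
  double x_plus_delta[4];
  subset.Plus(x, delta, x_plus_delta);  // x_plus_delta[2] == x[2].

  // Quaternion update: q is a unit quaternion, dq is in R^3.
  ceres::QuaternionParameterization quaternion;
  double q[4] = {1.0, 0.0, 0.0, 0.0};
  double dq[3] = {0.01, 0.0, 0.0};
  double q_plus_dq[4];
  quaternion.Plus(q, dq, q_plus_dq);    // Result stays on the unit sphere.

  double jacobian[4 * 3];               // Global size x local size, row-major.
  quaternion.ComputeJacobian(q, jacobian);
}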
diff --git a/internal/ceres/local_parameterization_test.cc b/internal/ceres/local_parameterization_test.cc
new file mode 100644
index 0000000..9b775b4
--- /dev/null
+++ b/internal/ceres/local_parameterization_test.cc
@@ -0,0 +1,257 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <cmath>
+#include "ceres/fpclassify.h"
+#include "ceres/internal/autodiff.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/rotation.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(IdentityParameterization, EverythingTest) {
+ IdentityParameterization parameterization(3);
+ EXPECT_EQ(parameterization.GlobalSize(), 3);
+ EXPECT_EQ(parameterization.LocalSize(), 3);
+
+ double x[3] = {1.0, 2.0, 3.0};
+ double delta[3] = {0.0, 1.0, 2.0};
+ double x_plus_delta[3] = {0.0, 0.0, 0.0};
+ parameterization.Plus(x, delta, x_plus_delta);
+ EXPECT_EQ(x_plus_delta[0], 1.0);
+ EXPECT_EQ(x_plus_delta[1], 3.0);
+ EXPECT_EQ(x_plus_delta[2], 5.0);
+
+ double jacobian[9];
+ parameterization.ComputeJacobian(x, jacobian);
+ int k = 0;
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 3; ++j, ++k) {
+ EXPECT_EQ(jacobian[k], (i == j) ? 1.0 : 0.0);
+ }
+ }
+}
+
+TEST(SubsetParameterization, DeathTests) {
+ vector<int> constant_parameters;
+ EXPECT_DEATH_IF_SUPPORTED(
+ SubsetParameterization parameterization(1, constant_parameters),
+ "at least");
+
+ constant_parameters.push_back(0);
+ EXPECT_DEATH_IF_SUPPORTED(
+ SubsetParameterization parameterization(1, constant_parameters),
+ "Number of parameters");
+
+ constant_parameters.push_back(1);
+ EXPECT_DEATH_IF_SUPPORTED(
+ SubsetParameterization parameterization(2, constant_parameters),
+ "Number of parameters");
+
+ constant_parameters.push_back(1);
+ EXPECT_DEATH_IF_SUPPORTED(
+ SubsetParameterization parameterization(2, constant_parameters),
+ "duplicates");
+}
+
+TEST(SubsetParameterization, NormalFunctionTest) {
+ double x[4] = {1.0, 2.0, 3.0, 4.0};
+ for (int i = 0; i < 4; ++i) {
+ vector<int> constant_parameters;
+ constant_parameters.push_back(i);
+ SubsetParameterization parameterization(4, constant_parameters);
+ double delta[3] = {1.0, 2.0, 3.0};
+ double x_plus_delta[4] = {0.0, 0.0, 0.0, 0.0};
+
+ parameterization.Plus(x, delta, x_plus_delta);
+ int k = 0;
+ for (int j = 0; j < 4; ++j) {
+ if (j == i) {
+ EXPECT_EQ(x_plus_delta[j], x[j]);
+ } else {
+ EXPECT_EQ(x_plus_delta[j], x[j] + delta[k++]);
+ }
+ }
+
+ double jacobian[4 * 3];
+ parameterization.ComputeJacobian(x, jacobian);
+ int delta_cursor = 0;
+ int jacobian_cursor = 0;
+ for (int j = 0; j < 4; ++j) {
+ if (j != i) {
+ for (int k = 0; k < 3; ++k, jacobian_cursor++) {
+ EXPECT_EQ(jacobian[jacobian_cursor], delta_cursor == k ? 1.0 : 0.0);
+ }
+ ++delta_cursor;
+ } else {
+ for (int k = 0; k < 3; ++k, jacobian_cursor++) {
+ EXPECT_EQ(jacobian[jacobian_cursor], 0.0);
+ }
+ }
+ }
+ }
+}
+
+// Functor needed to implement automatically differentiated Plus for
+// quaternions.
+struct QuaternionPlus {
+ template<typename T>
+ bool operator()(const T* x, const T* delta, T* x_plus_delta) const {
+ const T squared_norm_delta =
+ delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2];
+
+ T q_delta[4];
+ if (squared_norm_delta > T(0.0)) {
+ T norm_delta = sqrt(squared_norm_delta);
+ const T sin_delta_by_delta = sin(norm_delta) / norm_delta;
+ q_delta[0] = cos(norm_delta);
+ q_delta[1] = sin_delta_by_delta * delta[0];
+ q_delta[2] = sin_delta_by_delta * delta[1];
+ q_delta[3] = sin_delta_by_delta * delta[2];
+ } else {
+ // We do not just use q_delta = [1,0,0,0] here because that is a
+ // constant and when used for automatic differentiation will
+ // lead to a zero derivative. Instead we take a first order
+ // approximation and evaluate it at zero.
+ q_delta[0] = T(1.0);
+ q_delta[1] = delta[0];
+ q_delta[2] = delta[1];
+ q_delta[3] = delta[2];
+ }
+
+ QuaternionProduct(q_delta, x, x_plus_delta);
+ return true;
+ }
+};
+
+void QuaternionParameterizationTestHelper(const double* x,
+ const double* delta,
+ const double* q_delta) {
+ const double kTolerance = 1e-14;
+ double x_plus_delta_ref[4] = {0.0, 0.0, 0.0, 0.0};
+ QuaternionProduct(q_delta, x, x_plus_delta_ref);
+
+ double x_plus_delta[4] = {0.0, 0.0, 0.0, 0.0};
+ QuaternionParameterization param;
+ param.Plus(x, delta, x_plus_delta);
+ for (int i = 0; i < 4; ++i) {
+ EXPECT_NEAR(x_plus_delta[i], x_plus_delta_ref[i], kTolerance);
+ }
+
+ const double x_plus_delta_norm =
+ sqrt(x_plus_delta[0] * x_plus_delta[0] +
+ x_plus_delta[1] * x_plus_delta[1] +
+ x_plus_delta[2] * x_plus_delta[2] +
+ x_plus_delta[3] * x_plus_delta[3]);
+
+ EXPECT_NEAR(x_plus_delta_norm, 1.0, kTolerance);
+
+ double jacobian_ref[12];
+ double zero_delta[3] = {0.0, 0.0, 0.0};
+ const double* parameters[2] = {x, zero_delta};
+ double* jacobian_array[2] = { NULL, jacobian_ref };
+
+ // Autodiff jacobian at delta_x = 0.
+ internal::AutoDiff<QuaternionPlus, double, 4, 3>::Differentiate(
+ QuaternionPlus(), parameters, 4, x_plus_delta, jacobian_array);
+
+ double jacobian[12];
+ param.ComputeJacobian(x, jacobian);
+ for (int i = 0; i < 12; ++i) {
+ EXPECT_TRUE(IsFinite(jacobian[i]));
+ EXPECT_NEAR(jacobian[i], jacobian_ref[i], kTolerance)
+ << "Jacobian mismatch: i = " << i
+ << "\n Expected \n" << ConstMatrixRef(jacobian_ref, 4, 3)
+ << "\n Actual \n" << ConstMatrixRef(jacobian, 4, 3);
+ }
+}
+
+TEST(QuaternionParameterization, ZeroTest) {
+ double x[4] = {0.5, 0.5, 0.5, 0.5};
+ double delta[3] = {0.0, 0.0, 0.0};
+ double q_delta[4] = {1.0, 0.0, 0.0, 0.0};
+ QuaternionParameterizationTestHelper(x, delta, q_delta);
+}
+
+
+TEST(QuaternionParameterization, NearZeroTest) {
+ double x[4] = {0.52, 0.25, 0.15, 0.45};
+ double norm_x = sqrt(x[0] * x[0] +
+ x[1] * x[1] +
+ x[2] * x[2] +
+ x[3] * x[3]);
+ for (int i = 0; i < 4; ++i) {
+ x[i] = x[i] / norm_x;
+ }
+
+ double delta[3] = {0.24, 0.15, 0.10};
+ for (int i = 0; i < 3; ++i) {
+ delta[i] = delta[i] * 1e-14;
+ }
+
+ double q_delta[4];
+ q_delta[0] = 1.0;
+ q_delta[1] = delta[0];
+ q_delta[2] = delta[1];
+ q_delta[3] = delta[2];
+
+ QuaternionParameterizationTestHelper(x, delta, q_delta);
+}
+
+TEST(QuaternionParameterization, AwayFromZeroTest) {
+ double x[4] = {0.52, 0.25, 0.15, 0.45};
+ double norm_x = sqrt(x[0] * x[0] +
+ x[1] * x[1] +
+ x[2] * x[2] +
+ x[3] * x[3]);
+
+ for (int i = 0; i < 4; ++i) {
+ x[i] = x[i] / norm_x;
+ }
+
+ double delta[3] = {0.24, 0.15, 0.10};
+ const double delta_norm = sqrt(delta[0] * delta[0] +
+ delta[1] * delta[1] +
+ delta[2] * delta[2]);
+ double q_delta[4];
+ q_delta[0] = cos(delta_norm);
+ q_delta[1] = sin(delta_norm) / delta_norm * delta[0];
+ q_delta[2] = sin(delta_norm) / delta_norm * delta[1];
+ q_delta[3] = sin(delta_norm) / delta_norm * delta[2];
+
+ QuaternionParameterizationTestHelper(x, delta, q_delta);
+}
+
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/loss_function.cc b/internal/ceres/loss_function.cc
new file mode 100644
index 0000000..b948f28
--- /dev/null
+++ b/internal/ceres/loss_function.cc
@@ -0,0 +1,157 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Purpose: See .h file.
+
+#include "ceres/loss_function.h"
+
+#include <cmath>
+#include <cstddef>
+
+namespace ceres {
+
+void TrivialLoss::Evaluate(double s, double rho[3]) const {
+ rho[0] = s;
+ rho[1] = 1;
+ rho[2] = 0;
+}
+
+void HuberLoss::Evaluate(double s, double rho[3]) const {
+ if (s > b_) {
+ // Outlier region.
+ // 'r' is always positive.
+ const double r = sqrt(s);
+ rho[0] = 2 * a_ * r - b_;
+ rho[1] = a_ / r;
+ rho[2] = - rho[1] / (2 * s);
+ } else {
+ // Inlier region.
+ rho[0] = s;
+ rho[1] = 1;
+ rho[2] = 0;
+ }
+}
+
+void SoftLOneLoss::Evaluate(double s, double rho[3]) const {
+ const double sum = 1 + s * c_;
+ const double tmp = sqrt(sum);
+ // 'sum' and 'tmp' are always positive, assuming that 's' is.
+ rho[0] = 2 * b_ * (tmp - 1);
+ rho[1] = 1 / tmp;
+ rho[2] = - (c_ * rho[1]) / (2 * sum);
+}
+
+void CauchyLoss::Evaluate(double s, double rho[3]) const {
+ const double sum = 1 + s * c_;
+ const double inv = 1 / sum;
+ // 'sum' and 'inv' are always positive, assuming that 's' is.
+ rho[0] = b_ * log(sum);
+ rho[1] = inv;
+ rho[2] = - c_ * (inv * inv);
+}
+
+void ArctanLoss::Evaluate(double s, double rho[3]) const {
+ const double sum = 1 + s * s * b_;
+ const double inv = 1 / sum;
+ // 'sum' and 'inv' are always positive.
+ rho[0] = a_ * atan2(s, a_);
+ rho[1] = inv;
+ rho[2] = -2 * s * b_ * (inv * inv);
+}
+
+TolerantLoss::TolerantLoss(double a, double b)
+ : a_(a),
+ b_(b),
+ c_(b * log(1.0 + exp(-a / b))) {
+ CHECK_GE(a, 0.0);
+ CHECK_GT(b, 0.0);
+}
+
+void TolerantLoss::Evaluate(double s, double rho[3]) const {
+ const double x = (s - a_) / b_;
+ // The basic equation is rho[0] = b ln(1 + e^x). However, if e^x is too
+ // large, it will overflow. Since numerically 1 + e^x == e^x when x is
+ // greater than about ln(2^53) for doubles, beyond this threshold we
+ // substitute x for ln(1 + e^x) as a numerically equivalent approximation.
+ static const double kLog2Pow53 = 36.7; // ln(2^53) for IEEE doubles.
+ if (x > kLog2Pow53) {
+ rho[0] = s - a_ - c_;
+ rho[1] = 1.0;
+ rho[2] = 0.0;
+ } else {
+ const double e_x = exp(x);
+ rho[0] = b_ * log(1.0 + e_x) - c_;
+ rho[1] = e_x / (1.0 + e_x);
+ rho[2] = 0.5 / (b_ * (1.0 + cosh(x)));
+ }
+}
+
+ComposedLoss::ComposedLoss(const LossFunction* f, Ownership ownership_f,
+ const LossFunction* g, Ownership ownership_g)
+ : f_(CHECK_NOTNULL(f)),
+ g_(CHECK_NOTNULL(g)),
+ ownership_f_(ownership_f),
+ ownership_g_(ownership_g) {
+}
+
+ComposedLoss::~ComposedLoss() {
+ if (ownership_f_ == DO_NOT_TAKE_OWNERSHIP) {
+ f_.release();
+ }
+ if (ownership_g_ == DO_NOT_TAKE_OWNERSHIP) {
+ g_.release();
+ }
+}
+
+void ComposedLoss::Evaluate(double s, double rho[3]) const {
+ double rho_f[3], rho_g[3];
+ g_->Evaluate(s, rho_g);
+ f_->Evaluate(rho_g[0], rho_f);
+ rho[0] = rho_f[0];
+ // f'(g(s)) * g'(s).
+ rho[1] = rho_f[1] * rho_g[1];
+ // f''(g(s)) * g'(s) * g'(s) + f'(g(s)) * g''(s).
+ rho[2] = rho_f[2] * rho_g[1] * rho_g[1] + rho_f[1] * rho_g[2];
+}
+
+void ScaledLoss::Evaluate(double s, double rho[3]) const {
+ if (rho_.get() == NULL) {
+ rho[0] = a_ * s;
+ rho[1] = a_;
+ rho[2] = 0.0;
+ } else {
+ rho_->Evaluate(s, rho);
+ rho[0] *= a_;
+ rho[1] *= a_;
+ rho[2] *= a_;
+ }
+}
+
+} // namespace ceres
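To make the rho[] convention concrete: Evaluate(s, rho) receives the squared residual norm s and fills rho[0] = rho(s), rho[1] = rho'(s), rho[2] = rho''(s). A small numeric check against the closed forms above (a sketch; it assumes the usual convention from the public header that HuberLoss(a) switches regimes at s = a^2):

#include <cstdio>
#include "ceres/loss_function.h"

int main() {
  double rho[3];

  // s = 4 > a^2 = 1, so we are in the outlier region:
  // rho(s) = 2*a*sqrt(s) - a^2 = 3, rho'(s) = a/sqrt(s) = 0.5,
  // rho''(s) = -rho'(s)/(2*s) = -0.0625.
  ceres::HuberLoss huber(1.0);
  huber.Evaluate(4.0, rho);
  std::printf("huber: %g %g %g\n", rho[0], rho[1], rho[2]);

  // ScaledLoss multiplies rho, rho' and rho'' by the same constant.
  ceres::ScaledLoss scaled(new ceres::CauchyLoss(1.0), 2.0, ceres::TAKE_OWNERSHIP);
  scaled.Evaluate(4.0, rho);
  std::printf("scaled cauchy: %g %g %g\n", rho[0], rho[1], rho[2]);
  return 0;
}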
diff --git a/internal/ceres/loss_function_test.cc b/internal/ceres/loss_function_test.cc
new file mode 100644
index 0000000..0967406
--- /dev/null
+++ b/internal/ceres/loss_function_test.cc
@@ -0,0 +1,227 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/loss_function.h"
+
+#include <cstddef>
+
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
+// Helper function for testing a LossFunction callback.
+//
+// Compares the values of rho'(s) and rho''(s) computed by the
+// callback with estimates obtained by symmetric finite differencing
+// of rho(s).
+void AssertLossFunctionIsValid(const LossFunction& loss, double s) {
+ CHECK_GT(s, 0);
+
+ // Evaluate rho(s), rho'(s) and rho''(s).
+ double rho[3];
+ loss.Evaluate(s, rho);
+
+ // Use symmetric finite differencing to estimate rho'(s) and
+ // rho''(s).
+ const double kH = 1e-4;
+ // Values at s + kH.
+ double fwd[3];
+ // Values at s - kH.
+ double bwd[3];
+ loss.Evaluate(s + kH, fwd);
+ loss.Evaluate(s - kH, bwd);
+
+ // First derivative.
+ const double fd_1 = (fwd[0] - bwd[0]) / (2 * kH);
+ ASSERT_NEAR(fd_1, rho[1], 1e-6);
+
+ // Second derivative.
+ const double fd_2 = (fwd[0] - 2*rho[0] + bwd[0]) / (kH * kH);
+ ASSERT_NEAR(fd_2, rho[2], 1e-6);
+}
+} // namespace
+
+// Try two values of the scaling a = 0.7 and 1.3
+// (where scaling makes sense) and of the squared norm
+// s = 0.357 and 1.792
+//
+// Note that for the Huber loss the test exercises both code paths
+// (i.e. both small and large values of s).
+
+TEST(LossFunction, TrivialLoss) {
+ AssertLossFunctionIsValid(TrivialLoss(), 0.357);
+ AssertLossFunctionIsValid(TrivialLoss(), 1.792);
+}
+
+TEST(LossFunction, HuberLoss) {
+ AssertLossFunctionIsValid(HuberLoss(0.7), 0.357);
+ AssertLossFunctionIsValid(HuberLoss(0.7), 1.792);
+ AssertLossFunctionIsValid(HuberLoss(1.3), 0.357);
+ AssertLossFunctionIsValid(HuberLoss(1.3), 1.792);
+}
+
+TEST(LossFunction, SoftLOneLoss) {
+ AssertLossFunctionIsValid(SoftLOneLoss(0.7), 0.357);
+ AssertLossFunctionIsValid(SoftLOneLoss(0.7), 1.792);
+ AssertLossFunctionIsValid(SoftLOneLoss(1.3), 0.357);
+ AssertLossFunctionIsValid(SoftLOneLoss(1.3), 1.792);
+}
+
+TEST(LossFunction, CauchyLoss) {
+ AssertLossFunctionIsValid(CauchyLoss(0.7), 0.357);
+ AssertLossFunctionIsValid(CauchyLoss(0.7), 1.792);
+ AssertLossFunctionIsValid(CauchyLoss(1.3), 0.357);
+ AssertLossFunctionIsValid(CauchyLoss(1.3), 1.792);
+}
+
+TEST(LossFunction, ArctanLoss) {
+ AssertLossFunctionIsValid(ArctanLoss(0.7), 0.357);
+ AssertLossFunctionIsValid(ArctanLoss(0.7), 1.792);
+ AssertLossFunctionIsValid(ArctanLoss(1.3), 0.357);
+ AssertLossFunctionIsValid(ArctanLoss(1.3), 1.792);
+}
+
+TEST(LossFunction, TolerantLoss) {
+ AssertLossFunctionIsValid(TolerantLoss(0.7, 0.4), 0.357);
+ AssertLossFunctionIsValid(TolerantLoss(0.7, 0.4), 1.792);
+ AssertLossFunctionIsValid(TolerantLoss(0.7, 0.4), 55.5);
+ AssertLossFunctionIsValid(TolerantLoss(1.3, 0.1), 0.357);
+ AssertLossFunctionIsValid(TolerantLoss(1.3, 0.1), 1.792);
+ AssertLossFunctionIsValid(TolerantLoss(1.3, 0.1), 55.5);
+ // Check the value at zero is actually zero.
+ double rho[3];
+ TolerantLoss(0.7, 0.4).Evaluate(0.0, rho);
+ ASSERT_NEAR(rho[0], 0.0, 1e-6);
+ // Check that the loss is well behaved just before and after the approximation threshold.
+ // A threshold of 36.7 is used by the implementation.
+ AssertLossFunctionIsValid(TolerantLoss(20.0, 1.0), 20.0 + 36.6);
+ AssertLossFunctionIsValid(TolerantLoss(20.0, 1.0), 20.0 + 36.7);
+ AssertLossFunctionIsValid(TolerantLoss(20.0, 1.0), 20.0 + 36.8);
+ AssertLossFunctionIsValid(TolerantLoss(20.0, 1.0), 20.0 + 1000.0);
+}
+
+TEST(LossFunction, ComposedLoss) {
+ {
+ HuberLoss f(0.7);
+ CauchyLoss g(1.3);
+ ComposedLoss c(&f, DO_NOT_TAKE_OWNERSHIP, &g, DO_NOT_TAKE_OWNERSHIP);
+ AssertLossFunctionIsValid(c, 0.357);
+ AssertLossFunctionIsValid(c, 1.792);
+ }
+ {
+ CauchyLoss f(0.7);
+ HuberLoss g(1.3);
+ ComposedLoss c(&f, DO_NOT_TAKE_OWNERSHIP, &g, DO_NOT_TAKE_OWNERSHIP);
+ AssertLossFunctionIsValid(c, 0.357);
+ AssertLossFunctionIsValid(c, 1.792);
+ }
+}
+
+TEST(LossFunction, ScaledLoss) {
+ // Wrap a few loss functions with a few scale factors. Construction cannot be
+ // combined with the call to AssertLossFunctionIsValid() because Apple's GCC
+ // is unable to eliminate the copy of ScaledLoss, which is not copyable.
+ {
+ ScaledLoss scaled_loss(NULL, 6, TAKE_OWNERSHIP);
+ AssertLossFunctionIsValid(scaled_loss, 0.323);
+ }
+ {
+ ScaledLoss scaled_loss(new TrivialLoss(), 10, TAKE_OWNERSHIP);
+ AssertLossFunctionIsValid(scaled_loss, 0.357);
+ }
+ {
+ ScaledLoss scaled_loss(new HuberLoss(0.7), 0.1, TAKE_OWNERSHIP);
+ AssertLossFunctionIsValid(scaled_loss, 1.792);
+ }
+ {
+ ScaledLoss scaled_loss(new SoftLOneLoss(1.3), 0.1, TAKE_OWNERSHIP);
+ AssertLossFunctionIsValid(scaled_loss, 1.792);
+ }
+ {
+ ScaledLoss scaled_loss(new CauchyLoss(1.3), 10, TAKE_OWNERSHIP);
+ AssertLossFunctionIsValid(scaled_loss, 1.792);
+ }
+ {
+ ScaledLoss scaled_loss(new ArctanLoss(1.3), 10, TAKE_OWNERSHIP);
+ AssertLossFunctionIsValid(scaled_loss, 1.792);
+ }
+ {
+ ScaledLoss scaled_loss(
+ new TolerantLoss(1.3, 0.1), 10, TAKE_OWNERSHIP);
+ AssertLossFunctionIsValid(scaled_loss, 1.792);
+ }
+ {
+ ScaledLoss scaled_loss(
+ new ComposedLoss(
+ new HuberLoss(0.8), TAKE_OWNERSHIP,
+ new TolerantLoss(1.3, 0.5), TAKE_OWNERSHIP), 10, TAKE_OWNERSHIP);
+ AssertLossFunctionIsValid(scaled_loss, 1.792);
+ }
+}
+
+TEST(LossFunction, LossFunctionWrapper) {
+ // Initialization
+ HuberLoss loss_function1(1.0);
+ LossFunctionWrapper loss_function_wrapper(new HuberLoss(1.0),
+ TAKE_OWNERSHIP);
+
+ double s = 0.862;
+ double rho_gold[3];
+ double rho[3];
+ loss_function1.Evaluate(s, rho_gold);
+ loss_function_wrapper.Evaluate(s, rho);
+ for (int i = 0; i < 3; ++i) {
+ EXPECT_NEAR(rho[i], rho_gold[i], 1e-12);
+ }
+
+ // Resetting
+ HuberLoss loss_function2(0.5);
+ loss_function_wrapper.Reset(new HuberLoss(0.5), TAKE_OWNERSHIP);
+ loss_function_wrapper.Evaluate(s, rho);
+ loss_function2.Evaluate(s, rho_gold);
+ for (int i = 0; i < 3; ++i) {
+ EXPECT_NEAR(rho[i], rho_gold[i], 1e-12);
+ }
+
+ // Not taking ownership.
+ HuberLoss loss_function3(0.3);
+ loss_function_wrapper.Reset(&loss_function3, DO_NOT_TAKE_OWNERSHIP);
+ loss_function_wrapper.Evaluate(s, rho);
+ loss_function3.Evaluate(s, rho_gold);
+ for (int i = 0; i < 3; ++i) {
+ EXPECT_NEAR(rho[i], rho_gold[i], 1e-12);
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/map_util.h b/internal/ceres/map_util.h
new file mode 100644
index 0000000..ddf1252
--- /dev/null
+++ b/internal/ceres/map_util.h
@@ -0,0 +1,129 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Originally by Anton Carver
+
+#ifndef CERES_INTERNAL_MAP_UTIL_H_
+#define CERES_INTERNAL_MAP_UTIL_H_
+
+#include <utility>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+
+// Perform a lookup in a map or hash_map, assuming that the key exists.
+// Crash if it does not.
+//
+// This is intended as a replacement for operator[] as an rvalue (for reading)
+// when the key is guaranteed to exist.
+//
+// operator[] is discouraged for several reasons:
+// * It has a side-effect of inserting missing keys
+// * It is not thread-safe (even when it is not inserting, it can still
+// choose to resize the underlying storage)
+// * It invalidates iterators (when it chooses to resize)
+// * It default constructs a value object even if it doesn't need to
+//
+// This version assumes the key is printable, and includes it in the fatal log
+// message.
+template <class Collection>
+const typename Collection::value_type::second_type&
+FindOrDie(const Collection& collection,
+ const typename Collection::value_type::first_type& key) {
+ typename Collection::const_iterator it = collection.find(key);
+ CHECK(it != collection.end()) << "Map key not found: " << key;
+ return it->second;
+}
+
+// Perform a lookup in a map or hash_map.
+// If the key is present in the map then the value associated with that
+// key is returned, otherwise the value passed as a default is returned.
+template <class Collection>
+const typename Collection::value_type::second_type&
+FindWithDefault(const Collection& collection,
+ const typename Collection::value_type::first_type& key,
+ const typename Collection::value_type::second_type& value) {
+ typename Collection::const_iterator it = collection.find(key);
+ if (it == collection.end()) {
+ return value;
+ }
+ return it->second;
+}
+
+// Insert a new key and value into a map or hash_map.
+// If the key is not present in the map the key and value are
+// inserted, otherwise nothing happens. True indicates that an insert
+// took place, false indicates the key was already present.
+template <class Collection>
+bool InsertIfNotPresent(
+ Collection * const collection,
+ const typename Collection::value_type::first_type& key,
+ const typename Collection::value_type::second_type& value) {
+ pair<typename Collection::iterator, bool> ret =
+ collection->insert(typename Collection::value_type(key, value));
+ return ret.second;
+}
+
+// Perform a lookup in a map or hash_map. Returns NULL if the key is not
+// present; otherwise returns a pointer to the value. The returned pointer is
+// not const and can be used to change the stored value.
+template <class Collection>
+typename Collection::value_type::second_type*
+FindOrNull(Collection& collection, // NOLINT
+ const typename Collection::value_type::first_type& key) {
+ typename Collection::iterator it = collection.find(key);
+ if (it == collection.end()) {
+ return 0;
+ }
+ return &it->second;
+}
+
+// Test to see if a set, map, hash_set or hash_map contains a particular key.
+// Returns true if the key is in the collection.
+template <class Collection, class Key>
+bool ContainsKey(const Collection& collection, const Key& key) {
+ typename Collection::const_iterator it = collection.find(key);
+ return it != collection.end();
+}
+
+// Inserts a new key/value into a map or hash_map.
+// Dies if the key is already present.
+template<class Collection>
+void InsertOrDie(Collection* const collection,
+ const typename Collection::value_type::first_type& key,
+ const typename Collection::value_type::second_type& data) {
+ typedef typename Collection::value_type value_type;
+ CHECK(collection->insert(value_type(key, data)).second)
+ << "duplicate key: " << key;
+}
+
+} // namespace ceres
+
+#endif // CERES_INTERNAL_MAP_UTIL_H_
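A quick usage sketch of the helpers above with a plain std::map (illustrative only; glog is included explicitly since FindOrDie and InsertOrDie expand CHECK):

#include <map>
#include <string>
#include "ceres/map_util.h"
#include "glog/logging.h"

void MapUtilSketch() {
  std::map<std::string, int> counts;

  ceres::InsertOrDie(&counts, std::string("cameras"), 10);       // Dies on a duplicate key.
  ceres::InsertIfNotPresent(&counts, std::string("points"), 3);  // Returns false if present.

  if (ceres::ContainsKey(counts, std::string("cameras"))) {
    const int n = ceres::FindOrDie(counts, std::string("cameras"));  // Crashes if missing.
    int* mutable_n = ceres::FindOrNull(counts, std::string("cameras"));
    if (mutable_n != NULL) {
      *mutable_n = n + 1;
    }
  }

  // FindWithDefault returns the supplied default when the key is absent; keep
  // the default in a named variable since the result is a const reference.
  const int fallback = 0;
  const int residuals = ceres::FindWithDefault(counts, std::string("residuals"), fallback);
  (void) residuals;
}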
diff --git a/internal/ceres/matrix.proto b/internal/ceres/matrix.proto
new file mode 100644
index 0000000..55a01d2
--- /dev/null
+++ b/internal/ceres/matrix.proto
@@ -0,0 +1,143 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+syntax = "proto2";
+
+package ceres.internal;
+
+message BlockProto {
+ // The span of the block.
+ optional int32 size = 1;
+
+ // Position along the row or column (depending on storage orientation).
+ optional int32 position = 2;
+}
+
+message CellProto {
+ // Column or row block id as appropriate.
+ optional int32 block_id = 1;
+
+ // Position in the values array where the cell is located. Each cell is stored
+ // as a row-major chunk inside the values array.
+ optional int32 position = 2;
+}
+
+// A single row or column, depending on the matrix type.
+message CompressedRowProto {
+ optional BlockProto block = 2;
+ repeated CellProto cells = 1;
+}
+
+message BlockStructureProto {
+ repeated BlockProto cols = 1;
+ repeated CompressedRowProto rows = 2;
+}
+
+// A block sparse matrix, either in column major or row major format.
+message BlockSparseMatrixProto {
+ optional int64 num_rows = 2;
+ optional int64 num_cols = 3;
+ optional int64 num_nonzeros = 4;
+ repeated double values = 1 [packed=true];
+
+ optional BlockStructureProto block_structure = 5;
+}
+
+message TripletSparseMatrixProto {
+ optional int64 num_rows = 4;
+ optional int64 num_cols = 5;
+ optional int64 num_nonzeros = 6;
+
+ // The data is stored as three arrays. For each i, values(i) is stored at the
+ // location (rows(i), cols(i)). If there are multiple entries with the
+ // same (rows(i), cols(i)), the values entries corresponding to them are
+ // summed up.
+ repeated int64 rows = 1 [packed=true];
+ repeated int64 cols = 2 [packed=true];
+ repeated double values = 3 [packed=true];
+}
+
+message CompressedRowSparseMatrixProto {
+ optional int64 num_rows = 4;
+ optional int64 num_cols = 5;
+
+ repeated int64 rows = 1 [packed=true];
+ repeated int64 cols = 2 [packed=true];
+ repeated double values = 3 [packed=true];
+}
+
+message DenseSparseMatrixProto {
+ optional int64 num_rows = 1;
+ optional int64 num_cols = 2;
+
+ // Entries are stored in row-major order.
+ repeated double values = 3 [packed=true];
+}
+
+// A sparse matrix. It is a union; only one field is permitted. If new sparse
+// implementations are added, update this proto accordingly.
+message SparseMatrixProto {
+ optional TripletSparseMatrixProto triplet_matrix = 1;
+ optional BlockSparseMatrixProto block_matrix = 2;
+ optional CompressedRowSparseMatrixProto compressed_row_matrix = 3;
+ optional DenseSparseMatrixProto dense_matrix = 4;
+}
+
+// A linear least squares problem.
+//
+// Given a matrix A, an optional diagonal matrix D as a vector, and a vector b,
+// the proto represents the following linear least squares problem.
+//
+// | A | x = | b |
+// | D | | 0 |
+//
+// If D is empty, then the problem is considered to be
+//
+// A x = b
+//
+// The desired solution for the problem is the vector x that solves the
+// following optimization problem:
+//
+// arg min_x ||Ax - b||^2 + ||Dx||^2
+//
+// If x is present, then it is the expected solution to the
+// problem. The dimensions of A, b, x, and D should be consistent.
+message LinearLeastSquaresProblemProto {
+ optional SparseMatrixProto a = 1;
+ repeated double b = 2 [packed=true];
+ repeated double d = 3 [packed=true];
+ repeated double x = 4 [packed=true];
+ // If the problem is of SfM type, i.e. it has a generalized
+ // bi-partite structure, then num_eliminate_blocks is the number of
+ // column blocks that are to be eliminated in the formation of the
+ // Schur complement. For more details see
+ // explicit_schur_complement_solver.h.
+ optional int32 num_eliminate_blocks = 5;
+}
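For orientation only: with protocol buffer support compiled in, a 2x2 identity in triplet form could be populated through the generated C++ API roughly as follows (a sketch; the accessors are the standard proto2 codegen for the fields above):

#ifndef CERES_NO_PROTOCOL_BUFFERS
#include "ceres/matrix.pb.h"

ceres::internal::TripletSparseMatrixProto MakeTinyIdentity() {
  ceres::internal::TripletSparseMatrixProto proto;
  proto.set_num_rows(2);
  proto.set_num_cols(2);
  proto.set_num_nonzeros(2);
  // values(i) is stored at (rows(i), cols(i)); duplicate positions are summed.
  proto.add_rows(0); proto.add_cols(0); proto.add_values(1.0);
  proto.add_rows(1); proto.add_cols(1); proto.add_values(1.0);
  return proto;
}
#endif  // CERES_NO_PROTOCOL_BUFFERS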
diff --git a/internal/ceres/matrix_proto.h b/internal/ceres/matrix_proto.h
new file mode 100644
index 0000000..94b3076
--- /dev/null
+++ b/internal/ceres/matrix_proto.h
@@ -0,0 +1,40 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// A portability header to make optional protocol buffer support less intrusive.
+
+#ifndef CERES_INTERNAL_MATRIX_PROTO_H_
+#define CERES_INTERNAL_MATRIX_PROTO_H_
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+#include "ceres/matrix.pb.h"
+#endif
+
+#endif // CERES_INTERNAL_MATRIX_PROTO_H_
diff --git a/internal/ceres/miniglog/glog/logging.cc b/internal/ceres/miniglog/glog/logging.cc
new file mode 100644
index 0000000..32a78ce
--- /dev/null
+++ b/internal/ceres/miniglog/glog/logging.cc
@@ -0,0 +1,39 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "glog/logging.h"
+
+namespace google {
+
+// This is the set of log sinks. This must be in a separate library to ensure
+// that there is only one instance of this across the entire program.
+std::set<google::LogSink *> log_sinks_global;
+
+} // namespace google
diff --git a/internal/ceres/miniglog/glog/logging.h b/internal/ceres/miniglog/glog/logging.h
new file mode 100644
index 0000000..1fc137b
--- /dev/null
+++ b/internal/ceres/miniglog/glog/logging.h
@@ -0,0 +1,391 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: settinger@google.com (Scott Ettinger)
+// keir@google.com (Keir Mierle)
+//
+// Simplified Glog style logging with Android support. Supported macros in
+// decreasing severity level per line:
+//
+// VLOG(2), VLOG(N)
+// VLOG(1),
+// LOG(INFO), VLOG(0), LG
+// LOG(WARNING),
+// LOG(ERROR),
+// LOG(FATAL),
+//
+// With VLOG(n), the output is directed to one of the 5 Android log levels:
+//
+// 2 - Verbose
+// 1 - Debug
+// 0 - Info
+// -1 - Warning
+// -2 - Error
+// -3 - Fatal
+//
+// Any logging of level 2 and above is directed to the Verbose level. All
+// Android log output is tagged with the string "native".
+//
+// If the symbol ANDROID is not defined, all output goes to std::cerr.
+// This allows code to be built and debugged on a non-Android system.
+//
+// Portions of this code are taken from the GLOG package. This code is only a
+// small subset of the GLOG functionality. Notable differences from GLOG
+// behavior include lack of support for displaying unprintable characters and
+// lack of stack trace information upon failure of the CHECK macros. On
+// non-Android systems, log output goes to std::cerr and is not written to a
+// file.
+//
+// CHECK macros are defined to test for conditions within code. Any CHECK that
+// fails will log the failure and terminate the application.
+// e.g. CHECK_GE(3, 2) will pass while CHECK_GE(3, 4) will fail after logging
+// "Check failed 3 >= 4".
+//
+// The following CHECK macros are defined:
+//
+// CHECK(condition) - fails if condition is false and logs condition.
+// CHECK_NOTNULL(variable) - fails if the variable is NULL.
+//
+// The following binary check macros are also defined :
+//
+// Macro Operator equivalent
+// -------------------- -------------------
+// CHECK_EQ(val1, val2) val1 == val2
+// CHECK_NE(val1, val2) val1 != val2
+// CHECK_GT(val1, val2) val1 > val2
+// CHECK_GE(val1, val2) val1 >= val2
+// CHECK_LT(val1, val2) val1 < val2
+// CHECK_LE(val1, val2) val1 <= val2
+//
+// Debug only versions of all of the check macros are also defined. These
+// macros generate no code in a release build, but avoid unused variable
+// warnings / errors.
+//
+// To use the debug only versions, prepend a D to the normal check macros, e.g.
+// DCHECK_EQ(a, b).
+
+#ifndef CERES_INTERNAL_MINIGLOG_GLOG_LOGGING_H_
+#define CERES_INTERNAL_MINIGLOG_GLOG_LOGGING_H_
+
+#ifdef ANDROID
+#include <android/log.h>
+#endif // ANDROID
+
+#include <algorithm>
+#include <iostream>
+#include <string>
+#include <fstream>
+#include <set>
+#include <sstream>
+#include <vector>
+
+// Log severity level constants.
+const int FATAL = -3;
+const int ERROR = -2;
+const int WARNING = -1;
+const int INFO = 0;
+
+// ------------------------- Glog compatibility ------------------------------
+
+namespace google {
+
+typedef int LogSeverity;
+const int INFO = ::INFO;
+const int WARNING = ::WARNING;
+const int ERROR = ::ERROR;
+const int FATAL = ::FATAL;
+
+// Sink class used for integration with mock and test functions. If sinks are
+// added, all log output is also sent to each sink through the send function.
+// In this implementation, WaitTillSent() is called immediately after the send.
+// This implementation is not thread safe.
+class LogSink {
+ public:
+ virtual ~LogSink() {}
+ virtual void send(LogSeverity severity,
+ const char* full_filename,
+ const char* base_filename,
+ int line,
+ const struct tm* tm_time,
+ const char* message,
+ size_t message_len) = 0;
+ virtual void WaitTillSent() = 0;
+};
+
+// Global set of log sinks. The actual object is defined in logging.cc.
+extern std::set<LogSink *> log_sinks_global;
+
+inline void InitGoogleLogging(char *argv) {
+ // Do nothing; this is ignored.
+}
+
+// Note: the Log sink functions are not thread safe.
+inline void AddLogSink(LogSink *sink) {
+ // TODO(settinger): Add locks for thread safety.
+ log_sinks_global.insert(sink);
+}
+inline void RemoveLogSink(LogSink *sink) {
+ log_sinks_global.erase(sink);
+}
+
+} // namespace google
+
+// ---------------------------- Logger Class --------------------------------
+
+// Class created for each use of the logging macros.
+// The logger acts as a stream and routes the final stream contents to the
+// Android logcat output at the proper filter level. If ANDROID is not
+// defined, output is directed to std::cerr. This class should not
+// be directly instantiated in code, rather it should be invoked through the
+// use of the log macros LG, LOG, or VLOG.
+class MessageLogger {
+ public:
+ MessageLogger(const char *file, int line, const char *tag, int severity)
+ : file_(file), line_(line), tag_(tag), severity_(severity) {
+ // Prepend the file name and line number to the stream.
+ StripBasename(std::string(file), &filename_only_);
+ stream_ << filename_only_ << ":" << line << " ";
+ }
+
+ // Output the contents of the stream to the proper channel on destruction.
+ ~MessageLogger() {
+ stream_ << "\n";
+
+#ifdef ANDROID
+ static const int android_log_levels[] = {
+ ANDROID_LOG_FATAL, // LOG(FATAL)
+ ANDROID_LOG_ERROR, // LOG(ERROR)
+ ANDROID_LOG_WARN, // LOG(WARNING)
+ ANDROID_LOG_INFO, // LOG(INFO), LG, VLOG(0)
+ ANDROID_LOG_DEBUG, // VLOG(1)
+ ANDROID_LOG_VERBOSE, // VLOG(2) .. VLOG(N)
+ };
+
+ // Bound the logging level.
+ const int kMaxVerboseLevel = 2;
+ int android_level_index = std::min(std::max(FATAL, severity_),
+ kMaxVerboseLevel) - FATAL;
+ int android_log_level = android_log_levels[android_level_index];
+
+ // Output the log string to the Android log at the appropriate level.
+ __android_log_print(android_log_level, tag_.c_str(), "%s", stream_.str().c_str());
+
+ // Indicate termination if needed.
+ if (severity_ == FATAL) {
+ __android_log_print(ANDROID_LOG_FATAL,
+ tag_.c_str(),
+ "terminating.\n");
+ }
+#else
+ // If not building on Android, log all output to std::cerr.
+ std::cerr << stream_.str();
+#endif // ANDROID
+
+ LogToSinks(severity_);
+ WaitForSinks();
+
+ // Android logging at level FATAL does not terminate execution, so abort()
+ // is still required to stop the program.
+ if (severity_ == FATAL) {
+ abort();
+ }
+ }
+
+ // Return the stream associated with the logger object.
+ std::stringstream &stream() { return stream_; }
+
+ private:
+ void LogToSinks(int severity) {
+ time_t rawtime;
+ struct tm* timeinfo;
+
+ time (&rawtime);
+ timeinfo = localtime(&rawtime);
+ std::set<google::LogSink*>::iterator iter;
+ // Send the log message to all sinks.
+ for (iter = google::log_sinks_global.begin();
+ iter != google::log_sinks_global.end(); ++iter) {
+ (*iter)->send(severity, file_.c_str(), filename_only_.c_str(), line_,
+ timeinfo, stream_.str().c_str(), stream_.str().size());
+ }
+ }
+
+ void WaitForSinks() {
+ // TODO(settinger): Add locks for thread safety.
+ std::set<google::LogSink *>::iterator iter;
+
+ // Call WaitTillSent() for all sinks.
+ for (iter = google::log_sinks_global.begin();
+ iter != google::log_sinks_global.end(); ++iter) {
+ (*iter)->WaitTillSent();
+ }
+ }
+
+ void StripBasename(const std::string &full_path, std::string *filename) {
+ // TODO(settinger): add support for OS with different path separators.
+ const char kSeparator = '/';
+ size_t pos = full_path.rfind(kSeparator);
+ if (pos != std::string::npos) {
+ *filename = full_path.substr(pos + 1, std::string::npos);
+ } else {
+ *filename = full_path;
+ }
+ }
+
+ std::string file_;
+ std::string filename_only_;
+ int line_;
+ std::string tag_;
+ std::stringstream stream_;
+ int severity_;
+};
+
+// ---------------------- Logging Macro definitions --------------------------
+
+#define LG MessageLogger((char *)__FILE__, __LINE__, "native", \
+ INFO).stream()
+
+#define LOG(n) MessageLogger((char *)__FILE__, __LINE__, "native", \
+ n).stream()
+
+#define VLOG(n) MessageLogger((char *)__FILE__, __LINE__, "native", \
+ n).stream()
+
+// Currently, VLOG is always on.
+#define VLOG_IS_ON(x) true
+
+// ---------------------------- CHECK helpers --------------------------------
+
+// This class is used to explicitly ignore values in the conditional
+// logging macros. This avoids compiler warnings like "value computed
+// is not used" and "statement has no effect".
+class LoggerVoidify {
+ public:
+ LoggerVoidify() { }
+ // This has to be an operator with a precedence lower than << but
+ // higher than ?:
+ void operator&(const std::ostream &s) { }
+};
+
+// Log only if condition is met. Otherwise evaluates to void.
+#define LOG_IF(severity, condition) \
+ condition ? (void) 0 : LoggerVoidify() & LOG(severity)
+
+// Log a message and terminate.
+template<class T>
+void LogMessageFatal(const char *file, int line, const T &message) {
+ MessageLogger((char *)file, line, "native", FATAL).stream()
+ << message;
+}
+
+// ---------------------------- CHECK macros ---------------------------------
+
+// Check for a given boolean condition.
+#define CHECK(condition) LOG_IF(FATAL, condition) \
+ << "Check failed: " #condition " "
+
+#ifndef NDEBUG
+// Debug only version of CHECK
+#define DCHECK(condition) LOG_IF(FATAL, condition) \
+ << "Check failed: " #condition " "
+#else
+// Optimized version - generates no code.
+#define DCHECK(condition) if (false) LOG_IF(FATAL, condition) \
+ << "Check failed: " #condition " "
+#endif // NDEBUG
+
+// ------------------------- CHECK_OP macros ---------------------------------
+
+// Generic binary operator check macro. This should not be directly invoked,
+// instead use the binary comparison macros defined below.
+#define CHECK_OP(val1, val2, op) LOG_IF(FATAL, (val1 op val2)) \
+ << "Check failed: " #val1 " " #op " " #val2 " "
+
+// Check_op macro definitions
+#define CHECK_EQ(val1, val2) CHECK_OP(val1, val2, ==)
+#define CHECK_NE(val1, val2) CHECK_OP(val1, val2, !=)
+#define CHECK_LE(val1, val2) CHECK_OP(val1, val2, <=)
+#define CHECK_LT(val1, val2) CHECK_OP(val1, val2, <)
+#define CHECK_GE(val1, val2) CHECK_OP(val1, val2, >=)
+#define CHECK_GT(val1, val2) CHECK_OP(val1, val2, >)
+
+#ifndef NDEBUG
+// Debug only versions of CHECK_OP macros.
+#define DCHECK_EQ(val1, val2) CHECK_OP(val1, val2, ==)
+#define DCHECK_NE(val1, val2) CHECK_OP(val1, val2, !=)
+#define DCHECK_LE(val1, val2) CHECK_OP(val1, val2, <=)
+#define DCHECK_LT(val1, val2) CHECK_OP(val1, val2, <)
+#define DCHECK_GE(val1, val2) CHECK_OP(val1, val2, >=)
+#define DCHECK_GT(val1, val2) CHECK_OP(val1, val2, >)
+#else
+// These versions generate no code in optimized mode.
+#define DCHECK_EQ(val1, val2) if (false) CHECK_OP(val1, val2, ==)
+#define DCHECK_NE(val1, val2) if (false) CHECK_OP(val1, val2, !=)
+#define DCHECK_LE(val1, val2) if (false) CHECK_OP(val1, val2, <=)
+#define DCHECK_LT(val1, val2) if (false) CHECK_OP(val1, val2, <)
+#define DCHECK_GE(val1, val2) if (false) CHECK_OP(val1, val2, >=)
+#define DCHECK_GT(val1, val2) if (false) CHECK_OP(val1, val2, >)
+#endif // NDEBUG
+
+// ---------------------------CHECK_NOTNULL macros ---------------------------
+
+// Helpers for CHECK_NOTNULL(). Two are necessary to support both raw pointers
+// and smart pointers.
+template <typename T>
+T& CheckNotNullCommon(const char *file, int line, const char *names, T& t) {
+ if (t == NULL) {
+ LogMessageFatal(file, line, std::string(names));
+ }
+ return t;
+}
+
+template <typename T>
+T* CheckNotNull(const char *file, int line, const char *names, T* t) {
+ return CheckNotNullCommon(file, line, names, t);
+}
+
+template <typename T>
+T& CheckNotNull(const char *file, int line, const char *names, T& t) {
+ return CheckNotNullCommon(file, line, names, t);
+}
+
+// Check that a pointer is not null.
+#define CHECK_NOTNULL(val) \
+ CheckNotNull(__FILE__, __LINE__, "'" #val "' Must be non NULL", (val))
+
+#ifndef NDEBUG
+// Debug only version of CHECK_NOTNULL
+#define DCHECK_NOTNULL(val) \
+ CheckNotNull(__FILE__, __LINE__, "'" #val "' Must be non NULL", (val))
+#else
+// Optimized version - generates no code.
+#define DCHECK_NOTNULL(val) if (false)\
+ CheckNotNull(__FILE__, __LINE__, "'" #val "' Must be non NULL", (val))
+#endif // NDEBUG
+
+#endif // CERES_INTERNAL_MINIGLOG_GLOG_LOGGING_H_
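Typical use of the macros defined above (illustrative only; on non-Android builds all output goes to std::cerr, and the DCHECK_* forms compile away under NDEBUG):

#include "glog/logging.h"

void MiniglogSketch(int num_residuals, const double* residuals) {
  CHECK_NOTNULL(residuals);
  CHECK_GT(num_residuals, 0) << "Need at least one residual.";

  LOG(INFO) << "Evaluating " << num_residuals << " residuals";
  VLOG(2) << "Verbose detail, mapped to the Android verbose level";

  if (num_residuals > 1000) {
    LOG(WARNING) << "Unusually large residual block: " << num_residuals;
  }

  DCHECK_GE(num_residuals, 1);  // Generates no code in optimized builds.
}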
diff --git a/internal/ceres/minimizer.h b/internal/ceres/minimizer.h
new file mode 100644
index 0000000..22c10f0
--- /dev/null
+++ b/internal/ceres/minimizer.h
@@ -0,0 +1,149 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_MINIMIZER_H_
+#define CERES_INTERNAL_MINIMIZER_H_
+
+#include <vector>
+#include "ceres/solver.h"
+#include "ceres/iteration_callback.h"
+
+namespace ceres {
+namespace internal {
+
+class Evaluator;
+class LinearSolver;
+class SparseMatrix;
+class TrustRegionStrategy;
+
+// Interface for non-linear least squares solvers.
+class Minimizer {
+ public:
+ // Options struct to control the behaviour of the Minimizer. Please
+ // see solver.h for detailed information about the meaning and
+ // default values of each of these parameters.
+ struct Options {
+ Options() {
+ Init(Solver::Options());
+ }
+
+ explicit Options(const Solver::Options& options) {
+ Init(options);
+ }
+
+ void Init(const Solver::Options& options) {
+ num_threads = options.num_threads;
+ max_num_iterations = options.max_num_iterations;
+ max_solver_time_in_seconds = options.max_solver_time_in_seconds;
+ max_step_solver_retries = 5;
+ gradient_tolerance = options.gradient_tolerance;
+ parameter_tolerance = options.parameter_tolerance;
+ function_tolerance = options.function_tolerance;
+ min_relative_decrease = options.min_relative_decrease;
+ eta = options.eta;
+ jacobi_scaling = options.jacobi_scaling;
+ use_nonmonotonic_steps = options.use_nonmonotonic_steps;
+ max_consecutive_nonmonotonic_steps =
+ options.max_consecutive_nonmonotonic_steps;
+ lsqp_dump_directory = options.lsqp_dump_directory;
+ lsqp_iterations_to_dump = options.lsqp_iterations_to_dump;
+ lsqp_dump_format_type = options.lsqp_dump_format_type;
+ max_num_consecutive_invalid_steps =
+ options.max_num_consecutive_invalid_steps;
+ min_trust_region_radius = options.min_trust_region_radius;
+ evaluator = NULL;
+ trust_region_strategy = NULL;
+ jacobian = NULL;
+ callbacks = options.callbacks;
+ inner_iteration_minimizer = NULL;
+ }
+
+ int max_num_iterations;
+ double max_solver_time_in_seconds;
+
+ int num_threads;
+
+ // Number of times the linear solver should be retried in case of
+ // numerical failure. The retries are done by exponentially scaling up
+ // mu at each retry. This leads to stronger and stronger
+ // regularization making the linear least squares problem better
+ // conditioned at each retry.
+ int max_step_solver_retries;
+ double gradient_tolerance;
+ double parameter_tolerance;
+ double function_tolerance;
+ double min_relative_decrease;
+ double eta;
+ bool jacobi_scaling;
+ bool use_nonmonotonic_steps;
+ int max_consecutive_nonmonotonic_steps;
+ vector<int> lsqp_iterations_to_dump;
+ DumpFormatType lsqp_dump_format_type;
+ string lsqp_dump_directory;
+ int max_num_consecutive_invalid_steps;
+ double min_trust_region_radius;
+
+ // List of callbacks that are executed by the Minimizer at the end
+ // of each iteration.
+ //
+ // The Options struct does not own these pointers.
+ vector<IterationCallback*> callbacks;
+
+ // Object responsible for evaluating the cost, residuals and
+ // Jacobian matrix. The Options struct does not own this pointer.
+ Evaluator* evaluator;
+
+ // Object responsible for actually computing the trust region
+ // step, and sizing the trust region radius. The Options struct
+ // does not own this pointer.
+ TrustRegionStrategy* trust_region_strategy;
+
+ // Object holding the Jacobian matrix. It is assumed that the
+ // sparsity structure of the matrix has already been initialized
+ // and will remain constant for the life time of the
+ // optimization. The Options struct does not own this pointer.
+ SparseMatrix* jacobian;
+
+ Minimizer* inner_iteration_minimizer;
+ };
+
+ virtual ~Minimizer() {}
+
+ // Note: The minimizer is expected to update the state of the
+ // parameters array every iteration. This is required for the
+ // StateUpdatingCallback to work.
+ virtual void Minimize(const Options& options,
+ double* parameters,
+ Solver::Summary* summary) = 0;
+};
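+
+// A minimal sketch of a conforming implementation (illustrative only, not a
+// minimizer shipped in this file):
+//
+//   class DoNothingMinimizer : public Minimizer {
+//    public:
+//     virtual void Minimize(const Options& options,
+//                           double* parameters,
+//                           Solver::Summary* summary) {
+//       // A real minimizer would iterate here, writing the latest values
+//       // back into 'parameters' every iteration so that callbacks such as
+//       // StateUpdatingCallback observe the current state.
+//     }
+//   };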
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_MINIMIZER_H_
diff --git a/internal/ceres/minimizer_test.cc b/internal/ceres/minimizer_test.cc
new file mode 100644
index 0000000..1058036
--- /dev/null
+++ b/internal/ceres/minimizer_test.cc
@@ -0,0 +1,63 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "gtest/gtest.h"
+#include "ceres/iteration_callback.h"
+#include "ceres/minimizer.h"
+#include "ceres/solver.h"
+
+namespace ceres {
+namespace internal {
+
+class FakeIterationCallback : public IterationCallback {
+ public:
+ virtual ~FakeIterationCallback() {}
+ virtual CallbackReturnType operator()(const IterationSummary& summary) {
+ return SOLVER_CONTINUE;
+ }
+};
+
+TEST(MinimizerTest, InitializationCopiesCallbacks) {
+ FakeIterationCallback callback0;
+ FakeIterationCallback callback1;
+
+ Solver::Options solver_options;
+ solver_options.callbacks.push_back(&callback0);
+ solver_options.callbacks.push_back(&callback1);
+
+ Minimizer::Options minimizer_options(solver_options);
+ ASSERT_EQ(2, minimizer_options.callbacks.size());
+
+ EXPECT_EQ(minimizer_options.callbacks[0], &callback0);
+ EXPECT_EQ(minimizer_options.callbacks[1], &callback1);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/mutex.h b/internal/ceres/mutex.h
new file mode 100644
index 0000000..5090a71
--- /dev/null
+++ b/internal/ceres/mutex.h
@@ -0,0 +1,322 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Craig Silverstein.
+//
+// A simple mutex wrapper, supporting locks and read-write locks.
+// You should assume the locks are *not* re-entrant.
+//
+// This class is meant to be internal-only and should be wrapped by an
+// internal namespace. Before you use this module, please give the
+// name of your internal namespace for this module. Or, if you want
+// to expose it, you'll want to move it to the Google namespace. We
+// cannot put this class in global namespace because there can be some
+// problems when we have multiple versions of Mutex in each shared object.
+//
+// NOTE: by default, we have #ifdef'ed out the TryLock() method.
+// This is for two reasons:
+// 1) TryLock() under Windows is a bit annoying (it requires a
+// #define to be defined very early).
+// 2) TryLock() is broken for NO_THREADS mode, at least in NDEBUG
+// mode.
+// If you need TryLock(), and either these two caveats are not a
+// problem for you, or you're willing to work around them, then
+// feel free to #define GMUTEX_TRYLOCK, or to remove the #ifdefs
+// in the code below.
+//
+// CYGWIN NOTE: Cygwin support for rwlock seems to be buggy:
+// http://www.cygwin.com/ml/cygwin/2008-12/msg00017.html
+// Because of that, we might as well use windows locks for
+// cygwin. They seem to be more reliable than the cygwin pthreads layer.
+//
+// TRICKY IMPLEMENTATION NOTE:
+// This class is designed to be safe to use during
+// dynamic-initialization -- that is, by global constructors that are
+// run before main() starts. The issue in this case is that
+// dynamic-initialization happens in an unpredictable order, and it
+// could be that someone else's dynamic initializer could call a
+// function that tries to acquire this mutex -- but that all happens
+// before this mutex's constructor has run. (This can happen even if
+// the mutex and the function that uses the mutex are in the same .cc
+// file.) Basically, because Mutex does non-trivial work in its
+// constructor, it's not, in the naive implementation, safe to use
+// before dynamic initialization has run on it.
+//
+// The solution used here is to pair the actual mutex primitive with a
+// bool that is set to true when the mutex is dynamically initialized.
+// (Before that it's false.) Then we modify all mutex routines to
+// look at the bool, and not try to lock/unlock until the bool makes
+// it to true (which happens after the Mutex constructor has run.)
+//
+// This works because before main() starts -- particularly, during
+// dynamic initialization -- there are no threads, so a) it's ok that
+// the mutex operations are a no-op, since we don't need locking then
+// anyway; and b) we can be quite confident our bool won't change
+// state between a call to Lock() and a call to Unlock() (that would
+// require a global constructor in one translation unit to call Lock()
+// and another global constructor in another translation unit to call
+// Unlock() later, which is pretty perverse).
+//
+// That said, it's tricky, and can conceivably fail; it's safest to
+// avoid trying to acquire a mutex in a global constructor, if you
+// can. One way it can fail is that a really smart compiler might
+// initialize the bool to true at static-initialization time (too
+// early) rather than at dynamic-initialization time. To discourage
+// that, we set is_safe_ to true in code (not the constructor
+// colon-initializer) and set it to true via a function that always
+// evaluates to true, but that the compiler can't know always
+// evaluates to true. This should be good enough.
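+//
+// A minimal sketch of the hazard described above (the Registrar class is
+// hypothetical, not part of Ceres):
+//
+//   Mutex global_mu;        // dynamic initialization order is unspecified
+//   struct Registrar {
+//     Registrar() { CeresMutexLock l(&global_mu); /* register something */ }
+//   };
+//   Registrar registrar;    // may run before global_mu's constructor
+//
+// With the is_safe_ scheme, the Lock()/Unlock() calls are no-ops until
+// global_mu's constructor has run, which is harmless because no other
+// threads exist during dynamic initialization.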
+
+#ifndef CERES_INTERNAL_MUTEX_H_
+#define CERES_INTERNAL_MUTEX_H_
+
+#if defined(CERES_NO_THREADS)
+ typedef int MutexType; // to keep a lock-count
+#elif defined(_WIN32) || defined(__CYGWIN32__) || defined(__CYGWIN64__)
+# define CERES_WIN32_LEAN_AND_MEAN // We only need minimal includes
+# ifdef CERES_GMUTEX_TRYLOCK
+ // We need Windows NT or later for TryEnterCriticalSection(). If you
+ // don't need that functionality, you can remove these _WIN32_WINNT
+ // lines, and change TryLock() to assert(0) or something.
+# ifndef _WIN32_WINNT
+# define _WIN32_WINNT 0x0400
+# endif
+# endif
+// To avoid macro definition of ERROR.
+# define CERES_NOGDI
+// To avoid macro definition of min/max.
+# define CERES_NOMINMAX
+# include <windows.h>
+ typedef CRITICAL_SECTION MutexType;
+#elif defined(CERES_HAVE_PTHREAD) && defined(CERES_HAVE_RWLOCK)
+ // Needed for pthread_rwlock_*. If it causes problems, you could take it
+ // out, but then you'd have to unset CERES_HAVE_RWLOCK (at least on linux --
+ // it *does* cause problems for FreeBSD, or MacOSX, but isn't needed for
+ // locking there.)
+# if defined(__linux__) && !defined(_XOPEN_SOURCE)
+# define _XOPEN_SOURCE 500 // may be needed to get the rwlock calls
+# endif
+# include <pthread.h>
+ typedef pthread_rwlock_t MutexType;
+#elif defined(CERES_HAVE_PTHREAD)
+# include <pthread.h>
+ typedef pthread_mutex_t MutexType;
+#else
+# error Need to implement mutex.h for your architecture, or #define CERES_NO_THREADS
+#endif
+
+// We need to include these header files after defining _XOPEN_SOURCE
+// as they may define the _XOPEN_SOURCE macro.
+#include <assert.h>
+#include <stdlib.h> // for abort()
+
+namespace ceres {
+namespace internal {
+
+class Mutex {
+ public:
+ // Create a Mutex that is not held by anybody. This constructor is
+ // typically used for Mutexes allocated on the heap or the stack.
+ // See below for a recommendation for constructing global Mutex
+ // objects.
+ inline Mutex();
+
+ // Destructor
+ inline ~Mutex();
+
+ inline void Lock(); // Block if needed until free then acquire exclusively
+ inline void Unlock(); // Release a lock acquired via Lock()
+#ifdef CERES_GMUTEX_TRYLOCK
+ inline bool TryLock(); // If free, Lock() and return true, else return false
+#endif
+ // Note that on systems that don't support read-write locks, these may
+ // be implemented as synonyms to Lock() and Unlock(). So you can use
+ // these for efficiency, but don't use them anyplace where being able
+ // to do shared reads is necessary to avoid deadlock.
+ inline void ReaderLock(); // Block until free or shared then acquire a share
+ inline void ReaderUnlock(); // Release a read share of this Mutex
+ inline void WriterLock() { Lock(); } // Acquire an exclusive lock
+ inline void WriterUnlock() { Unlock(); } // Release a lock from WriterLock()
+
+ // TODO(hamaji): Do nothing, implement correctly.
+ inline void AssertHeld() {}
+
+ private:
+ MutexType mutex_;
+ // We want to make sure that the compiler sets is_safe_ to true only
+ // when we tell it to, and never makes assumptions is_safe_ is
+ // always true. volatile is the most reliable way to do that.
+ volatile bool is_safe_;
+
+ inline void SetIsSafe() { is_safe_ = true; }
+
+ // Catch the error of writing Mutex when intending MutexLock.
+ Mutex(Mutex* /*ignored*/) {}
+ // Disallow "evil" constructors
+ Mutex(const Mutex&);
+ void operator=(const Mutex&);
+};
+
+// Now the implementation of Mutex for various systems
+#if defined(CERES_NO_THREADS)
+
+// When we don't have threads, we can be either reading or writing,
+// but not both. We can have lots of readers at once (in no-threads
+// mode, that's most likely to happen in recursive function calls),
+// but only one writer. We represent this by having mutex_ be -1 when
+// writing and a number > 0 when reading (and 0 when no lock is held).
+//
+// In debug mode, we assert these invariants, while in non-debug mode
+// we do nothing, for efficiency. That's why everything is in an
+// assert.
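+//
+// Illustrative only -- the counter transitions this mode expects (assuming
+// NDEBUG is not defined, so the asserts are active):
+//
+//   Mutex m;          // mutex_ == 0   (unlocked)
+//   m.ReaderLock();   // mutex_ == 1
+//   m.ReaderLock();   // mutex_ == 2   (e.g. a recursive call)
+//   m.ReaderUnlock(); // mutex_ == 1
+//   m.ReaderUnlock(); // mutex_ == 0
+//   m.Lock();         // mutex_ == -1  (exclusive)
+//   m.Unlock();       // mutex_ == 0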
+
+Mutex::Mutex() : mutex_(0) { }
+Mutex::~Mutex() { assert(mutex_ == 0); }
+void Mutex::Lock() { assert(--mutex_ == -1); }
+void Mutex::Unlock() { assert(mutex_++ == -1); }
+#ifdef CERES_GMUTEX_TRYLOCK
+bool Mutex::TryLock() { if (mutex_) return false; Lock(); return true; }
+#endif
+void Mutex::ReaderLock() { assert(++mutex_ > 0); }
+void Mutex::ReaderUnlock() { assert(mutex_-- > 0); }
+
+#elif defined(_WIN32) || defined(__CYGWIN32__) || defined(__CYGWIN64__)
+
+Mutex::Mutex() { InitializeCriticalSection(&mutex_); SetIsSafe(); }
+Mutex::~Mutex() { DeleteCriticalSection(&mutex_); }
+void Mutex::Lock() { if (is_safe_) EnterCriticalSection(&mutex_); }
+void Mutex::Unlock() { if (is_safe_) LeaveCriticalSection(&mutex_); }
+#ifdef CERES_GMUTEX_TRYLOCK
+bool Mutex::TryLock() { return is_safe_ ?
+ TryEnterCriticalSection(&mutex_) != 0 : true; }
+#endif
+void Mutex::ReaderLock() { Lock(); } // we don't have read-write locks
+void Mutex::ReaderUnlock() { Unlock(); }
+
+#elif defined(CERES_HAVE_PTHREAD) && defined(CERES_HAVE_RWLOCK)
+
+#define CERES_SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
+ if (is_safe_ && fncall(&mutex_) != 0) abort(); \
+} while (0)
+
+Mutex::Mutex() {
+ SetIsSafe();
+ if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort();
+}
+Mutex::~Mutex() { CERES_SAFE_PTHREAD(pthread_rwlock_destroy); }
+void Mutex::Lock() { CERES_SAFE_PTHREAD(pthread_rwlock_wrlock); }
+void Mutex::Unlock() { CERES_SAFE_PTHREAD(pthread_rwlock_unlock); }
+#ifdef CERES_GMUTEX_TRYLOCK
+bool Mutex::TryLock() { return is_safe_ ?
+ pthread_rwlock_trywrlock(&mutex_) == 0 :
+ true; }
+#endif
+void Mutex::ReaderLock() { CERES_SAFE_PTHREAD(pthread_rwlock_rdlock); }
+void Mutex::ReaderUnlock() { CERES_SAFE_PTHREAD(pthread_rwlock_unlock); }
+#undef CERES_SAFE_PTHREAD
+
+#elif defined(CERES_HAVE_PTHREAD)
+
+#define CERES_SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
+ if (is_safe_ && fncall(&mutex_) != 0) abort(); \
+} while (0)
+
+Mutex::Mutex() {
+ SetIsSafe();
+ if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort();
+}
+Mutex::~Mutex() { CERES_SAFE_PTHREAD(pthread_mutex_destroy); }
+void Mutex::Lock() { CERES_SAFE_PTHREAD(pthread_mutex_lock); }
+void Mutex::Unlock() { CERES_SAFE_PTHREAD(pthread_mutex_unlock); }
+#ifdef CERES_GMUTEX_TRYLOCK
+bool Mutex::TryLock() { return is_safe_ ?
+ pthread_mutex_trylock(&mutex_) == 0 : true; }
+#endif
+void Mutex::ReaderLock() { Lock(); }
+void Mutex::ReaderUnlock() { Unlock(); }
+#undef CERES_SAFE_PTHREAD
+
+#endif
+
+// --------------------------------------------------------------------------
+// Some helper classes
+
+// Note: The weird "Ceres" prefix for the class is a workaround for having two
+// similar mutex.h files included in the same translation unit. This is a
+// problem because macros do not respect C++ namespaces, and as a result, this
+// does not work well (e.g. inside Chrome). The offending macros are
+// "MutexLock(x) COMPILE_ASSERT(false)". To work around this, "Ceres" is
+// prefixed to the class names; this permits defining the classes.
+
+// CeresMutexLock(mu) acquires mu when constructed and releases it when destroyed.
+class CeresMutexLock {
+ public:
+ explicit CeresMutexLock(Mutex *mu) : mu_(mu) { mu_->Lock(); }
+ ~CeresMutexLock() { mu_->Unlock(); }
+ private:
+ Mutex * const mu_;
+ // Disallow "evil" constructors
+ CeresMutexLock(const CeresMutexLock&);
+ void operator=(const CeresMutexLock&);
+};
+
+// CeresReaderMutexLock and CeresWriterMutexLock do the same, for rwlocks
+class CeresReaderMutexLock {
+ public:
+ explicit CeresReaderMutexLock(Mutex *mu) : mu_(mu) { mu_->ReaderLock(); }
+ ~CeresReaderMutexLock() { mu_->ReaderUnlock(); }
+ private:
+ Mutex * const mu_;
+ // Disallow "evil" constructors
+ CeresReaderMutexLock(const CeresReaderMutexLock&);
+ void operator=(const CeresReaderMutexLock&);
+};
+
+class CeresWriterMutexLock {
+ public:
+ explicit CeresWriterMutexLock(Mutex *mu) : mu_(mu) { mu_->WriterLock(); }
+ ~CeresWriterMutexLock() { mu_->WriterUnlock(); }
+ private:
+ Mutex * const mu_;
+ // Disallow "evil" constructors
+ CeresWriterMutexLock(const CeresWriterMutexLock&);
+ void operator=(const CeresWriterMutexLock&);
+};
+
+// Catch bug where variable name is omitted, e.g. MutexLock (&mu);
+#define CeresMutexLock(x) \
+ COMPILE_ASSERT(0, ceres_mutex_lock_decl_missing_var_name)
+#define CeresReaderMutexLock(x) \
+ COMPILE_ASSERT(0, ceres_rmutex_lock_decl_missing_var_name)
+#define CeresWriterMutexLock(x) \
+ COMPILE_ASSERT(0, ceres_wmutex_lock_decl_missing_var_name)
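+
+// Illustrative usage (not part of this header); for some Mutex mu, the scoped
+// lockers above are meant to be named locals:
+//
+//   {
+//     CeresMutexLock lock(&mu);  // OK: 'lock' holds mu for the whole scope.
+//     // ... touch shared state ...
+//   }                            // mu released here.
+//   CeresMutexLock(&mu);         // Error: caught by the macro above.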
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_MUTEX_H_
diff --git a/internal/ceres/normal_prior.cc b/internal/ceres/normal_prior.cc
new file mode 100644
index 0000000..392d728
--- /dev/null
+++ b/internal/ceres/normal_prior.cc
@@ -0,0 +1,66 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/normal_prior.h"
+
+#include <cstddef>
+#include <vector>
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+
+NormalPrior::NormalPrior(const Matrix& A, const Vector& b)
+ : A_(A), b_(b) {
+ CHECK_GT(b_.rows(), 0);
+ CHECK_GT(A_.rows(), 0);
+ CHECK_EQ(b_.rows(), A.cols());
+ set_num_residuals(A_.rows());
+ mutable_parameter_block_sizes()->push_back(b_.rows());
+}
+
+bool NormalPrior::Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ ConstVectorRef p(parameters[0], parameter_block_sizes()[0]);
+ VectorRef r(residuals, num_residuals());
+ // The following line should read
+ // r = A_ * (p - b_);
+ // The extra eval is to get around a bug in the eigen library.
+ r = A_ * (p - b_).eval();
+ if ((jacobians != NULL) && (jacobians[0] != NULL)) {
+ MatrixRef(jacobians[0], num_residuals(), parameter_block_sizes()[0]) = A_;
+ }
+ return true;
+}
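+
+// Illustrative usage (a sketch, not code in this file): given A with
+// A.cols() == b.rows() and a parameter array x of size A.cols(),
+//
+//   ceres::Problem problem;
+//   problem.AddResidualBlock(new NormalPrior(A, b), NULL, x);
+//
+// adds the residual r = A * (x - b) to the problem.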
+
+} // namespace ceres
diff --git a/internal/ceres/normal_prior_test.cc b/internal/ceres/normal_prior_test.cc
new file mode 100644
index 0000000..67af881
--- /dev/null
+++ b/internal/ceres/normal_prior_test.cc
@@ -0,0 +1,133 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/normal_prior.h"
+
+#include <cstddef>
+
+#include "gtest/gtest.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/random.h"
+
+namespace ceres {
+namespace internal {
+
+void RandomVector(Vector* v) {
+ for (int r = 0; r < v->rows(); ++r)
+ (*v)[r] = 2 * RandDouble() - 1;
+}
+
+void RandomMatrix(Matrix* m) {
+ for (int r = 0; r < m->rows(); ++r) {
+ for (int c = 0; c < m->cols(); ++c) {
+ (*m)(r, c) = 2 * RandDouble() - 1;
+ }
+ }
+}
+
+TEST(NormalPriorTest, ResidualAtRandomPosition) {
+ srand(5);
+
+ for (int num_rows = 1; num_rows < 5; ++num_rows) {
+ for (int num_cols = 1; num_cols < 5; ++num_cols) {
+ Vector b(num_cols);
+ RandomVector(&b);
+
+ Matrix A(num_rows, num_cols);
+ RandomMatrix(&A);
+
+ double * x = new double[num_cols];
+ for (int i = 0; i < num_cols; ++i)
+ x[i] = 2 * RandDouble() - 1;
+
+ double * jacobian = new double[num_rows * num_cols];
+ Vector residuals(num_rows);
+
+ NormalPrior prior(A, b);
+ prior.Evaluate(&x, residuals.data(), &jacobian);
+
+ // Compare the norm of the residual
+ double residual_diff_norm =
+ (residuals - A * (VectorRef(x, num_cols) - b)).squaredNorm();
+ EXPECT_NEAR(residual_diff_norm, 0, 1e-10);
+
+ // Compare the jacobians
+ MatrixRef J(jacobian, num_rows, num_cols);
+ double jacobian_diff_norm = (J - A).norm();
+ EXPECT_NEAR(jacobian_diff_norm, 0.0, 1e-10);
+
+ delete []x;
+ delete []jacobian;
+ }
+ }
+}
+
+TEST(NormalPriorTest, ResidualAtRandomPositionNullJacobians) {
+ srand(5);
+
+ for (int num_rows = 1; num_rows < 5; ++num_rows) {
+ for (int num_cols = 1; num_cols < 5; ++num_cols) {
+ Vector b(num_cols);
+ RandomVector(&b);
+
+ Matrix A(num_rows, num_cols);
+ RandomMatrix(&A);
+
+ double * x = new double[num_cols];
+ for (int i = 0; i < num_cols; ++i)
+ x[i] = 2 * RandDouble() - 1;
+
+ double* jacobians[1];
+ jacobians[0] = NULL;
+
+ Vector residuals(num_rows);
+
+ NormalPrior prior(A, b);
+ prior.Evaluate(&x, residuals.data(), jacobians);
+
+ // Compare the norm of the residual
+ double residual_diff_norm =
+ (residuals - A * (VectorRef(x, num_cols) - b)).squaredNorm();
+ EXPECT_NEAR(residual_diff_norm, 0, 1e-10);
+
+ prior.Evaluate(&x, residuals.data(), NULL);
+ // Compare the norm of the residual
+ residual_diff_norm =
+ (residuals - A * (VectorRef(x, num_cols) - b)).squaredNorm();
+ EXPECT_NEAR(residual_diff_norm, 0, 1e-10);
+
+
+ delete []x;
+ }
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/numeric_diff_cost_function_test.cc b/internal/ceres/numeric_diff_cost_function_test.cc
new file mode 100644
index 0000000..df12eb9
--- /dev/null
+++ b/internal/ceres/numeric_diff_cost_function_test.cc
@@ -0,0 +1,271 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/numeric_diff_cost_function.h"
+
+#include <algorithm>
+#include <cmath>
+#include <string>
+#include <vector>
+#include "ceres/cost_function.h"
+#include "ceres/internal/macros.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/stringprintf.h"
+#include "ceres/test_util.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// y1 = x1'x2 -> dy1/dx1 = x2, dy1/dx2 = x1
+// y2 = (x1'x2)^2 -> dy2/dx1 = 2 * x2 * (x1'x2), dy2/dx2 = 2 * x1 * (x1'x2)
+// y3 = x2'x2 -> dy3/dx1 = 0, dy3/dx2 = 2 * x2
+class TestCostFunction : public CostFunction {
+ public:
+ TestCostFunction() {
+ set_num_residuals(3);
+ mutable_parameter_block_sizes()->push_back(5); // x1.
+ mutable_parameter_block_sizes()->push_back(5); // x2.
+ }
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ (void) jacobians; // Ignored.
+
+ residuals[0] = residuals[1] = residuals[2] = 0;
+ for (int i = 0; i < 5; ++i) {
+ residuals[0] += parameters[0][i] * parameters[1][i];
+ residuals[2] += parameters[1][i] * parameters[1][i];
+ }
+ residuals[1] = residuals[0] * residuals[0];
+ return true;
+ }
+};
+
+TEST(NumericDiffCostFunction, EasyCase) {
+ // Try both central and forward difference.
+ internal::scoped_ptr<CostFunction> cfs[2];
+ cfs[0].reset(
+ new NumericDiffCostFunction<TestCostFunction,
+ CENTRAL,
+ 3, /* number of residuals */
+ 5, /* size of x1 */
+ 5 /* size of x2 */>(
+ new TestCostFunction, TAKE_OWNERSHIP));
+
+ cfs[1].reset(
+ new NumericDiffCostFunction<TestCostFunction,
+ FORWARD,
+ 3, /* number of residuals */
+ 5, /* size of x1 */
+ 5 /* size of x2 */>(
+ new TestCostFunction, TAKE_OWNERSHIP));
+
+ for (int c = 0; c < 2; ++c) {
+ CostFunction *cost_function = cfs[c].get();
+
+ double x1[] = { 1.0, 2.0, 3.0, 4.0, 5.0 };
+ double x2[] = { 9.0, 9.0, 5.0, 5.0, 1.0 };
+ double *parameters[] = { &x1[0], &x2[0] };
+
+ double dydx1[15]; // 3 x 5, row major.
+ double dydx2[15]; // 3 x 5, row major.
+ double *jacobians[2] = { &dydx1[0], &dydx2[0] };
+
+ double residuals[3] = {-1e-100, -2e-100, -3e-100 };
+
+ ASSERT_TRUE(cost_function->Evaluate(&parameters[0],
+ &residuals[0],
+ &jacobians[0]));
+
+ EXPECT_EQ(residuals[0], 67);
+ EXPECT_EQ(residuals[1], 4489);
+ EXPECT_EQ(residuals[2], 213);
+
+ for (int i = 0; i < 5; ++i) {
+ LOG(INFO) << "c = " << c << " i = " << i;
+ const double kEps = c == 0 ? /* central */ 3e-9 : /* forward */ 2e-5;
+
+ ExpectClose(x2[i], dydx1[5 * 0 + i], kEps); // y1
+ ExpectClose(x1[i], dydx2[5 * 0 + i], kEps);
+ ExpectClose(2 * x2[i] * residuals[0], dydx1[5 * 1 + i], kEps); // y2
+ ExpectClose(2 * x1[i] * residuals[0], dydx2[5 * 1 + i], kEps);
+ ExpectClose(0.0, dydx1[5 * 2 + i], kEps); // y3
+ ExpectClose(2 * x2[i], dydx2[5 * 2 + i], kEps);
+ }
+ }
+}
+
+// y1 = sin(x1'x2)
+// y2 = exp(-x1'x2 / 10)
+//
+// dy1/dx1 = x2 * cos(x1'x2), dy1/dx2 = x1 * cos(x1'x2)
+// dy2/dx1 = -x2 * exp(-x1'x2 / 10) / 10, dy2/dx2 = -x1 * exp(-x1'x2 / 10) / 10
+class TranscendentalTestCostFunction : public CostFunction {
+ public:
+ TranscendentalTestCostFunction() {
+ set_num_residuals(2);
+ mutable_parameter_block_sizes()->push_back(5); // x1.
+ mutable_parameter_block_sizes()->push_back(5); // x2.
+ }
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ (void) jacobians; // Ignored.
+
+ double x1x2 = 0;
+ for (int i = 0; i < 5; ++i) {
+ x1x2 += parameters[0][i] * parameters[1][i];
+ }
+ residuals[0] = sin(x1x2);
+ residuals[1] = exp(-x1x2 / 10);
+ return true;
+ }
+};
+
+TEST(NumericDiffCostFunction, TranscendentalOperationsInCostFunction) {
+ // Try both central and forward difference.
+ internal::scoped_ptr<CostFunction> cfs[2];
+ cfs[0].reset(
+ new NumericDiffCostFunction<TranscendentalTestCostFunction,
+ CENTRAL,
+ 2, /* number of residuals */
+ 5, /* size of x1 */
+ 5 /* size of x2 */>(
+ new TranscendentalTestCostFunction, TAKE_OWNERSHIP));
+
+ cfs[1].reset(
+ new NumericDiffCostFunction<TranscendentalTestCostFunction,
+ FORWARD,
+ 2, /* number of residuals */
+ 5, /* size of x1 */
+ 5 /* size of x2 */>(
+ new TranscendentalTestCostFunction, TAKE_OWNERSHIP));
+
+ for (int c = 0; c < 2; ++c) {
+ CostFunction *cost_function = cfs[c].get();
+
+ struct {
+ double x1[5];
+ double x2[5];
+ } kTests[] = {
+ { { 1.0, 2.0, 3.0, 4.0, 5.0 }, // No zeros.
+ { 9.0, 9.0, 5.0, 5.0, 1.0 },
+ },
+ { { 0.0, 2.0, 3.0, 0.0, 5.0 }, // Some zeros x1.
+ { 9.0, 9.0, 5.0, 5.0, 1.0 },
+ },
+ { { 1.0, 2.0, 3.0, 1.0, 5.0 }, // Some zeros x2.
+ { 0.0, 9.0, 0.0, 5.0, 0.0 },
+ },
+ { { 0.0, 0.0, 0.0, 0.0, 0.0 }, // All zeros x1.
+ { 9.0, 9.0, 5.0, 5.0, 1.0 },
+ },
+ { { 1.0, 2.0, 3.0, 4.0, 5.0 }, // All zeros x2.
+ { 0.0, 0.0, 0.0, 0.0, 0.0 },
+ },
+ { { 0.0, 0.0, 0.0, 0.0, 0.0 }, // All zeros.
+ { 0.0, 0.0, 0.0, 0.0, 0.0 },
+ },
+ };
+ for (int k = 0; k < CERES_ARRAYSIZE(kTests); ++k) {
+ double *x1 = &(kTests[k].x1[0]);
+ double *x2 = &(kTests[k].x2[0]);
+ double *parameters[] = { x1, x2 };
+
+ double dydx1[10];
+ double dydx2[10];
+ double *jacobians[2] = { &dydx1[0], &dydx2[0] };
+
+ double residuals[2];
+
+ ASSERT_TRUE(cost_function->Evaluate(&parameters[0],
+ &residuals[0],
+ &jacobians[0]));
+ LOG(INFO) << "Ran evaluate for test k=" << k << " c=" << c;
+
+ double x1x2 = 0;
+ for (int i = 0; i < 5; ++i) {
+ x1x2 += x1[i] * x2[i];
+ }
+
+ for (int i = 0; i < 5; ++i) {
+ const double kEps = c == 0 ? /* central */ 3e-9 : /* forward */ 2e-5;
+
+ ExpectClose( x2[i] * cos(x1x2), dydx1[5 * 0 + i], kEps);
+ ExpectClose( x1[i] * cos(x1x2), dydx2[5 * 0 + i], kEps);
+ ExpectClose(-x2[i] * exp(-x1x2 / 10.) / 10., dydx1[5 * 1 + i], kEps);
+ ExpectClose(-x1[i] * exp(-x1x2 / 10.) / 10., dydx2[5 * 1 + i], kEps);
+ }
+ }
+ }
+}
+
+
+template<int num_rows, int num_cols>
+class SizeTestingCostFunction : public SizedCostFunction<num_rows, num_cols> {
+ public:
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ return true;
+ }
+};
+
+// As described in
+// http://forum.kde.org/viewtopic.php?f=74&t=98536#p210774
+// Eigen3 has restrictions on the Row/Column major storage of vectors,
+// depending on their dimensions. This test ensures that the correct
+// templates are instantiated for various shapes of the Jacobian
+// matrix.
+TEST(NumericDiffCostFunction, EigenRowMajorColMajorTest) {
+ scoped_ptr<CostFunction> cost_function;
+ cost_function.reset(
+ new NumericDiffCostFunction<SizeTestingCostFunction<1,1>, CENTRAL, 1, 1>(
+ new SizeTestingCostFunction<1,1>, ceres::TAKE_OWNERSHIP));
+
+ cost_function.reset(
+ new NumericDiffCostFunction<SizeTestingCostFunction<2,1>, CENTRAL, 2, 1>(
+ new SizeTestingCostFunction<2,1>, ceres::TAKE_OWNERSHIP));
+
+ cost_function.reset(
+ new NumericDiffCostFunction<SizeTestingCostFunction<1,2>, CENTRAL, 1, 2>(
+ new SizeTestingCostFunction<1,2>, ceres::TAKE_OWNERSHIP));
+
+ cost_function.reset(
+ new NumericDiffCostFunction<SizeTestingCostFunction<2,2>, CENTRAL, 2, 2>(
+ new SizeTestingCostFunction<2,2>, ceres::TAKE_OWNERSHIP));
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/ordered_groups_test.cc b/internal/ceres/ordered_groups_test.cc
new file mode 100644
index 0000000..700e788
--- /dev/null
+++ b/internal/ceres/ordered_groups_test.cc
@@ -0,0 +1,163 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/ordered_groups.h"
+
+#include <cstddef>
+#include <vector>
+#include "gtest/gtest.h"
+#include "ceres/collections_port.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(OrderedGroup, EmptyOrderedGroupBehavesCorrectly) {
+ ParameterBlockOrdering ordering;
+ EXPECT_EQ(ordering.NumGroups(), 0);
+ EXPECT_EQ(ordering.NumElements(), 0);
+ EXPECT_EQ(ordering.GroupSize(1), 0);
+ double x;
+ EXPECT_EQ(ordering.GroupId(&x), -1);
+ EXPECT_FALSE(ordering.Remove(&x));
+}
+
+TEST(OrderedGroup, EverythingInOneGroup) {
+ ParameterBlockOrdering ordering;
+ double x[3];
+ ordering.AddElementToGroup(x, 1);
+ ordering.AddElementToGroup(x + 1, 1);
+ ordering.AddElementToGroup(x + 2, 1);
+ ordering.AddElementToGroup(x, 1);
+
+ EXPECT_EQ(ordering.NumGroups(), 1);
+ EXPECT_EQ(ordering.NumElements(), 3);
+ EXPECT_EQ(ordering.GroupSize(1), 3);
+ EXPECT_EQ(ordering.GroupSize(0), 0);
+ EXPECT_EQ(ordering.GroupId(x), 1);
+ EXPECT_EQ(ordering.GroupId(x + 1), 1);
+ EXPECT_EQ(ordering.GroupId(x + 2), 1);
+
+ ordering.Remove(x);
+ EXPECT_EQ(ordering.NumGroups(), 1);
+ EXPECT_EQ(ordering.NumElements(), 2);
+ EXPECT_EQ(ordering.GroupSize(1), 2);
+ EXPECT_EQ(ordering.GroupSize(0), 0);
+
+ EXPECT_EQ(ordering.GroupId(x), -1);
+ EXPECT_EQ(ordering.GroupId(x + 1), 1);
+ EXPECT_EQ(ordering.GroupId(x + 2), 1);
+}
+
+TEST(OrderedGroup, StartInOneGroupAndThenSplit) {
+ ParameterBlockOrdering ordering;
+ double x[3];
+ ordering.AddElementToGroup(x, 1);
+ ordering.AddElementToGroup(x + 1, 1);
+ ordering.AddElementToGroup(x + 2, 1);
+ ordering.AddElementToGroup(x, 1);
+
+ EXPECT_EQ(ordering.NumGroups(), 1);
+ EXPECT_EQ(ordering.NumElements(), 3);
+ EXPECT_EQ(ordering.GroupSize(1), 3);
+ EXPECT_EQ(ordering.GroupSize(0), 0);
+ EXPECT_EQ(ordering.GroupId(x), 1);
+ EXPECT_EQ(ordering.GroupId(x + 1), 1);
+ EXPECT_EQ(ordering.GroupId(x + 2), 1);
+
+ ordering.AddElementToGroup(x, 5);
+ EXPECT_EQ(ordering.NumGroups(), 2);
+ EXPECT_EQ(ordering.NumElements(), 3);
+ EXPECT_EQ(ordering.GroupSize(1), 2);
+ EXPECT_EQ(ordering.GroupSize(5), 1);
+ EXPECT_EQ(ordering.GroupSize(0), 0);
+
+ EXPECT_EQ(ordering.GroupId(x), 5);
+ EXPECT_EQ(ordering.GroupId(x + 1), 1);
+ EXPECT_EQ(ordering.GroupId(x + 2), 1);
+}
+
+TEST(OrderedGroup, AddAndRemoveEveryThingFromOneGroup) {
+ ParameterBlockOrdering ordering;
+ double x[3];
+ ordering.AddElementToGroup(x, 1);
+ ordering.AddElementToGroup(x + 1, 1);
+ ordering.AddElementToGroup(x + 2, 1);
+ ordering.AddElementToGroup(x, 1);
+
+ EXPECT_EQ(ordering.NumGroups(), 1);
+ EXPECT_EQ(ordering.NumElements(), 3);
+ EXPECT_EQ(ordering.GroupSize(1), 3);
+ EXPECT_EQ(ordering.GroupSize(0), 0);
+ EXPECT_EQ(ordering.GroupId(x), 1);
+ EXPECT_EQ(ordering.GroupId(x + 1), 1);
+ EXPECT_EQ(ordering.GroupId(x + 2), 1);
+
+ ordering.AddElementToGroup(x, 5);
+ ordering.AddElementToGroup(x + 1, 5);
+ ordering.AddElementToGroup(x + 2, 5);
+ EXPECT_EQ(ordering.NumGroups(), 1);
+ EXPECT_EQ(ordering.NumElements(), 3);
+ EXPECT_EQ(ordering.GroupSize(1), 0);
+ EXPECT_EQ(ordering.GroupSize(5), 3);
+ EXPECT_EQ(ordering.GroupSize(0), 0);
+
+ EXPECT_EQ(ordering.GroupId(x), 5);
+ EXPECT_EQ(ordering.GroupId(x + 1), 5);
+ EXPECT_EQ(ordering.GroupId(x + 2), 5);
+}
+
+TEST(OrderedGroup, ReverseOrdering) {
+ ParameterBlockOrdering ordering;
+ double x[3];
+ ordering.AddElementToGroup(x, 1);
+ ordering.AddElementToGroup(x + 1, 2);
+ ordering.AddElementToGroup(x + 2, 2);
+
+ EXPECT_EQ(ordering.NumGroups(), 2);
+ EXPECT_EQ(ordering.NumElements(), 3);
+ EXPECT_EQ(ordering.GroupSize(1), 1);
+ EXPECT_EQ(ordering.GroupSize(2), 2);
+ EXPECT_EQ(ordering.GroupId(x), 1);
+ EXPECT_EQ(ordering.GroupId(x + 1), 2);
+ EXPECT_EQ(ordering.GroupId(x + 2), 2);
+
+ ordering.Reverse();
+
+ EXPECT_EQ(ordering.NumGroups(), 2);
+ EXPECT_EQ(ordering.NumElements(), 3);
+ EXPECT_EQ(ordering.GroupSize(3), 1);
+ EXPECT_EQ(ordering.GroupSize(2), 2);
+ EXPECT_EQ(ordering.GroupId(x), 3);
+ EXPECT_EQ(ordering.GroupId(x + 1), 2);
+ EXPECT_EQ(ordering.GroupId(x + 2), 2);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/parameter_block.h b/internal/ceres/parameter_block.h
new file mode 100644
index 0000000..f20805c
--- /dev/null
+++ b/internal/ceres/parameter_block.h
@@ -0,0 +1,271 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_PARAMETER_BLOCK_H_
+#define CERES_INTERNAL_PARAMETER_BLOCK_H_
+
+#include <cstdlib>
+#include <string>
+#include "ceres/array_utils.h"
+#include "ceres/integral_types.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/stringprintf.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+class ProblemImpl;
+
+// The parameter block encodes the location of the user's original value, and
+// also the "current state" of the parameter. The evaluator uses whatever is in
+// the current state of the parameter when evaluating. This is inlined since the
+// methods are performance sensitive.
+//
+// The class is not thread-safe, unless only const methods are called. The
+// parameter block may also hold a pointer to a local parameterization; the
+// parameter block does not take ownership of this pointer, so the user is
+// responsible for the proper disposal of the local parameterization.
+class ParameterBlock {
+ public:
+ ParameterBlock(double* user_state, int size) {
+ Init(user_state, size, NULL);
+ }
+ ParameterBlock(double* user_state,
+ int size,
+ LocalParameterization* local_parameterization) {
+ Init(user_state, size, local_parameterization);
+ }
+
+ // The size of the parameter block.
+ int Size() const { return size_; }
+
+ // Manipulate the parameter state.
+ bool SetState(const double* x) {
+ CHECK(x != NULL)
+ << "Tried to set the state of constant parameter "
+ << "with user location " << user_state_;
+ CHECK(!is_constant_)
+ << "Tried to set the state of constant parameter "
+ << "with user location " << user_state_;
+
+ state_ = x;
+ return UpdateLocalParameterizationJacobian();
+ }
+
+ // Copy the current parameter state out to x. This is "GetState()" rather than
+ // simply "state()" since it is actively copying the data into the passed
+ // pointer.
+ void GetState(double *x) const {
+ if (x != state_) {
+ memcpy(x, state_, sizeof(*state_) * size_);
+ }
+ }
+
+ // Direct pointers to the current state.
+ const double* state() const { return state_; }
+ const double* user_state() const { return user_state_; }
+ double* mutable_user_state() { return user_state_; }
+ LocalParameterization* local_parameterization() const {
+ return local_parameterization_;
+ }
+ LocalParameterization* mutable_local_parameterization() {
+ return local_parameterization_;
+ }
+
+ // Set this parameter block to vary or not.
+ void SetConstant() { is_constant_ = true; }
+ void SetVarying() { is_constant_ = false; }
+ bool IsConstant() const { return is_constant_; }
+
+ // This parameter block's index in an array.
+ int index() const { return index_; }
+ void set_index(int index) { index_ = index; }
+
+ // This parameter block's offset inside a larger state vector.
+ int state_offset() const { return state_offset_; }
+ void set_state_offset(int state_offset) { state_offset_ = state_offset; }
+
+ // This parameter block's offset inside a larger delta vector.
+ int delta_offset() const { return delta_offset_; }
+ void set_delta_offset(int delta_offset) { delta_offset_ = delta_offset; }
+
+ // Methods relating to the parameter block's parameterization.
+
+ // The local to global jacobian. Returns NULL if there is no local
+ // parameterization for this parameter block. The returned matrix is row-major
+ // and has Size() rows and LocalSize() columns.
+ const double* LocalParameterizationJacobian() const {
+ return local_parameterization_jacobian_.get();
+ }
+
+ int LocalSize() const {
+ return (local_parameterization_ == NULL)
+ ? size_
+ : local_parameterization_->LocalSize();
+ }
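+
+ // Illustrative only: for a parameter block using the library's
+ // QuaternionParameterization (GlobalSize() == 4, LocalSize() == 3),
+ //
+ //   double q[4] = {1.0, 0.0, 0.0, 0.0};
+ //   QuaternionParameterization quaternion_parameterization;
+ //   ParameterBlock block(q, 4, &quaternion_parameterization);
+ //   block.LocalSize();                      // == 3
+ //   block.LocalParameterizationJacobian();  // row-major 4 x 3 array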
+
+ // Set the parameterization. The parameterization can be set exactly once;
+ // multiple calls to set the parameterization to different values will crash.
+ // It is an error to pass NULL for the parameterization. The parameter block
+ // does not take ownership of the parameterization.
+ void SetParameterization(LocalParameterization* new_parameterization) {
+ CHECK(new_parameterization != NULL) << "NULL parameterization invalid.";
+ CHECK(new_parameterization->GlobalSize() == size_)
+ << "Invalid parameterization for parameter block. The parameter block "
+ << "has size " << size_ << " while the parameterization has a global "
+ << "size of " << new_parameterization->GlobalSize() << ". Did you "
+ << "accidentally use the wrong parameter block or parameterization?";
+ if (new_parameterization != local_parameterization_) {
+ CHECK(local_parameterization_ == NULL)
+ << "Can't re-set the local parameterization; it leads to "
+ << "ambiguous ownership.";
+ local_parameterization_ = new_parameterization;
+ local_parameterization_jacobian_.reset(
+ new double[local_parameterization_->GlobalSize() *
+ local_parameterization_->LocalSize()]);
+ CHECK(UpdateLocalParameterizationJacobian())
+ << "Local parameterization Jacobian computation failed "
+ << "for x: " << ConstVectorRef(state_, Size()).transpose();
+ } else {
+ // Ignore the case that the parameterizations match.
+ }
+ }
+
+ // Generalization of the addition operation. This is the same as
+ // LocalParameterization::Plus() but uses the parameter's current state
+ // instead of operating on a passed in pointer.
+ bool Plus(const double *x, const double* delta, double* x_plus_delta) {
+ if (local_parameterization_ == NULL) {
+ VectorRef(x_plus_delta, size_) = ConstVectorRef(x, size_) +
+ ConstVectorRef(delta, size_);
+ return true;
+ }
+ return local_parameterization_->Plus(x, delta, x_plus_delta);
+ }
+
+ string ToString() const {
+ return StringPrintf("{ user_state=%p, state=%p, size=%d, "
+ "constant=%d, index=%d, state_offset=%d, "
+ "delta_offset=%d }",
+ user_state_,
+ state_,
+ size_,
+ is_constant_,
+ index_,
+ state_offset_,
+ delta_offset_);
+ }
+
+ private:
+ void Init(double* user_state,
+ int size,
+ LocalParameterization* local_parameterization) {
+ user_state_ = user_state;
+ size_ = size;
+ is_constant_ = false;
+ state_ = user_state_;
+
+ local_parameterization_ = NULL;
+ if (local_parameterization != NULL) {
+ SetParameterization(local_parameterization);
+ }
+
+ index_ = -1;
+ state_offset_ = -1;
+ delta_offset_ = -1;
+ }
+
+ bool UpdateLocalParameterizationJacobian() {
+ if (local_parameterization_ == NULL) {
+ return true;
+ }
+
+ // Update the local to global Jacobian. In some cases this is
+ // wasted effort; if this is a bottleneck, we will find a solution
+ // at that time.
+
+ const int jacobian_size = Size() * LocalSize();
+ InvalidateArray(jacobian_size,
+ local_parameterization_jacobian_.get());
+ if (!local_parameterization_->ComputeJacobian(
+ state_,
+ local_parameterization_jacobian_.get())) {
+ LOG(WARNING) << "Local parameterization Jacobian computation failed"
+ "for x: " << ConstVectorRef(state_, Size()).transpose();
+ return false;
+ }
+
+ if (!IsArrayValid(jacobian_size, local_parameterization_jacobian_.get())) {
+ LOG(WARNING) << "Local parameterization Jacobian computation returned"
+ << "an invalid matrix for x: "
+ << ConstVectorRef(state_, Size()).transpose()
+ << "\n Jacobian matrix : "
+ << ConstMatrixRef(local_parameterization_jacobian_.get(),
+ Size(),
+ LocalSize());
+ return false;
+ }
+ return true;
+ }
+
+ double* user_state_;
+ int size_;
+ bool is_constant_;
+ LocalParameterization* local_parameterization_;
+
+ // The "state" of the parameter. These fields are only needed while the
+ // solver is running. While at first glance using mutable is a bad idea, this
+ // ends up simplifying the internals of Ceres enough to justify the potential
+ // pitfalls of using "mutable."
+ mutable const double* state_;
+ mutable scoped_array<double> local_parameterization_jacobian_;
+
+ // The index of the parameter. This is used by various other parts of Ceres to
+ // permit switching from a ParameterBlock* to an index in another array.
+ int32 index_;
+
+ // The offset of this parameter block inside a larger state vector.
+ int32 state_offset_;
+
+ // The offset of this parameter block inside a larger delta vector.
+ int32 delta_offset_;
+
+ // Necessary so ProblemImpl can clean up the parameterizations.
+ friend class ProblemImpl;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_PARAMETER_BLOCK_H_
diff --git a/internal/ceres/parameter_block_ordering.cc b/internal/ceres/parameter_block_ordering.cc
new file mode 100644
index 0000000..e8f626f
--- /dev/null
+++ b/internal/ceres/parameter_block_ordering.cc
@@ -0,0 +1,122 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/parameter_block_ordering.h"
+
+#include "ceres/graph.h"
+#include "ceres/graph_algorithms.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/map_util.h"
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+int ComputeSchurOrdering(const Program& program,
+ vector<ParameterBlock*>* ordering) {
+ CHECK_NOTNULL(ordering)->clear();
+
+ scoped_ptr<Graph< ParameterBlock*> > graph(CreateHessianGraph(program));
+ int independent_set_size = IndependentSetOrdering(*graph, ordering);
+ const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
+
+ // Add the excluded blocks to the back of the ordering vector.
+ for (int i = 0; i < parameter_blocks.size(); ++i) {
+ ParameterBlock* parameter_block = parameter_blocks[i];
+ if (parameter_block->IsConstant()) {
+ ordering->push_back(parameter_block);
+ }
+ }
+
+ return independent_set_size;
+}
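+
+// Illustrative only: for a bundle-adjustment style problem where each
+// residual block connects one point block and one camera block, the point
+// blocks form an independent set, so ComputeSchurOrdering() typically
+// returns the number of point blocks, and 'ordering' lists the points
+// first, then the cameras, then any constant blocks.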
+
+void ComputeRecursiveIndependentSetOrdering(const Program& program,
+ ParameterBlockOrdering* ordering) {
+ CHECK_NOTNULL(ordering)->Clear();
+ const vector<ParameterBlock*> parameter_blocks = program.parameter_blocks();
+ scoped_ptr<Graph< ParameterBlock*> > graph(CreateHessianGraph(program));
+
+ int num_covered = 0;
+ int round = 0;
+ while (num_covered < parameter_blocks.size()) {
+ vector<ParameterBlock*> independent_set_ordering;
+ const int independent_set_size =
+ IndependentSetOrdering(*graph, &independent_set_ordering);
+ for (int i = 0; i < independent_set_size; ++i) {
+ ParameterBlock* parameter_block = independent_set_ordering[i];
+ ordering->AddElementToGroup(parameter_block->mutable_user_state(), round);
+ graph->RemoveVertex(parameter_block);
+ }
+ num_covered += independent_set_size;
+ ++round;
+ }
+}
+
+Graph<ParameterBlock*>*
+CreateHessianGraph(const Program& program) {
+ Graph<ParameterBlock*>* graph = CHECK_NOTNULL(new Graph<ParameterBlock*>);
+ const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
+ for (int i = 0; i < parameter_blocks.size(); ++i) {
+ ParameterBlock* parameter_block = parameter_blocks[i];
+ if (!parameter_block->IsConstant()) {
+ graph->AddVertex(parameter_block);
+ }
+ }
+
+ const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
+ for (int i = 0; i < residual_blocks.size(); ++i) {
+ const ResidualBlock* residual_block = residual_blocks[i];
+ const int num_parameter_blocks = residual_block->NumParameterBlocks();
+ ParameterBlock* const* parameter_blocks =
+ residual_block->parameter_blocks();
+ for (int j = 0; j < num_parameter_blocks; ++j) {
+ if (parameter_blocks[j]->IsConstant()) {
+ continue;
+ }
+
+ for (int k = j + 1; k < num_parameter_blocks; ++k) {
+ if (parameter_blocks[k]->IsConstant()) {
+ continue;
+ }
+
+ graph->AddEdge(parameter_blocks[j], parameter_blocks[k]);
+ }
+ }
+ }
+
+ return graph;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/parameter_block_ordering.h b/internal/ceres/parameter_block_ordering.h
new file mode 100644
index 0000000..a5277a4
--- /dev/null
+++ b/internal/ceres/parameter_block_ordering.h
@@ -0,0 +1,78 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_PARAMETER_BLOCK_ORDERING_H_
+#define CERES_INTERNAL_PARAMETER_BLOCK_ORDERING_H_
+
+#include <vector>
+#include "ceres/ordered_groups.h"
+#include "ceres/graph.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class Program;
+class ParameterBlock;
+
+// Uses an approximate independent set ordering to order the parameter
+// blocks of a problem so that the ordering is suitable for use with Schur
+// complement based solvers. The output variable ordering contains an
+// ordering of the parameter blocks and the return value is the size of
+// the independent set or the number of e_blocks (see
+// schur_complement_solver.h for an explanation). Constant parameters
+// are added to the end.
+//
+// The ordering vector has the structure
+//
+// ordering = [independent set,
+// complement of the independent set,
+// fixed blocks]
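+//
+// A minimal usage sketch (illustrative only; "program" is assumed to be
+// an already constructed Program):
+//
+//   vector<ParameterBlock*> ordering;
+//   const int num_eliminate_blocks = ComputeSchurOrdering(program, &ordering);
+//
+// The first num_eliminate_blocks entries of the ordering are then the
+// e_blocks that a Schur complement based solver can eliminate first;
+// the constant parameter blocks appear at the end.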
+int ComputeSchurOrdering(const Program& program,
+ vector<ParameterBlock* >* ordering);
+
+// Use an approximate independent set ordering to decompose the
+// parameter blocks of a problem into a sequence of independent
+// sets. The ordering covers all the non-constant parameter blocks in
+// the program.
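+//
+// A minimal usage sketch (illustrative only):
+//
+//   ParameterBlockOrdering ordering;
+//   ComputeRecursiveIndependentSetOrdering(program, &ordering);
+//
+// Group 0 of the ordering then holds the first independent set, group 1
+// the independent set of the remaining vertices, and so on, until all
+// non-constant parameter blocks are covered.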
+void ComputeRecursiveIndependentSetOrdering(const Program& program,
+ ParameterBlockOrdering* ordering);
+
+// Builds a graph on the parameter blocks of a Problem, whose
+// structure reflects the sparsity structure of the Hessian. Each
+// vertex corresponds to a parameter block in the Problem except for
+// parameter blocks that are marked constant. An edge connects two
+// parameter blocks if they co-occur in a residual block.
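+//
+// The caller takes ownership of the returned graph, e.g. (illustrative):
+//
+//   scoped_ptr<Graph<ParameterBlock*> > graph(CreateHessianGraph(program));
+//   const HashSet<ParameterBlock*>& vertices = graph->vertices();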
+Graph<ParameterBlock*>* CreateHessianGraph(const Program& program);
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_PARAMETER_BLOCK_ORDERING_H_
diff --git a/internal/ceres/parameter_block_ordering_test.cc b/internal/ceres/parameter_block_ordering_test.cc
new file mode 100644
index 0000000..bc497d0
--- /dev/null
+++ b/internal/ceres/parameter_block_ordering_test.cc
@@ -0,0 +1,177 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/parameter_block_ordering.h"
+
+#include <cstddef>
+#include <vector>
+#include "gtest/gtest.h"
+#include "ceres/collections_port.h"
+#include "ceres/graph.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/stl_util.h"
+#include "ceres/cost_function.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/sized_cost_function.h"
+
+namespace ceres {
+namespace internal {
+
+typedef Graph<ParameterBlock*> HessianGraph;
+typedef HashSet<ParameterBlock*> VertexSet;
+
+template <int M, int N1 = 0, int N2 = 0, int N3 = 0>
+class DummyCostFunction: public SizedCostFunction<M, N1, N2, N3> {
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ return true;
+ }
+};
+
+class SchurOrderingTest : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ // The explicit calls to AddParameterBlock are necessary because
+ // the below tests depend on the specific numbering of the
+ // parameter blocks.
+ problem_.AddParameterBlock(x_, 3);
+ problem_.AddParameterBlock(y_, 4);
+ problem_.AddParameterBlock(z_, 5);
+ problem_.AddParameterBlock(w_, 6);
+
+ problem_.AddResidualBlock(new DummyCostFunction<2, 3>, NULL, x_);
+ problem_.AddResidualBlock(new DummyCostFunction<6, 5, 4>, NULL, z_, y_);
+ problem_.AddResidualBlock(new DummyCostFunction<3, 3, 5>, NULL, x_, z_);
+ problem_.AddResidualBlock(new DummyCostFunction<7, 5, 3>, NULL, z_, x_);
+ problem_.AddResidualBlock(new DummyCostFunction<1, 5, 3, 6>, NULL,
+ z_, x_, w_);
+ }
+
+ ProblemImpl problem_;
+ double x_[3], y_[4], z_[5], w_[6];
+};
+
+TEST_F(SchurOrderingTest, NoFixed) {
+ const Program& program = problem_.program();
+ const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
+ scoped_ptr<HessianGraph> graph(CreateHessianGraph(program));
+
+ const VertexSet& vertices = graph->vertices();
+ EXPECT_EQ(vertices.size(), 4);
+
+ for (int i = 0; i < 4; ++i) {
+ EXPECT_TRUE(vertices.find(parameter_blocks[i]) != vertices.end());
+ }
+
+ {
+ const VertexSet& neighbors = graph->Neighbors(parameter_blocks[0]);
+ EXPECT_EQ(neighbors.size(), 2);
+ EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
+ EXPECT_TRUE(neighbors.find(parameter_blocks[3]) != neighbors.end());
+ }
+
+ {
+ const VertexSet& neighbors = graph->Neighbors(parameter_blocks[1]);
+ EXPECT_EQ(neighbors.size(), 1);
+ EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
+ }
+
+ {
+ const VertexSet& neighbors = graph->Neighbors(parameter_blocks[2]);
+ EXPECT_EQ(neighbors.size(), 3);
+ EXPECT_TRUE(neighbors.find(parameter_blocks[0]) != neighbors.end());
+ EXPECT_TRUE(neighbors.find(parameter_blocks[1]) != neighbors.end());
+ EXPECT_TRUE(neighbors.find(parameter_blocks[3]) != neighbors.end());
+ }
+
+ {
+ const VertexSet& neighbors = graph->Neighbors(parameter_blocks[3]);
+ EXPECT_EQ(neighbors.size(), 2);
+ EXPECT_TRUE(neighbors.find(parameter_blocks[0]) != neighbors.end());
+ EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
+ }
+}
+
+TEST_F(SchurOrderingTest, AllFixed) {
+ problem_.SetParameterBlockConstant(x_);
+ problem_.SetParameterBlockConstant(y_);
+ problem_.SetParameterBlockConstant(z_);
+ problem_.SetParameterBlockConstant(w_);
+
+ const Program& program = problem_.program();
+ scoped_ptr<HessianGraph> graph(CreateHessianGraph(program));
+ EXPECT_EQ(graph->vertices().size(), 0);
+}
+
+TEST_F(SchurOrderingTest, OneFixed) {
+ problem_.SetParameterBlockConstant(x_);
+
+ const Program& program = problem_.program();
+ const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
+ scoped_ptr<HessianGraph> graph(CreateHessianGraph(program));
+
+ const VertexSet& vertices = graph->vertices();
+
+ EXPECT_EQ(vertices.size(), 3);
+ EXPECT_TRUE(vertices.find(parameter_blocks[0]) == vertices.end());
+
+ for (int i = 1; i < 3; ++i) {
+ EXPECT_TRUE(vertices.find(parameter_blocks[i]) != vertices.end());
+ }
+
+ {
+ const VertexSet& neighbors = graph->Neighbors(parameter_blocks[1]);
+ EXPECT_EQ(neighbors.size(), 1);
+ EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
+ }
+
+ {
+ const VertexSet& neighbors = graph->Neighbors(parameter_blocks[2]);
+ EXPECT_EQ(neighbors.size(), 2);
+ EXPECT_TRUE(neighbors.find(parameter_blocks[1]) != neighbors.end());
+ EXPECT_TRUE(neighbors.find(parameter_blocks[3]) != neighbors.end());
+ }
+
+ {
+ const VertexSet& neighbors = graph->Neighbors(parameter_blocks[3]);
+ EXPECT_EQ(neighbors.size(), 1);
+ EXPECT_TRUE(neighbors.find(parameter_blocks[2]) != neighbors.end());
+ }
+
+ // The constant parameter block is at the end.
+ vector<ParameterBlock*> ordering;
+ ComputeSchurOrdering(program, &ordering);
+ EXPECT_EQ(ordering.back(), parameter_blocks[0]);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/parameter_block_test.cc b/internal/ceres/parameter_block_test.cc
new file mode 100644
index 0000000..35998dc
--- /dev/null
+++ b/internal/ceres/parameter_block_test.cc
@@ -0,0 +1,173 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/parameter_block.h"
+
+#include "gtest/gtest.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(ParameterBlock, SetLocalParameterization) {
+ double x[3] = { 1.0, 2.0, 3.0 };
+ ParameterBlock parameter_block(x, 3);
+
+ // The indices to set constant within the parameter block (used later).
+ vector<int> indices;
+ indices.push_back(1);
+
+ // Can't set the parameterization if the sizes don't match.
+ SubsetParameterization subset_wrong_size(4, indices);
+ EXPECT_DEATH_IF_SUPPORTED(
+ parameter_block.SetParameterization(&subset_wrong_size), "global");
+
+ // Can't set parameterization to NULL from NULL.
+  EXPECT_DEATH_IF_SUPPORTED(
+      parameter_block.SetParameterization(NULL), "NULL");
+
+ // Now set the parameterization.
+ SubsetParameterization subset(3, indices);
+ parameter_block.SetParameterization(&subset);
+
+ // Re-setting the parameterization to the same value is supported.
+ parameter_block.SetParameterization(&subset);
+
+ // Can't set parameterization to NULL from another parameterization.
+ EXPECT_DEATH_IF_SUPPORTED(parameter_block.SetParameterization(NULL), "NULL");
+
+ // Can't set the parameterization more than once.
+ SubsetParameterization subset_different(3, indices);
+  EXPECT_DEATH_IF_SUPPORTED(
+      parameter_block.SetParameterization(&subset_different), "re-set");
+
+ // Ensure the local parameterization jacobian result is correctly computed.
+ ConstMatrixRef local_parameterization_jacobian(
+ parameter_block.LocalParameterizationJacobian(),
+ 3,
+ 2);
+ ASSERT_EQ(1.0, local_parameterization_jacobian(0, 0));
+ ASSERT_EQ(0.0, local_parameterization_jacobian(0, 1));
+ ASSERT_EQ(0.0, local_parameterization_jacobian(1, 0));
+ ASSERT_EQ(0.0, local_parameterization_jacobian(1, 1));
+ ASSERT_EQ(0.0, local_parameterization_jacobian(2, 0));
+ ASSERT_EQ(1.0, local_parameterization_jacobian(2, 1));
+
+ // Check that updating works as expected.
+ double x_plus_delta[3];
+ double delta[2] = { 0.5, 0.3 };
+ parameter_block.Plus(x, delta, x_plus_delta);
+ ASSERT_EQ(1.5, x_plus_delta[0]);
+ ASSERT_EQ(2.0, x_plus_delta[1]);
+ ASSERT_EQ(3.3, x_plus_delta[2]);
+}
+
+struct TestParameterization : public LocalParameterization {
+ public:
+ virtual ~TestParameterization() {}
+ virtual bool Plus(const double* x,
+ const double* delta,
+ double* x_plus_delta) const {
+ LOG(FATAL) << "Shouldn't get called.";
+ return true;
+ }
+ virtual bool ComputeJacobian(const double* x,
+ double* jacobian) const {
+ jacobian[0] = *x * 2;
+ return true;
+ }
+
+ virtual int GlobalSize() const { return 1; }
+ virtual int LocalSize() const { return 1; }
+};
+
+TEST(ParameterBlock, SetStateUpdatesLocalParameterizationJacobian) {
+ TestParameterization test_parameterization;
+ double x[1] = { 1.0 };
+ ParameterBlock parameter_block(x, 1, &test_parameterization);
+
+ EXPECT_EQ(2.0, *parameter_block.LocalParameterizationJacobian());
+
+ x[0] = 5.5;
+ parameter_block.SetState(x);
+ EXPECT_EQ(11.0, *parameter_block.LocalParameterizationJacobian());
+}
+
+TEST(ParameterBlock, PlusWithNoLocalParameterization) {
+ double x[2] = { 1.0, 2.0 };
+ ParameterBlock parameter_block(x, 2);
+
+ double delta[2] = { 0.2, 0.3 };
+ double x_plus_delta[2];
+ parameter_block.Plus(x, delta, x_plus_delta);
+ EXPECT_EQ(1.2, x_plus_delta[0]);
+ EXPECT_EQ(2.3, x_plus_delta[1]);
+}
+
+// Stops computing the jacobian after the first time.
+class BadLocalParameterization : public LocalParameterization {
+ public:
+ BadLocalParameterization()
+ : calls_(0) {
+ }
+
+ virtual ~BadLocalParameterization() {}
+ virtual bool Plus(const double* x,
+ const double* delta,
+ double* x_plus_delta) const {
+ *x_plus_delta = *x + *delta;
+ return true;
+ }
+
+ virtual bool ComputeJacobian(const double* x, double* jacobian) const {
+ if (calls_ == 0) {
+ jacobian[0] = 0;
+ }
+ ++calls_;
+ return true;
+ }
+
+ virtual int GlobalSize() const { return 1;}
+ virtual int LocalSize() const { return 1;}
+
+ private:
+ mutable int calls_;
+};
+
+TEST(ParameterBlock, DetectBadLocalParameterization) {
+ double x = 1;
+ BadLocalParameterization bad_parameterization;
+ ParameterBlock parameter_block(&x, 1, &bad_parameterization);
+ double y = 2;
+ EXPECT_FALSE(parameter_block.SetState(&y));
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/partitioned_matrix_view.cc b/internal/ceres/partitioned_matrix_view.cc
new file mode 100644
index 0000000..0722fc8
--- /dev/null
+++ b/internal/ceres/partitioned_matrix_view.cc
@@ -0,0 +1,315 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10
+
+#include "ceres/partitioned_matrix_view.h"
+
+#include <algorithm>
+#include <cstring>
+#include <vector>
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+PartitionedMatrixView::PartitionedMatrixView(
+ const BlockSparseMatrixBase& matrix,
+ int num_col_blocks_a)
+ : matrix_(matrix),
+ num_col_blocks_e_(num_col_blocks_a) {
+ const CompressedRowBlockStructure* bs = matrix_.block_structure();
+ CHECK_NOTNULL(bs);
+
+ num_col_blocks_f_ = bs->cols.size() - num_col_blocks_a;
+
+ // Compute the number of row blocks in E. The number of row blocks
+  // in E may be less than the number of row blocks in the input matrix
+ // as some of the row blocks at the bottom may not have any
+ // e_blocks. For a definition of what an e_block is, please see
+ // explicit_schur_complement_solver.h
+ num_row_blocks_e_ = 0;
+ for (int r = 0; r < bs->rows.size(); ++r) {
+ const vector<Cell>& cells = bs->rows[r].cells;
+ if (cells[0].block_id < num_col_blocks_a) {
+ ++num_row_blocks_e_;
+ }
+ }
+
+ // Compute the number of columns in E and F.
+ num_cols_e_ = 0;
+ num_cols_f_ = 0;
+
+ for (int c = 0; c < bs->cols.size(); ++c) {
+ const Block& block = bs->cols[c];
+ if (c < num_col_blocks_a) {
+ num_cols_e_ += block.size;
+ } else {
+ num_cols_f_ += block.size;
+ }
+ }
+
+ CHECK_EQ(num_cols_e_ + num_cols_f_, matrix_.num_cols());
+}
+
+PartitionedMatrixView::~PartitionedMatrixView() {
+}
+
+// The next four methods don't seem to be particularly cache
+// friendly. This is an artifact of how the BlockStructure of the
+// input matrix is constructed. These methods will benefit from
+// multithreading as well as improved data layout.
+
+void PartitionedMatrixView::RightMultiplyE(const double* x, double* y) const {
+ const CompressedRowBlockStructure* bs = matrix_.block_structure();
+
+ // Iterate over the first num_row_blocks_e_ row blocks, and multiply
+ // by the first cell in each row block.
+ for (int r = 0; r < num_row_blocks_e_; ++r) {
+ const double* row_values = matrix_.RowBlockValues(r);
+ const Cell& cell = bs->rows[r].cells[0];
+ const int row_block_pos = bs->rows[r].block.position;
+ const int row_block_size = bs->rows[r].block.size;
+ const int col_block_id = cell.block_id;
+ const int col_block_pos = bs->cols[col_block_id].position;
+ const int col_block_size = bs->cols[col_block_id].size;
+
+ ConstVectorRef xref(x + col_block_pos, col_block_size);
+ VectorRef yref(y + row_block_pos, row_block_size);
+ ConstMatrixRef m(row_values + cell.position,
+ row_block_size,
+ col_block_size);
+ yref += m.lazyProduct(xref);
+ }
+}
+
+void PartitionedMatrixView::RightMultiplyF(const double* x, double* y) const {
+ const CompressedRowBlockStructure* bs = matrix_.block_structure();
+
+ // Iterate over row blocks, and if the row block is in E, then
+ // multiply by all the cells except the first one which is of type
+  // E. If the row block is not in E (i.e., it is in the bottom
+ // num_row_blocks - num_row_blocks_e row blocks), then all the cells
+ // are of type F and multiply by them all.
+ for (int r = 0; r < bs->rows.size(); ++r) {
+ const int row_block_pos = bs->rows[r].block.position;
+ const int row_block_size = bs->rows[r].block.size;
+ VectorRef yref(y + row_block_pos, row_block_size);
+ const vector<Cell>& cells = bs->rows[r].cells;
+ for (int c = (r < num_row_blocks_e_) ? 1 : 0; c < cells.size(); ++c) {
+ const double* row_values = matrix_.RowBlockValues(r);
+ const int col_block_id = cells[c].block_id;
+ const int col_block_pos = bs->cols[col_block_id].position;
+ const int col_block_size = bs->cols[col_block_id].size;
+
+ ConstVectorRef xref(x + col_block_pos - num_cols_e(),
+ col_block_size);
+ ConstMatrixRef m(row_values + cells[c].position,
+ row_block_size,
+ col_block_size);
+ yref += m.lazyProduct(xref);
+ }
+ }
+}
+
+void PartitionedMatrixView::LeftMultiplyE(const double* x, double* y) const {
+ const CompressedRowBlockStructure* bs = matrix_.block_structure();
+
+ // Iterate over the first num_row_blocks_e_ row blocks, and multiply
+ // by the first cell in each row block.
+ for (int r = 0; r < num_row_blocks_e_; ++r) {
+ const Cell& cell = bs->rows[r].cells[0];
+ const double* row_values = matrix_.RowBlockValues(r);
+ const int row_block_pos = bs->rows[r].block.position;
+ const int row_block_size = bs->rows[r].block.size;
+ const int col_block_id = cell.block_id;
+ const int col_block_pos = bs->cols[col_block_id].position;
+ const int col_block_size = bs->cols[col_block_id].size;
+
+ ConstVectorRef xref(x + row_block_pos, row_block_size);
+ VectorRef yref(y + col_block_pos, col_block_size);
+ ConstMatrixRef m(row_values + cell.position,
+ row_block_size,
+ col_block_size);
+ yref += m.transpose().lazyProduct(xref);
+ }
+}
+
+void PartitionedMatrixView::LeftMultiplyF(const double* x, double* y) const {
+ const CompressedRowBlockStructure* bs = matrix_.block_structure();
+
+ // Iterate over row blocks, and if the row block is in E, then
+ // multiply by all the cells except the first one which is of type
+  // E. If the row block is not in E (i.e., it is in the bottom
+ // num_row_blocks - num_row_blocks_e row blocks), then all the cells
+ // are of type F and multiply by them all.
+ for (int r = 0; r < bs->rows.size(); ++r) {
+ const int row_block_pos = bs->rows[r].block.position;
+ const int row_block_size = bs->rows[r].block.size;
+ ConstVectorRef xref(x + row_block_pos, row_block_size);
+ const vector<Cell>& cells = bs->rows[r].cells;
+ for (int c = (r < num_row_blocks_e_) ? 1 : 0; c < cells.size(); ++c) {
+ const double* row_values = matrix_.RowBlockValues(r);
+ const int col_block_id = cells[c].block_id;
+ const int col_block_pos = bs->cols[col_block_id].position;
+ const int col_block_size = bs->cols[col_block_id].size;
+
+ VectorRef yref(y + col_block_pos - num_cols_e(), col_block_size);
+ ConstMatrixRef m(row_values + cells[c].position,
+ row_block_size,
+ col_block_size);
+ yref += m.transpose().lazyProduct(xref);
+ }
+ }
+}
+
+// Given a range of column blocks of a matrix m, compute the block
+// structure of the block diagonal of the matrix m(:,
+// start_col_block:end_col_block)'m(:, start_col_block:end_col_block)
+// and return a BlockSparseMatrix with this block structure. The
+// caller owns the result.
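+//
+// For example (illustrative), if the selected column blocks have sizes
+// 2 and 3, the resulting layout has two diagonal blocks: a 2x2 block
+// whose cell starts at position 0 in the values array, and a 3x3 block
+// whose cell starts at position 4 (= 2 * 2).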
+BlockSparseMatrix* PartitionedMatrixView::CreateBlockDiagonalMatrixLayout(
+ int start_col_block, int end_col_block) const {
+ const CompressedRowBlockStructure* bs = matrix_.block_structure();
+ CompressedRowBlockStructure* block_diagonal_structure =
+ new CompressedRowBlockStructure;
+
+ int block_position = 0;
+ int diagonal_cell_position = 0;
+
+ // Iterate over the column blocks, creating a new diagonal block for
+ // each column block.
+ for (int c = start_col_block; c < end_col_block; ++c) {
+ const Block& block = bs->cols[c];
+ block_diagonal_structure->cols.push_back(Block());
+ Block& diagonal_block = block_diagonal_structure->cols.back();
+ diagonal_block.size = block.size;
+ diagonal_block.position = block_position;
+
+ block_diagonal_structure->rows.push_back(CompressedRow());
+ CompressedRow& row = block_diagonal_structure->rows.back();
+ row.block = diagonal_block;
+
+ row.cells.push_back(Cell());
+ Cell& cell = row.cells.back();
+ cell.block_id = c - start_col_block;
+ cell.position = diagonal_cell_position;
+
+ block_position += block.size;
+ diagonal_cell_position += block.size * block.size;
+ }
+
+ // Build a BlockSparseMatrix with the just computed block
+ // structure.
+ return new BlockSparseMatrix(block_diagonal_structure);
+}
+
+BlockSparseMatrix* PartitionedMatrixView::CreateBlockDiagonalEtE() const {
+ BlockSparseMatrix* block_diagonal =
+ CreateBlockDiagonalMatrixLayout(0, num_col_blocks_e_);
+ UpdateBlockDiagonalEtE(block_diagonal);
+ return block_diagonal;
+}
+
+BlockSparseMatrix* PartitionedMatrixView::CreateBlockDiagonalFtF() const {
+ BlockSparseMatrix* block_diagonal =
+ CreateBlockDiagonalMatrixLayout(
+ num_col_blocks_e_, num_col_blocks_e_ + num_col_blocks_f_);
+ UpdateBlockDiagonalFtF(block_diagonal);
+ return block_diagonal;
+}
+
+// Similar to the code in RightMultiplyE, except that instead of a
+// matrix-vector multiply it is an outer product.
+//
+// block_diagonal = block_diagonal(E'E)
+void PartitionedMatrixView::UpdateBlockDiagonalEtE(
+ BlockSparseMatrix* block_diagonal) const {
+ const CompressedRowBlockStructure* bs = matrix_.block_structure();
+ const CompressedRowBlockStructure* block_diagonal_structure =
+ block_diagonal->block_structure();
+
+ block_diagonal->SetZero();
+
+ for (int r = 0; r < num_row_blocks_e_ ; ++r) {
+ const double* row_values = matrix_.RowBlockValues(r);
+ const Cell& cell = bs->rows[r].cells[0];
+ const int row_block_size = bs->rows[r].block.size;
+ const int block_id = cell.block_id;
+ const int col_block_size = bs->cols[block_id].size;
+ ConstMatrixRef m(row_values + cell.position,
+ row_block_size,
+ col_block_size);
+
+ const int cell_position =
+ block_diagonal_structure->rows[block_id].cells[0].position;
+
+ MatrixRef(block_diagonal->mutable_values() + cell_position,
+ col_block_size, col_block_size).noalias() += m.transpose() * m;
+ }
+}
+
+// Similar to the code in RightMultiplyF, except that instead of a
+// matrix-vector multiply it is an outer product.
+//
+// block_diagonal = block_diagonal(F'F)
+//
+void PartitionedMatrixView::UpdateBlockDiagonalFtF(
+ BlockSparseMatrix* block_diagonal) const {
+ const CompressedRowBlockStructure* bs = matrix_.block_structure();
+ const CompressedRowBlockStructure* block_diagonal_structure =
+ block_diagonal->block_structure();
+
+ block_diagonal->SetZero();
+ for (int r = 0; r < bs->rows.size(); ++r) {
+ const int row_block_size = bs->rows[r].block.size;
+ const vector<Cell>& cells = bs->rows[r].cells;
+ const double* row_values = matrix_.RowBlockValues(r);
+ for (int c = (r < num_row_blocks_e_) ? 1 : 0; c < cells.size(); ++c) {
+ const int col_block_id = cells[c].block_id;
+ const int col_block_size = bs->cols[col_block_id].size;
+ ConstMatrixRef m(row_values + cells[c].position,
+ row_block_size,
+ col_block_size);
+ const int diagonal_block_id = col_block_id - num_col_blocks_e_;
+ const int cell_position =
+ block_diagonal_structure->rows[diagonal_block_id].cells[0].position;
+
+ MatrixRef(block_diagonal->mutable_values() + cell_position,
+ col_block_size, col_block_size).noalias() += m.transpose() * m;
+ }
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/partitioned_matrix_view.h b/internal/ceres/partitioned_matrix_view.h
new file mode 100644
index 0000000..cfe4de5
--- /dev/null
+++ b/internal/ceres/partitioned_matrix_view.h
@@ -0,0 +1,121 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// For generalized bi-partite Jacobian matrices that arise in
+// Structure from Motion related problems, it is sometimes useful to
+// have access to the two parts of the matrix as linear operators
+// themselves. This class provides that functionality.
+
+#ifndef CERES_INTERNAL_PARTITIONED_MATRIX_VIEW_H_
+#define CERES_INTERNAL_PARTITIONED_MATRIX_VIEW_H_
+
+#include "ceres/block_sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+// Given a generalized bi-partite matrix A = [E F], with the same block
+// structure as required by the Schur complement based solver, found
+// in explicit_schur_complement_solver.h, provide access to the
+// matrices E and F and their outer products E'E and F'F with
+// themselves.
+//
+// The lack of a BlockStructure object will result in a crash, and if
+// the block structure of the matrix does not satisfy the requirements
+// of the Schur complement solver, the output will be unpredictable and
+// wrong.
+//
+// This class lives in the internal namespace as it is a utility class
+// to be used by the IterativeSchurComplementSolver class, found in
+// iterative_schur_complement_solver.h, and is not meant for general
+// consumption.
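+//
+// A minimal usage sketch (illustrative; "A" is assumed to be a
+// BlockSparseMatrix with a Schur complement compatible block structure,
+// "num_eliminate_blocks" the number of e_blocks, and "x" a vector of
+// size num_cols_e()):
+//
+//   PartitionedMatrixView view(A, num_eliminate_blocks);
+//   Vector y = Vector::Zero(view.num_rows());
+//   view.RightMultiplyE(x.data(), y.data());  // y += E x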
+class PartitionedMatrixView {
+ public:
+ // matrix = [E F], where the matrix E contains the first
+ // num_col_blocks_a column blocks.
+ PartitionedMatrixView(const BlockSparseMatrixBase& matrix,
+ int num_col_blocks_a);
+ ~PartitionedMatrixView();
+
+ // y += E'x
+ void LeftMultiplyE(const double* x, double* y) const;
+
+ // y += F'x
+ void LeftMultiplyF(const double* x, double* y) const;
+
+ // y += Ex
+ void RightMultiplyE(const double* x, double* y) const;
+
+ // y += Fx
+ void RightMultiplyF(const double* x, double* y) const;
+
+ // Create and return the block diagonal of the matrix E'E.
+ BlockSparseMatrix* CreateBlockDiagonalEtE() const;
+
+ // Create and return the block diagonal of the matrix F'F.
+ BlockSparseMatrix* CreateBlockDiagonalFtF() const;
+
+ // Compute the block diagonal of the matrix E'E and store it in
+ // block_diagonal. The matrix block_diagonal is expected to have a
+ // BlockStructure (preferably created using
+  // CreateBlockDiagonalEtE) which has the same structure as
+ // the block diagonal of E'E.
+ void UpdateBlockDiagonalEtE(BlockSparseMatrix* block_diagonal) const;
+
+ // Compute the block diagonal of the matrix F'F and store it in
+ // block_diagonal. The matrix block_diagonal is expected to have a
+ // BlockStructure (preferably created using
+  // CreateBlockDiagonalFtF) which has the same structure as
+ // the block diagonal of F'F.
+ void UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const;
+
+ int num_col_blocks_e() const { return num_col_blocks_e_; }
+ int num_col_blocks_f() const { return num_col_blocks_f_; }
+ int num_cols_e() const { return num_cols_e_; }
+ int num_cols_f() const { return num_cols_f_; }
+ int num_rows() const { return matrix_.num_rows(); }
+ int num_cols() const { return matrix_.num_cols(); }
+
+ private:
+ BlockSparseMatrix* CreateBlockDiagonalMatrixLayout(int start_col_block,
+ int end_col_block) const;
+
+ const BlockSparseMatrixBase& matrix_;
+ int num_row_blocks_e_;
+ int num_col_blocks_e_;
+ int num_col_blocks_f_;
+ int num_cols_e_;
+ int num_cols_f_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_PARTITIONED_MATRIX_VIEW_H_
diff --git a/internal/ceres/partitioned_matrix_view_test.cc b/internal/ceres/partitioned_matrix_view_test.cc
new file mode 100644
index 0000000..48f7d24
--- /dev/null
+++ b/internal/ceres/partitioned_matrix_view_test.cc
@@ -0,0 +1,191 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/partitioned_matrix_view.h"
+
+#include <vector>
+#include "ceres/block_structure.h"
+#include "ceres/casts.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/random.h"
+#include "ceres/sparse_matrix.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+const double kEpsilon = 1e-14;
+
+class PartitionedMatrixViewTest : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ scoped_ptr<LinearLeastSquaresProblem> problem(
+ CreateLinearLeastSquaresProblemFromId(2));
+ CHECK_NOTNULL(problem.get());
+ A_.reset(problem->A.release());
+
+ num_cols_ = A_->num_cols();
+ num_rows_ = A_->num_rows();
+ num_eliminate_blocks_ = problem->num_eliminate_blocks;
+ }
+
+ int num_rows_;
+ int num_cols_;
+ int num_eliminate_blocks_;
+
+ scoped_ptr<SparseMatrix> A_;
+};
+
+TEST_F(PartitionedMatrixViewTest, DimensionsTest) {
+ PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
+ num_eliminate_blocks_);
+ EXPECT_EQ(m.num_col_blocks_e(), num_eliminate_blocks_);
+ EXPECT_EQ(m.num_col_blocks_f(), num_cols_ - num_eliminate_blocks_);
+ EXPECT_EQ(m.num_cols_e(), num_eliminate_blocks_);
+ EXPECT_EQ(m.num_cols_f(), num_cols_ - num_eliminate_blocks_);
+ EXPECT_EQ(m.num_cols(), A_->num_cols());
+ EXPECT_EQ(m.num_rows(), A_->num_rows());
+}
+
+TEST_F(PartitionedMatrixViewTest, RightMultiplyE) {
+ PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
+ num_eliminate_blocks_);
+
+ srand(5);
+
+ Vector x1(m.num_cols_e());
+ Vector x2(m.num_cols());
+ x2.setZero();
+
+ for (int i = 0; i < m.num_cols_e(); ++i) {
+ x1(i) = x2(i) = RandDouble();
+ }
+
+ Vector y1 = Vector::Zero(m.num_rows());
+ m.RightMultiplyE(x1.data(), y1.data());
+
+ Vector y2 = Vector::Zero(m.num_rows());
+ A_->RightMultiply(x2.data(), y2.data());
+
+ for (int i = 0; i < m.num_rows(); ++i) {
+ EXPECT_NEAR(y1(i), y2(i), kEpsilon);
+ }
+}
+
+TEST_F(PartitionedMatrixViewTest, RightMultiplyF) {
+ PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
+ num_eliminate_blocks_);
+
+ srand(5);
+
+ Vector x1(m.num_cols_f());
+ Vector x2 = Vector::Zero(m.num_cols());
+
+ for (int i = 0; i < m.num_cols_f(); ++i) {
+ x1(i) = RandDouble();
+ x2(i + m.num_cols_e()) = x1(i);
+ }
+
+ Vector y1 = Vector::Zero(m.num_rows());
+ m.RightMultiplyF(x1.data(), y1.data());
+
+ Vector y2 = Vector::Zero(m.num_rows());
+ A_->RightMultiply(x2.data(), y2.data());
+
+ for (int i = 0; i < m.num_rows(); ++i) {
+ EXPECT_NEAR(y1(i), y2(i), kEpsilon);
+ }
+}
+
+TEST_F(PartitionedMatrixViewTest, LeftMultiply) {
+ PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
+ num_eliminate_blocks_);
+
+ srand(5);
+
+ Vector x = Vector::Zero(m.num_rows());
+ for (int i = 0; i < m.num_rows(); ++i) {
+ x(i) = RandDouble();
+ }
+
+ Vector y = Vector::Zero(m.num_cols());
+ Vector y1 = Vector::Zero(m.num_cols_e());
+ Vector y2 = Vector::Zero(m.num_cols_f());
+
+ A_->LeftMultiply(x.data(), y.data());
+ m.LeftMultiplyE(x.data(), y1.data());
+ m.LeftMultiplyF(x.data(), y2.data());
+
+ for (int i = 0; i < m.num_cols(); ++i) {
+ EXPECT_NEAR(y(i),
+ (i < m.num_cols_e()) ? y1(i) : y2(i - m.num_cols_e()),
+ kEpsilon);
+ }
+}
+
+TEST_F(PartitionedMatrixViewTest, BlockDiagonalEtE) {
+ PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
+ num_eliminate_blocks_);
+
+ scoped_ptr<BlockSparseMatrix>
+ block_diagonal_ee(m.CreateBlockDiagonalEtE());
+ const CompressedRowBlockStructure* bs = block_diagonal_ee->block_structure();
+
+ EXPECT_EQ(block_diagonal_ee->num_rows(), 2);
+ EXPECT_EQ(block_diagonal_ee->num_cols(), 2);
+ EXPECT_EQ(bs->cols.size(), 2);
+ EXPECT_EQ(bs->rows.size(), 2);
+
+ EXPECT_NEAR(block_diagonal_ee->values()[0], 10.0, kEpsilon);
+ EXPECT_NEAR(block_diagonal_ee->values()[1], 155.0, kEpsilon);
+}
+
+TEST_F(PartitionedMatrixViewTest, BlockDiagonalFtF) {
+ PartitionedMatrixView m(*down_cast<BlockSparseMatrix*>(A_.get()),
+ num_eliminate_blocks_);
+
+ scoped_ptr<BlockSparseMatrix>
+ block_diagonal_ff(m.CreateBlockDiagonalFtF());
+ const CompressedRowBlockStructure* bs = block_diagonal_ff->block_structure();
+
+ EXPECT_EQ(block_diagonal_ff->num_rows(), 3);
+ EXPECT_EQ(block_diagonal_ff->num_cols(), 3);
+ EXPECT_EQ(bs->cols.size(), 3);
+ EXPECT_EQ(bs->rows.size(), 3);
+ EXPECT_NEAR(block_diagonal_ff->values()[0], 70.0, kEpsilon);
+ EXPECT_NEAR(block_diagonal_ff->values()[1], 17.0, kEpsilon);
+ EXPECT_NEAR(block_diagonal_ff->values()[2], 37.0, kEpsilon);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/polynomial_solver.cc b/internal/ceres/polynomial_solver.cc
new file mode 100644
index 0000000..0ece7bc
--- /dev/null
+++ b/internal/ceres/polynomial_solver.cc
@@ -0,0 +1,183 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: moll.markus@arcor.de (Markus Moll)
+
+#include "ceres/polynomial_solver.h"
+
+#include <cmath>
+#include <cstddef>
+#include "Eigen/Dense"
+#include "ceres/internal/port.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
+// Balancing function as described by B. N. Parlett and C. Reinsch,
+// "Balancing a Matrix for Calculation of Eigenvalues and Eigenvectors".
+// In: Numerische Mathematik, Volume 13, Number 4 (1969), 293-304,
+// Springer Berlin / Heidelberg. DOI: 10.1007/BF02165404
+void BalanceCompanionMatrix(Matrix* companion_matrix_ptr) {
+ CHECK_NOTNULL(companion_matrix_ptr);
+ Matrix& companion_matrix = *companion_matrix_ptr;
+ Matrix companion_matrix_offdiagonal = companion_matrix;
+ companion_matrix_offdiagonal.diagonal().setZero();
+
+ const int degree = companion_matrix.rows();
+
+ // gamma <= 1 controls how much a change in the scaling has to
+ // lower the 1-norm of the companion matrix to be accepted.
+ //
+ // gamma = 1 seems to lead to cycles (numerical issues?), so
+ // we set it slightly lower.
+ const double gamma = 0.9;
+
+ // Greedily scale row/column pairs until there is no change.
+ bool scaling_has_changed;
+ do {
+ scaling_has_changed = false;
+
+ for (int i = 0; i < degree; ++i) {
+ const double row_norm = companion_matrix_offdiagonal.row(i).lpNorm<1>();
+ const double col_norm = companion_matrix_offdiagonal.col(i).lpNorm<1>();
+
+ // Decompose row_norm/col_norm into mantissa * 2^exponent,
+ // where 0.5 <= mantissa < 1. Discard mantissa (return value
+ // of frexp), as only the exponent is needed.
+ int exponent = 0;
+ std::frexp(row_norm / col_norm, &exponent);
+ exponent /= 2;
+
+ if (exponent != 0) {
+ const double scaled_col_norm = std::ldexp(col_norm, exponent);
+ const double scaled_row_norm = std::ldexp(row_norm, -exponent);
+ if (scaled_col_norm + scaled_row_norm < gamma * (col_norm + row_norm)) {
+ // Accept the new scaling. (Multiplication by powers of 2 should not
+ // introduce rounding errors (ignoring non-normalized numbers and
+ // over- or underflow))
+ scaling_has_changed = true;
+ companion_matrix_offdiagonal.row(i) *= std::ldexp(1.0, -exponent);
+ companion_matrix_offdiagonal.col(i) *= std::ldexp(1.0, exponent);
+ }
+ }
+ }
+ } while (scaling_has_changed);
+
+ companion_matrix_offdiagonal.diagonal() = companion_matrix.diagonal();
+ companion_matrix = companion_matrix_offdiagonal;
+ VLOG(3) << "Balanced companion matrix is\n" << companion_matrix;
+}
+
+void BuildCompanionMatrix(const Vector& polynomial,
+ Matrix* companion_matrix_ptr) {
+ CHECK_NOTNULL(companion_matrix_ptr);
+ Matrix& companion_matrix = *companion_matrix_ptr;
+
+ const int degree = polynomial.size() - 1;
+
+ companion_matrix.resize(degree, degree);
+ companion_matrix.setZero();
+ companion_matrix.diagonal(-1).setOnes();
+ companion_matrix.col(degree - 1) = -polynomial.reverse().head(degree);
+}
+
+// Remove leading terms with zero coefficients.
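+// For example, the coefficient vector (0, 0, 1, 2) becomes (1, 2).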
+Vector RemoveLeadingZeros(const Vector& polynomial_in) {
+ int i = 0;
+ while (i < (polynomial_in.size() - 1) && polynomial_in(i) == 0.0) {
+ ++i;
+ }
+ return polynomial_in.tail(polynomial_in.size() - i);
+}
+} // namespace
+
+bool FindPolynomialRoots(const Vector& polynomial_in,
+ Vector* real,
+ Vector* imaginary) {
+ if (polynomial_in.size() == 0) {
+ LOG(ERROR) << "Invalid polynomial of size 0 passed to FindPolynomialRoots";
+ return false;
+ }
+
+ Vector polynomial = RemoveLeadingZeros(polynomial_in);
+ const int degree = polynomial.size() - 1;
+
+ // Is the polynomial constant?
+ if (degree == 0) {
+ LOG(WARNING) << "Trying to extract roots from a constant "
+ << "polynomial in FindPolynomialRoots";
+ return true;
+ }
+
+ // Divide by leading term
+ const double leading_term = polynomial(0);
+ polynomial /= leading_term;
+
+ // Separately handle linear polynomials.
+ if (degree == 1) {
+ if (real != NULL) {
+ real->resize(1);
+ (*real)(0) = -polynomial(1);
+ }
+ if (imaginary != NULL) {
+ imaginary->resize(1);
+ imaginary->setZero();
+    }
+    return true;
+  }
+
+ // The degree is now known to be at least 2.
+ // Build and balance the companion matrix to the polynomial.
+ Matrix companion_matrix(degree, degree);
+ BuildCompanionMatrix(polynomial, &companion_matrix);
+ BalanceCompanionMatrix(&companion_matrix);
+
+ // Find its (complex) eigenvalues.
+ Eigen::EigenSolver<Matrix> solver(companion_matrix, false);
+ if (solver.info() != Eigen::Success) {
+ LOG(ERROR) << "Failed to extract eigenvalues from companion matrix.";
+ return false;
+ }
+
+ // Output roots
+ if (real != NULL) {
+ *real = solver.eigenvalues().real();
+ } else {
+ LOG(WARNING) << "NULL pointer passed as real argument to "
+ << "FindPolynomialRoots. Real parts of the roots will not "
+ << "be returned.";
+ }
+ if (imaginary != NULL) {
+ *imaginary = solver.eigenvalues().imag();
+ }
+ return true;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/polynomial_solver.h b/internal/ceres/polynomial_solver.h
new file mode 100644
index 0000000..1cf07dd
--- /dev/null
+++ b/internal/ceres/polynomial_solver.h
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: moll.markus@arcor.de (Markus Moll)
+
+#ifndef CERES_INTERNAL_POLYNOMIAL_SOLVER_H_
+#define CERES_INTERNAL_POLYNOMIAL_SOLVER_H_
+
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+// Use the companion matrix eigenvalues to determine the roots of the polynomial
+//
+// sum_{i=0}^N polynomial(i) x^{N-i}.
+//
+// This function returns true on success, false otherwise.
+// Failure indicates that the polynomial is invalid (of size 0) or
+// that the eigenvalues of the companion matrix could not be computed.
+// On failure, a more detailed message will be written to LOG(ERROR).
+// If real is not NULL, the real parts of the roots will be returned in it.
+// Likewise, if imaginary is not NULL, imaginary parts will be returned in it.
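+//
+// A minimal usage sketch (illustrative only):
+//
+//   Vector polynomial(3);
+//   polynomial << 1.0, -3.0, 2.0;  // x^2 - 3x + 2 = (x - 1)(x - 2)
+//   Vector real, imaginary;
+//   if (FindPolynomialRoots(polynomial, &real, &imaginary)) {
+//     // real holds 1.0 and 2.0 (in no particular order),
+//     // imaginary holds zeros.
+//   }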
+bool FindPolynomialRoots(const Vector& polynomial,
+ Vector* real,
+ Vector* imaginary);
+
+// Evaluate the polynomial at x using the Horner scheme.
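+// For example, with polynomial = (1, 2, 3), i.e. x^2 + 2x + 3,
+// EvaluatePolynomial(polynomial, 2.0) returns 1*4 + 2*2 + 3 = 11.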
+inline double EvaluatePolynomial(const Vector& polynomial, double x) {
+ double v = 0.0;
+ for (int i = 0; i < polynomial.size(); ++i) {
+ v = v * x + polynomial(i);
+ }
+ return v;
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_POLYNOMIAL_SOLVER_H_
diff --git a/internal/ceres/polynomial_solver_test.cc b/internal/ceres/polynomial_solver_test.cc
new file mode 100644
index 0000000..ae347fb
--- /dev/null
+++ b/internal/ceres/polynomial_solver_test.cc
@@ -0,0 +1,223 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: moll.markus@arcor.de (Markus Moll)
+
+#include "ceres/polynomial_solver.h"
+
+#include <limits>
+#include <cmath>
+#include <cstddef>
+#include <algorithm>
+#include "gtest/gtest.h"
+#include "ceres/test_util.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
+// For IEEE-754 doubles, machine precision is about 2e-16.
+const double kEpsilon = 1e-13;
+const double kEpsilonLoose = 1e-9;
+
+// Return the constant polynomial p(x) = value.
+Vector ConstantPolynomial(double value) {
+ Vector poly(1);
+ poly(0) = value;
+ return poly;
+}
+
+// Return the polynomial p(x) = poly(x) * (x - root).
+Vector AddRealRoot(const Vector& poly, double root) {
+ Vector poly2(poly.size() + 1);
+ poly2.setZero();
+ poly2.head(poly.size()) += poly;
+ poly2.tail(poly.size()) -= root * poly;
+ return poly2;
+}
+
+// Return the polynomial
+// p(x) = poly(x) * (x - real - imag*i) * (x - real + imag*i).
+Vector AddComplexRootPair(const Vector& poly, double real, double imag) {
+ Vector poly2(poly.size() + 2);
+ poly2.setZero();
+  // Multiply poly by x^2 - 2*real*x + (real^2 + imag^2).
+ poly2.head(poly.size()) += poly;
+ poly2.segment(1, poly.size()) -= 2 * real * poly;
+ poly2.tail(poly.size()) += (real*real + imag*imag) * poly;
+ return poly2;
+}
+
+// Sort the entries in a vector.
+// Needed because the roots are not returned in sorted order.
+Vector SortVector(const Vector& in) {
+ Vector out(in);
+ std::sort(out.data(), out.data() + out.size());
+ return out;
+}
+
+// Run a test with the polynomial defined by the N real roots in roots_real.
+// If use_real is false, NULL is passed as the real argument to
+// FindPolynomialRoots. If use_imaginary is false, NULL is passed as the
+// imaginary argument to FindPolynomialRoots.
+template<int N>
+void RunPolynomialTestRealRoots(const double (&real_roots)[N],
+ bool use_real,
+ bool use_imaginary,
+ double epsilon) {
+ Vector real;
+ Vector imaginary;
+ Vector poly = ConstantPolynomial(1.23);
+ for (int i = 0; i < N; ++i) {
+ poly = AddRealRoot(poly, real_roots[i]);
+ }
+ Vector* const real_ptr = use_real ? &real : NULL;
+ Vector* const imaginary_ptr = use_imaginary ? &imaginary : NULL;
+ bool success = FindPolynomialRoots(poly, real_ptr, imaginary_ptr);
+
+ EXPECT_EQ(success, true);
+ if (use_real) {
+ EXPECT_EQ(real.size(), N);
+ real = SortVector(real);
+ ExpectArraysClose(N, real.data(), real_roots, epsilon);
+ }
+ if (use_imaginary) {
+ EXPECT_EQ(imaginary.size(), N);
+ const Vector zeros = Vector::Zero(N);
+ ExpectArraysClose(N, imaginary.data(), zeros.data(), epsilon);
+ }
+}
+} // namespace
+
+TEST(PolynomialSolver, InvalidPolynomialOfZeroLengthIsRejected) {
+ // Vector poly(0) is an ambiguous constructor call, so
+ // use the constructor with explicit column count.
+ Vector poly(0, 1);
+ Vector real;
+ Vector imag;
+ bool success = FindPolynomialRoots(poly, &real, &imag);
+
+ EXPECT_EQ(success, false);
+}
+
+TEST(PolynomialSolver, ConstantPolynomialReturnsNoRoots) {
+ Vector poly = ConstantPolynomial(1.23);
+ Vector real;
+ Vector imag;
+ bool success = FindPolynomialRoots(poly, &real, &imag);
+
+ EXPECT_EQ(success, true);
+ EXPECT_EQ(real.size(), 0);
+ EXPECT_EQ(imag.size(), 0);
+}
+
+TEST(PolynomialSolver, LinearPolynomialWithPositiveRootWorks) {
+ const double roots[1] = { 42.42 };
+ RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
+}
+
+TEST(PolynomialSolver, LinearPolynomialWithNegativeRootWorks) {
+ const double roots[1] = { -42.42 };
+ RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
+}
+
+TEST(PolynomialSolver, QuadraticPolynomialWithPositiveRootsWorks) {
+ const double roots[2] = { 1.0, 42.42 };
+ RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
+}
+
+TEST(PolynomialSolver, QuadraticPolynomialWithOneNegativeRootWorks) {
+ const double roots[2] = { -42.42, 1.0 };
+ RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
+}
+
+TEST(PolynomialSolver, QuadraticPolynomialWithTwoNegativeRootsWorks) {
+ const double roots[2] = { -42.42, -1.0 };
+ RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
+}
+
+TEST(PolynomialSolver, QuadraticPolynomialWithCloseRootsWorks) {
+ const double roots[2] = { 42.42, 42.43 };
+ RunPolynomialTestRealRoots(roots, true, false, kEpsilonLoose);
+}
+
+TEST(PolynomialSolver, QuadraticPolynomialWithComplexRootsWorks) {
+ Vector real;
+ Vector imag;
+
+ Vector poly = ConstantPolynomial(1.23);
+ poly = AddComplexRootPair(poly, 42.42, 4.2);
+ bool success = FindPolynomialRoots(poly, &real, &imag);
+
+ EXPECT_EQ(success, true);
+ EXPECT_EQ(real.size(), 2);
+ EXPECT_EQ(imag.size(), 2);
+ ExpectClose(real(0), 42.42, kEpsilon);
+ ExpectClose(real(1), 42.42, kEpsilon);
+ ExpectClose(std::abs(imag(0)), 4.2, kEpsilon);
+ ExpectClose(std::abs(imag(1)), 4.2, kEpsilon);
+ ExpectClose(std::abs(imag(0) + imag(1)), 0.0, kEpsilon);
+}
+
+TEST(PolynomialSolver, QuarticPolynomialWorks) {
+ const double roots[4] = { 1.23e-4, 1.23e-1, 1.23e+2, 1.23e+5 };
+ RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
+}
+
+TEST(PolynomialSolver, QuarticPolynomialWithTwoClustersOfCloseRootsWorks) {
+ const double roots[4] = { 1.23e-1, 2.46e-1, 1.23e+5, 2.46e+5 };
+ RunPolynomialTestRealRoots(roots, true, true, kEpsilonLoose);
+}
+
+TEST(PolynomialSolver, QuarticPolynomialWithTwoZeroRootsWorks) {
+ const double roots[4] = { -42.42, 0.0, 0.0, 42.42 };
+ RunPolynomialTestRealRoots(roots, true, true, kEpsilonLoose);
+}
+
+TEST(PolynomialSolver, QuarticMonomialWorks) {
+ const double roots[4] = { 0.0, 0.0, 0.0, 0.0 };
+ RunPolynomialTestRealRoots(roots, true, true, kEpsilon);
+}
+
+TEST(PolynomialSolver, NullPointerAsImaginaryPartWorks) {
+ const double roots[4] = { 1.23e-4, 1.23e-1, 1.23e+2, 1.23e+5 };
+ RunPolynomialTestRealRoots(roots, true, false, kEpsilon);
+}
+
+TEST(PolynomialSolver, NullPointerAsRealPartWorks) {
+ const double roots[4] = { 1.23e-4, 1.23e-1, 1.23e+2, 1.23e+5 };
+ RunPolynomialTestRealRoots(roots, false, true, kEpsilon);
+}
+
+TEST(PolynomialSolver, BothOutputArgumentsNullWorks) {
+ const double roots[4] = { 1.23e-4, 1.23e-1, 1.23e+2, 1.23e+5 };
+ RunPolynomialTestRealRoots(roots, false, false, kEpsilon);
+}
+
+} // namespace internal
+} // namespace ceres
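For reference, the interface these tests exercise can be driven as in the following minimal sketch. This is not part of the patch; it assumes the FindPolynomialRoots(poly, &real, &imaginary) signature seen above, the highest-degree-first coefficient layout implied by AddRealRoot, and that the internal Vector typedef comes from ceres/internal/eigen.h.

    #include "ceres/internal/eigen.h"     // assumed home of the Vector typedef
    #include "ceres/polynomial_solver.h"
    #include "glog/logging.h"

    // Find the roots of p(x) = x^2 - 3x + 2 (coefficients highest degree first).
    void ExampleFindRoots() {
      ceres::internal::Vector poly(3);
      poly << 1.0, -3.0, 2.0;
      ceres::internal::Vector real, imaginary;
      if (ceres::internal::FindPolynomialRoots(poly, &real, &imaginary)) {
        // real holds {1.0, 2.0} in some order; imaginary is all zeros.
        LOG(INFO) << "Real parts: " << real.transpose();
      }
    }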
diff --git a/internal/ceres/problem.cc b/internal/ceres/problem.cc
new file mode 100644
index 0000000..7ee5b5c
--- /dev/null
+++ b/internal/ceres/problem.cc
@@ -0,0 +1,189 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+// keir@google.com (Keir Mierle)
+
+#include "ceres/problem.h"
+
+#include <vector>
+#include "ceres/problem_impl.h"
+
+namespace ceres {
+
+class ResidualBlock;
+
+Problem::Problem() : problem_impl_(new internal::ProblemImpl) {}
+Problem::Problem(const Problem::Options& options)
+ : problem_impl_(new internal::ProblemImpl(options)) {}
+Problem::~Problem() {}
+
+ResidualBlockId Problem::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ const vector<double*>& parameter_blocks) {
+ return problem_impl_->AddResidualBlock(cost_function,
+ loss_function,
+ parameter_blocks);
+}
+
+ResidualBlockId Problem::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0) {
+ return problem_impl_->AddResidualBlock(cost_function,
+ loss_function,
+ x0);
+}
+
+ResidualBlockId Problem::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1) {
+ return problem_impl_->AddResidualBlock(cost_function,
+ loss_function,
+ x0, x1);
+}
+
+ResidualBlockId Problem::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2) {
+ return problem_impl_->AddResidualBlock(cost_function,
+ loss_function,
+ x0, x1, x2);
+}
+
+ResidualBlockId Problem::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2, double* x3) {
+ return problem_impl_->AddResidualBlock(cost_function,
+ loss_function,
+ x0, x1, x2, x3);
+}
+
+ResidualBlockId Problem::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2, double* x3, double* x4) {
+ return problem_impl_->AddResidualBlock(cost_function,
+ loss_function,
+ x0, x1, x2, x3, x4);
+}
+
+ResidualBlockId Problem::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2, double* x3, double* x4, double* x5) {
+ return problem_impl_->AddResidualBlock(cost_function,
+ loss_function,
+ x0, x1, x2, x3, x4, x5);
+}
+
+ResidualBlockId Problem::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2, double* x3, double* x4, double* x5,
+ double* x6) {
+ return problem_impl_->AddResidualBlock(cost_function,
+ loss_function,
+ x0, x1, x2, x3, x4, x5, x6);
+}
+
+ResidualBlockId Problem::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2, double* x3, double* x4, double* x5,
+ double* x6, double* x7) {
+ return problem_impl_->AddResidualBlock(cost_function,
+ loss_function,
+ x0, x1, x2, x3, x4, x5, x6, x7);
+}
+
+ResidualBlockId Problem::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2, double* x3, double* x4, double* x5,
+ double* x6, double* x7, double* x8) {
+ return problem_impl_->AddResidualBlock(cost_function,
+ loss_function,
+ x0, x1, x2, x3, x4, x5, x6, x7, x8);
+}
+
+ResidualBlockId Problem::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2, double* x3, double* x4, double* x5,
+ double* x6, double* x7, double* x8, double* x9) {
+ return problem_impl_->AddResidualBlock(cost_function,
+ loss_function,
+ x0, x1, x2, x3, x4, x5, x6, x7, x8, x9);
+}
+
+void Problem::AddParameterBlock(double* values, int size) {
+ problem_impl_->AddParameterBlock(values, size);
+}
+
+void Problem::AddParameterBlock(double* values,
+ int size,
+ LocalParameterization* local_parameterization) {
+ problem_impl_->AddParameterBlock(values, size, local_parameterization);
+}
+
+void Problem::SetParameterBlockConstant(double* values) {
+ problem_impl_->SetParameterBlockConstant(values);
+}
+
+void Problem::SetParameterBlockVariable(double* values) {
+ problem_impl_->SetParameterBlockVariable(values);
+}
+
+void Problem::SetParameterization(
+ double* values,
+ LocalParameterization* local_parameterization) {
+ problem_impl_->SetParameterization(values, local_parameterization);
+}
+
+int Problem::NumParameterBlocks() const {
+ return problem_impl_->NumParameterBlocks();
+}
+
+int Problem::NumParameters() const {
+ return problem_impl_->NumParameters();
+}
+
+int Problem::NumResidualBlocks() const {
+ return problem_impl_->NumResidualBlocks();
+}
+
+int Problem::NumResiduals() const {
+ return problem_impl_->NumResiduals();
+}
+
+} // namespace ceres
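For orientation, here is a minimal sketch of how client code drives this thin forwarding layer. It is an illustration only, not part of the patch; the cost function mirrors the SizedCostFunction pattern used in problem_test.cc further down.

    #include "ceres/problem.h"
    #include "ceres/sized_cost_function.h"

    // One residual depending on one parameter block of size 1: r(x) = 10 - x.
    class ExampleCost : public ceres::SizedCostFunction<1, 1> {
     public:
      virtual bool Evaluate(double const* const* parameters,
                            double* residuals,
                            double** jacobians) const {
        residuals[0] = 10.0 - parameters[0][0];
        if (jacobians != NULL && jacobians[0] != NULL) {
          jacobians[0][0] = -1.0;  // d r / d x
        }
        return true;
      }
    };

    void ExampleBuildProblem() {
      double x = 0.5;
      ceres::Problem problem;               // forwards everything to ProblemImpl
      problem.AddParameterBlock(&x, 1);
      problem.AddResidualBlock(new ExampleCost, NULL, &x);
      // By default the problem takes ownership of the cost function.
    }

Every call above is a one-line pass-through to the matching internal::ProblemImpl method, which keeps the public header free of implementation details.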
diff --git a/internal/ceres/problem_impl.cc b/internal/ceres/problem_impl.cc
new file mode 100644
index 0000000..f061e33
--- /dev/null
+++ b/internal/ceres/problem_impl.cc
@@ -0,0 +1,427 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+// keir@google.com (Keir Mierle)
+
+#include "ceres/problem_impl.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+#include "ceres/cost_function.h"
+#include "ceres/loss_function.h"
+#include "ceres/map_util.h"
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/stl_util.h"
+#include "ceres/stringprintf.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+typedef map<double*, internal::ParameterBlock*> ParameterMap;
+
+// Returns true if two regions of memory, a and b, with sizes size_a and size_b
+// respectively, overlap.
+static bool RegionsAlias(const double* a, int size_a,
+ const double* b, int size_b) {
+ return (a < b) ? b < (a + size_a)
+ : a < (b + size_b);
+}
+
+static void CheckForNoAliasing(double* existing_block,
+ int existing_block_size,
+ double* new_block,
+ int new_block_size) {
+ CHECK(!RegionsAlias(existing_block, existing_block_size,
+ new_block, new_block_size))
+ << "Aliasing detected between existing parameter block at memory "
+ << "location " << existing_block
+ << " and has size " << existing_block_size << " with new parameter "
+ << "block that has memory adderss " << new_block << " and would have "
+ << "size " << new_block_size << ".";
+}
+
+static ParameterBlock* InternalAddParameterBlock(
+ double* values,
+ int size,
+ ParameterMap* parameter_map,
+ vector<ParameterBlock*>* parameter_blocks) {
+ CHECK(values) << "Null pointer passed to AddParameterBlock for a parameter "
+ << "with size " << size;
+
+ // Ignore the request if there is a block for the given pointer already.
+ ParameterMap::iterator it = parameter_map->find(values);
+ if (it != parameter_map->end()) {
+ int existing_size = it->second->Size();
+ CHECK(size == existing_size)
+ << "Tried adding a parameter block with the same double pointer, "
+ << values << ", twice, but with different block sizes. Original "
+ << "size was " << existing_size << " but new size is "
+ << size;
+ return it->second;
+ }
+ // Before adding the parameter block, also check that it doesn't alias any
+ // other parameter blocks.
+ if (!parameter_map->empty()) {
+ ParameterMap::iterator lb = parameter_map->lower_bound(values);
+
+ // If lb is not the first block, check the previous block for aliasing.
+ if (lb != parameter_map->begin()) {
+ ParameterMap::iterator previous = lb;
+ --previous;
+ CheckForNoAliasing(previous->first,
+ previous->second->Size(),
+ values,
+ size);
+ }
+
+ // If lb is not off the end, check lb for aliasing.
+ if (lb != parameter_map->end()) {
+ CheckForNoAliasing(lb->first,
+ lb->second->Size(),
+ values,
+ size);
+ }
+ }
+ ParameterBlock* new_parameter_block = new ParameterBlock(values, size);
+ (*parameter_map)[values] = new_parameter_block;
+ parameter_blocks->push_back(new_parameter_block);
+ return new_parameter_block;
+}
+
+ProblemImpl::ProblemImpl() : program_(new internal::Program) {}
+ProblemImpl::ProblemImpl(const Problem::Options& options)
+ : options_(options),
+ program_(new internal::Program) {}
+
+ProblemImpl::~ProblemImpl() {
+ // Collect the unique cost/loss functions and delete the residuals.
+ set<CostFunction*> cost_functions;
+ set<LossFunction*> loss_functions;
+ for (int i = 0; i < program_->residual_blocks_.size(); ++i) {
+ ResidualBlock* residual_block = program_->residual_blocks_[i];
+
+ // The const casts here are legit, since ResidualBlock holds these
+ // pointers as const pointers but we have ownership of them and
+ // have the right to destroy them when the destructor is called.
+ if (options_.cost_function_ownership == TAKE_OWNERSHIP) {
+ cost_functions.insert(
+ const_cast<CostFunction*>(residual_block->cost_function()));
+ }
+ if (options_.loss_function_ownership == TAKE_OWNERSHIP) {
+ loss_functions.insert(
+ const_cast<LossFunction*>(residual_block->loss_function()));
+ }
+
+ delete residual_block;
+ }
+
+ // Collect the unique parameterizations and delete the parameters.
+ set<LocalParameterization*> local_parameterizations;
+ for (int i = 0; i < program_->parameter_blocks_.size(); ++i) {
+ ParameterBlock* parameter_block = program_->parameter_blocks_[i];
+
+ if (options_.local_parameterization_ownership == TAKE_OWNERSHIP) {
+ local_parameterizations.insert(parameter_block->local_parameterization_);
+ }
+
+ delete parameter_block;
+ }
+
+ // Delete the owned cost/loss functions and parameterizations.
+ STLDeleteContainerPointers(local_parameterizations.begin(),
+ local_parameterizations.end());
+ STLDeleteContainerPointers(cost_functions.begin(),
+ cost_functions.end());
+ STLDeleteContainerPointers(loss_functions.begin(),
+ loss_functions.end());
+}
+
+const ResidualBlock* ProblemImpl::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ const vector<double*>& parameter_blocks) {
+ CHECK_NOTNULL(cost_function);
+ CHECK_EQ(parameter_blocks.size(),
+ cost_function->parameter_block_sizes().size());
+
+ // Check the sizes match.
+ const vector<int16>& parameter_block_sizes =
+ cost_function->parameter_block_sizes();
+ CHECK_EQ(parameter_block_sizes.size(), parameter_blocks.size())
+ << "Number of blocks input is different than the number of blocks "
+ << "that the cost function expects.";
+
+ // Check for duplicate parameter blocks.
+ vector<double*> sorted_parameter_blocks(parameter_blocks);
+ sort(sorted_parameter_blocks.begin(), sorted_parameter_blocks.end());
+ vector<double*>::const_iterator duplicate_items =
+ unique(sorted_parameter_blocks.begin(),
+ sorted_parameter_blocks.end());
+ if (duplicate_items != sorted_parameter_blocks.end()) {
+ string blocks;
+ for (int i = 0; i < parameter_blocks.size(); ++i) {
+ blocks += internal::StringPrintf(" %p ", parameter_blocks[i]);
+ }
+
+ LOG(FATAL) << "Duplicate parameter blocks in a residual parameter "
+ << "are not allowed. Parameter block pointers: ["
+ << blocks << "]";
+ }
+
+ // Add parameter blocks and convert the double*'s to parameter blocks.
+ vector<ParameterBlock*> parameter_block_ptrs(parameter_blocks.size());
+ for (int i = 0; i < parameter_blocks.size(); ++i) {
+ parameter_block_ptrs[i] =
+ InternalAddParameterBlock(parameter_blocks[i],
+ parameter_block_sizes[i],
+ &parameter_block_map_,
+ &program_->parameter_blocks_);
+ }
+
+ // Check that the block sizes match the block sizes expected by the
+ // cost_function.
+ for (int i = 0; i < parameter_block_ptrs.size(); ++i) {
+ CHECK_EQ(cost_function->parameter_block_sizes()[i],
+ parameter_block_ptrs[i]->Size())
+ << "The cost function expects parameter block " << i
+ << " of size " << cost_function->parameter_block_sizes()[i]
+ << " but was given a block of size "
+ << parameter_block_ptrs[i]->Size();
+ }
+
+ ResidualBlock* new_residual_block =
+ new ResidualBlock(cost_function,
+ loss_function,
+ parameter_block_ptrs);
+ program_->residual_blocks_.push_back(new_residual_block);
+ return new_residual_block;
+}
+
+// Unfortunately, macros don't help much to reduce this code, and var args don't
+// work because of the ambiguous case that there is no loss function.
+const ResidualBlock* ProblemImpl::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0) {
+ vector<double*> residual_parameters;
+ residual_parameters.push_back(x0);
+ return AddResidualBlock(cost_function, loss_function, residual_parameters);
+}
+
+const ResidualBlock* ProblemImpl::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1) {
+ vector<double*> residual_parameters;
+ residual_parameters.push_back(x0);
+ residual_parameters.push_back(x1);
+ return AddResidualBlock(cost_function, loss_function, residual_parameters);
+}
+
+const ResidualBlock* ProblemImpl::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2) {
+ vector<double*> residual_parameters;
+ residual_parameters.push_back(x0);
+ residual_parameters.push_back(x1);
+ residual_parameters.push_back(x2);
+ return AddResidualBlock(cost_function, loss_function, residual_parameters);
+}
+
+const ResidualBlock* ProblemImpl::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2, double* x3) {
+ vector<double*> residual_parameters;
+ residual_parameters.push_back(x0);
+ residual_parameters.push_back(x1);
+ residual_parameters.push_back(x2);
+ residual_parameters.push_back(x3);
+ return AddResidualBlock(cost_function, loss_function, residual_parameters);
+}
+
+const ResidualBlock* ProblemImpl::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2, double* x3, double* x4) {
+ vector<double*> residual_parameters;
+ residual_parameters.push_back(x0);
+ residual_parameters.push_back(x1);
+ residual_parameters.push_back(x2);
+ residual_parameters.push_back(x3);
+ residual_parameters.push_back(x4);
+ return AddResidualBlock(cost_function, loss_function, residual_parameters);
+}
+
+const ResidualBlock* ProblemImpl::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2, double* x3, double* x4, double* x5) {
+ vector<double*> residual_parameters;
+ residual_parameters.push_back(x0);
+ residual_parameters.push_back(x1);
+ residual_parameters.push_back(x2);
+ residual_parameters.push_back(x3);
+ residual_parameters.push_back(x4);
+ residual_parameters.push_back(x5);
+ return AddResidualBlock(cost_function, loss_function, residual_parameters);
+}
+
+const ResidualBlock* ProblemImpl::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2, double* x3, double* x4, double* x5,
+ double* x6) {
+ vector<double*> residual_parameters;
+ residual_parameters.push_back(x0);
+ residual_parameters.push_back(x1);
+ residual_parameters.push_back(x2);
+ residual_parameters.push_back(x3);
+ residual_parameters.push_back(x4);
+ residual_parameters.push_back(x5);
+ residual_parameters.push_back(x6);
+ return AddResidualBlock(cost_function, loss_function, residual_parameters);
+}
+
+const ResidualBlock* ProblemImpl::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2, double* x3, double* x4, double* x5,
+ double* x6, double* x7) {
+ vector<double*> residual_parameters;
+ residual_parameters.push_back(x0);
+ residual_parameters.push_back(x1);
+ residual_parameters.push_back(x2);
+ residual_parameters.push_back(x3);
+ residual_parameters.push_back(x4);
+ residual_parameters.push_back(x5);
+ residual_parameters.push_back(x6);
+ residual_parameters.push_back(x7);
+ return AddResidualBlock(cost_function, loss_function, residual_parameters);
+}
+
+const ResidualBlock* ProblemImpl::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2, double* x3, double* x4, double* x5,
+ double* x6, double* x7, double* x8) {
+ vector<double*> residual_parameters;
+ residual_parameters.push_back(x0);
+ residual_parameters.push_back(x1);
+ residual_parameters.push_back(x2);
+ residual_parameters.push_back(x3);
+ residual_parameters.push_back(x4);
+ residual_parameters.push_back(x5);
+ residual_parameters.push_back(x6);
+ residual_parameters.push_back(x7);
+ residual_parameters.push_back(x8);
+ return AddResidualBlock(cost_function, loss_function, residual_parameters);
+}
+
+const ResidualBlock* ProblemImpl::AddResidualBlock(
+ CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2, double* x3, double* x4, double* x5,
+ double* x6, double* x7, double* x8, double* x9) {
+ vector<double*> residual_parameters;
+ residual_parameters.push_back(x0);
+ residual_parameters.push_back(x1);
+ residual_parameters.push_back(x2);
+ residual_parameters.push_back(x3);
+ residual_parameters.push_back(x4);
+ residual_parameters.push_back(x5);
+ residual_parameters.push_back(x6);
+ residual_parameters.push_back(x7);
+ residual_parameters.push_back(x8);
+ residual_parameters.push_back(x9);
+ return AddResidualBlock(cost_function, loss_function, residual_parameters);
+}
+
+void ProblemImpl::AddParameterBlock(double* values, int size) {
+ InternalAddParameterBlock(values,
+ size,
+ &parameter_block_map_,
+ &program_->parameter_blocks_);
+}
+
+void ProblemImpl::AddParameterBlock(
+ double* values,
+ int size,
+ LocalParameterization* local_parameterization) {
+ ParameterBlock* parameter_block =
+ InternalAddParameterBlock(values,
+ size,
+ &parameter_block_map_,
+ &program_->parameter_blocks_);
+ if (local_parameterization != NULL) {
+ parameter_block->SetParameterization(local_parameterization);
+ }
+}
+
+void ProblemImpl::SetParameterBlockConstant(double* values) {
+ FindOrDie(parameter_block_map_, values)->SetConstant();
+}
+
+void ProblemImpl::SetParameterBlockVariable(double* values) {
+ FindOrDie(parameter_block_map_, values)->SetVarying();
+}
+
+void ProblemImpl::SetParameterization(
+ double* values,
+ LocalParameterization* local_parameterization) {
+ FindOrDie(parameter_block_map_, values)
+ ->SetParameterization(local_parameterization);
+}
+
+int ProblemImpl::NumParameterBlocks() const {
+ return program_->NumParameterBlocks();
+}
+
+int ProblemImpl::NumParameters() const {
+ return program_->NumParameters();
+}
+
+int ProblemImpl::NumResidualBlocks() const {
+ return program_->NumResidualBlocks();
+}
+
+int ProblemImpl::NumResiduals() const {
+ return program_->NumResiduals();
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/problem_impl.h b/internal/ceres/problem_impl.h
new file mode 100644
index 0000000..2f20d0d
--- /dev/null
+++ b/internal/ceres/problem_impl.h
@@ -0,0 +1,148 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// This is the implementation of the public Problem API. The pointer to
+// implementation (PIMPL) idiom makes it possible for Ceres internal code to
+// refer to the private data members without needing to expose them to the
+// world. An alternative to PIMPL is to have a factory which returns instances
+// of a virtual base class; while that approach would work, it requires clients
+// to always put a Problem object into a scoped pointer; this needlessly muddies
+// client code for little benefit. Therefore, the PIMPL compromise was chosen.
+
+#ifndef CERES_PUBLIC_PROBLEM_IMPL_H_
+#define CERES_PUBLIC_PROBLEM_IMPL_H_
+
+#include <map>
+#include <vector>
+
+#include "ceres/internal/macros.h"
+#include "ceres/internal/port.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/problem.h"
+#include "ceres/types.h"
+
+namespace ceres {
+
+class CostFunction;
+class LossFunction;
+class LocalParameterization;
+
+namespace internal {
+
+class Program;
+class ResidualBlock;
+
+class ProblemImpl {
+ public:
+ typedef map<double*, ParameterBlock*> ParameterMap;
+
+ ProblemImpl();
+ explicit ProblemImpl(const Problem::Options& options);
+
+ ~ProblemImpl();
+
+ // See the public problem.h file for description of these methods.
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ const vector<double*>& parameter_blocks);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2,
+ double* x3);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2,
+ double* x3, double* x4);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2,
+ double* x3, double* x4, double* x5);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2,
+ double* x3, double* x4, double* x5,
+ double* x6);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2,
+ double* x3, double* x4, double* x5,
+ double* x6, double* x7);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2,
+ double* x3, double* x4, double* x5,
+ double* x6, double* x7, double* x8);
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0, double* x1, double* x2,
+ double* x3, double* x4, double* x5,
+ double* x6, double* x7, double* x8,
+ double* x9);
+ void AddParameterBlock(double* values, int size);
+ void AddParameterBlock(double* values,
+ int size,
+ LocalParameterization* local_parameterization);
+ void SetParameterBlockConstant(double* values);
+ void SetParameterBlockVariable(double* values);
+ void SetParameterization(double* values,
+ LocalParameterization* local_parameterization);
+ int NumParameterBlocks() const;
+ int NumParameters() const;
+ int NumResidualBlocks() const;
+ int NumResiduals() const;
+
+ const Program& program() const { return *program_; }
+ Program* mutable_program() { return program_.get(); }
+
+ const ParameterMap& parameter_map() const { return parameter_block_map_; }
+
+ private:
+ const Problem::Options options_;
+
+ // The mapping from user pointers to parameter blocks.
+ map<double*, ParameterBlock*> parameter_block_map_;
+
+ internal::scoped_ptr<internal::Program> program_;
+ CERES_DISALLOW_COPY_AND_ASSIGN(ProblemImpl);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_PUBLIC_PROBLEM_IMPL_H_
diff --git a/internal/ceres/problem_test.cc b/internal/ceres/problem_test.cc
new file mode 100644
index 0000000..4afe1b5
--- /dev/null
+++ b/internal/ceres/problem_test.cc
@@ -0,0 +1,335 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+// keir@google.com (Keir Mierle)
+
+#include "ceres/problem.h"
+
+#include "gtest/gtest.h"
+#include "ceres/cost_function.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/internal/scoped_ptr.h"
+
+namespace ceres {
+namespace internal {
+
+// The following three classes are for the purposes of defining
+// function signatures. They have dummy Evaluate functions.
+
+// Trivial cost function that accepts a single argument.
+class UnaryCostFunction : public CostFunction {
+ public:
+ UnaryCostFunction(int num_residuals, int16 parameter_block_size) {
+ set_num_residuals(num_residuals);
+ mutable_parameter_block_sizes()->push_back(parameter_block_size);
+ }
+ virtual ~UnaryCostFunction() {}
+
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ for (int i = 0; i < num_residuals(); ++i) {
+ residuals[i] = 1;
+ }
+ return true;
+ }
+};
+
+// Trivial cost function that accepts two arguments.
+class BinaryCostFunction: public CostFunction {
+ public:
+ BinaryCostFunction(int num_residuals,
+ int16 parameter_block1_size,
+ int16 parameter_block2_size) {
+ set_num_residuals(num_residuals);
+ mutable_parameter_block_sizes()->push_back(parameter_block1_size);
+ mutable_parameter_block_sizes()->push_back(parameter_block2_size);
+ }
+
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ for (int i = 0; i < num_residuals(); ++i) {
+ residuals[i] = 2;
+ }
+ return true;
+ }
+};
+
+// Trivial cost function that accepts three arguments.
+class TernaryCostFunction: public CostFunction {
+ public:
+ TernaryCostFunction(int num_residuals,
+ int16 parameter_block1_size,
+ int16 parameter_block2_size,
+ int16 parameter_block3_size) {
+ set_num_residuals(num_residuals);
+ mutable_parameter_block_sizes()->push_back(parameter_block1_size);
+ mutable_parameter_block_sizes()->push_back(parameter_block2_size);
+ mutable_parameter_block_sizes()->push_back(parameter_block3_size);
+ }
+
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ for (int i = 0; i < num_residuals(); ++i) {
+ residuals[i] = 3;
+ }
+ return true;
+ }
+};
+
+TEST(Problem, AddResidualWithNullCostFunctionDies) {
+ double x[3], y[4], z[5];
+
+ Problem problem;
+ problem.AddParameterBlock(x, 3);
+ problem.AddParameterBlock(y, 4);
+ problem.AddParameterBlock(z, 5);
+
+ EXPECT_DEATH_IF_SUPPORTED(problem.AddResidualBlock(NULL, NULL, x),
+ "'cost_function' Must be non NULL");
+}
+
+TEST(Problem, AddResidualWithIncorrectNumberOfParameterBlocksDies) {
+ double x[3], y[4], z[5];
+
+ Problem problem;
+ problem.AddParameterBlock(x, 3);
+ problem.AddParameterBlock(y, 4);
+ problem.AddParameterBlock(z, 5);
+
+ // UnaryCostFunction takes only one parameter, but two are passed.
+ EXPECT_DEATH_IF_SUPPORTED(
+ problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x, y),
+ "parameter_blocks.size()");
+}
+
+TEST(Problem, AddResidualWithDifferentSizesOnTheSameVariableDies) {
+ double x[3];
+
+ Problem problem;
+ problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
+ EXPECT_DEATH_IF_SUPPORTED(problem.AddResidualBlock(
+ new UnaryCostFunction(
+ 2, 4 /* 4 != 3 */), NULL, x),
+ "different block sizes");
+}
+
+TEST(Problem, AddResidualWithDuplicateParametersDies) {
+ double x[3], z[5];
+
+ Problem problem;
+ EXPECT_DEATH_IF_SUPPORTED(problem.AddResidualBlock(
+ new BinaryCostFunction(2, 3, 3), NULL, x, x),
+ "Duplicate parameter blocks");
+ EXPECT_DEATH_IF_SUPPORTED(problem.AddResidualBlock(
+ new TernaryCostFunction(1, 5, 3, 5),
+ NULL, z, x, z),
+ "Duplicate parameter blocks");
+}
+
+TEST(Problem, AddResidualWithIncorrectSizesOfParameterBlockDies) {
+ double x[3], y[4], z[5];
+
+ Problem problem;
+ problem.AddParameterBlock(x, 3);
+ problem.AddParameterBlock(y, 4);
+ problem.AddParameterBlock(z, 5);
+
+ // The cost function expects the size of the second parameter, z, to be 4
+ // instead of 5 as declared above. This is fatal.
+ EXPECT_DEATH_IF_SUPPORTED(problem.AddResidualBlock(
+ new BinaryCostFunction(2, 3, 4), NULL, x, z),
+ "different block sizes");
+}
+
+TEST(Problem, AddResidualAddsDuplicatedParametersOnlyOnce) {
+ double x[3], y[4], z[5];
+
+ Problem problem;
+ problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
+ problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
+ problem.AddResidualBlock(new UnaryCostFunction(2, 4), NULL, y);
+ problem.AddResidualBlock(new UnaryCostFunction(2, 5), NULL, z);
+
+ EXPECT_EQ(3, problem.NumParameterBlocks());
+ EXPECT_EQ(12, problem.NumParameters());
+}
+
+TEST(Problem, AddParameterWithDifferentSizesOnTheSameVariableDies) {
+ double x[3], y[4];
+
+ Problem problem;
+ problem.AddParameterBlock(x, 3);
+ problem.AddParameterBlock(y, 4);
+
+ EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(x, 4),
+ "different block sizes");
+}
+
+static double *IntToPtr(int i) {
+ return reinterpret_cast<double*>(sizeof(double) * i); // NOLINT
+}
+
+TEST(Problem, AddParameterWithAliasedParametersDies) {
+ // Layout is
+ //
+ // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
+ // [x] x x x x [y] y y
+ // o==o==o o==o==o o==o
+ // o--o--o o--o--o o--o o--o--o
+ //
+ // Parameter block additions are tested as listed above; expected successful
+ // ones marked with o==o and aliasing ones marked with o--o.
+
+ Problem problem;
+ problem.AddParameterBlock(IntToPtr(5), 5); // x
+ problem.AddParameterBlock(IntToPtr(13), 3); // y
+
+ EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr( 4), 2),
+ "Aliasing detected");
+ EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr( 4), 3),
+ "Aliasing detected");
+ EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr( 4), 9),
+ "Aliasing detected");
+ EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr( 8), 3),
+ "Aliasing detected");
+ EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr(12), 2),
+ "Aliasing detected");
+ EXPECT_DEATH_IF_SUPPORTED(problem.AddParameterBlock(IntToPtr(14), 3),
+ "Aliasing detected");
+
+ // These ones should work.
+ problem.AddParameterBlock(IntToPtr( 2), 3);
+ problem.AddParameterBlock(IntToPtr(10), 3);
+ problem.AddParameterBlock(IntToPtr(16), 2);
+
+ ASSERT_EQ(5, problem.NumParameterBlocks());
+}
+
+TEST(Problem, AddParameterIgnoresDuplicateCalls) {
+ double x[3], y[4];
+
+ Problem problem;
+ problem.AddParameterBlock(x, 3);
+ problem.AddParameterBlock(y, 4);
+
+ // Creating parameter blocks multiple times is ignored.
+ problem.AddParameterBlock(x, 3);
+ problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
+
+ // ... even repeatedly.
+ problem.AddParameterBlock(x, 3);
+ problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
+
+ // More parameters are fine.
+ problem.AddParameterBlock(y, 4);
+ problem.AddResidualBlock(new UnaryCostFunction(2, 4), NULL, y);
+
+ EXPECT_EQ(2, problem.NumParameterBlocks());
+ EXPECT_EQ(7, problem.NumParameters());
+}
+
+TEST(Problem, AddingParametersAndResidualsResultsInExpectedProblem) {
+ double x[3], y[4], z[5], w[4];
+
+ Problem problem;
+ problem.AddParameterBlock(x, 3);
+ EXPECT_EQ(1, problem.NumParameterBlocks());
+ EXPECT_EQ(3, problem.NumParameters());
+
+ problem.AddParameterBlock(y, 4);
+ EXPECT_EQ(2, problem.NumParameterBlocks());
+ EXPECT_EQ(7, problem.NumParameters());
+
+ problem.AddParameterBlock(z, 5);
+ EXPECT_EQ(3, problem.NumParameterBlocks());
+ EXPECT_EQ(12, problem.NumParameters());
+
+ // Add a parameter that has a local parameterization.
+ w[0] = 1.0; w[1] = 0.0; w[2] = 0.0; w[3] = 0.0;
+ problem.AddParameterBlock(w, 4, new QuaternionParameterization);
+ EXPECT_EQ(4, problem.NumParameterBlocks());
+ EXPECT_EQ(16, problem.NumParameters());
+
+ problem.AddResidualBlock(new UnaryCostFunction(2, 3), NULL, x);
+ problem.AddResidualBlock(new BinaryCostFunction(6, 5, 4) , NULL, z, y);
+ problem.AddResidualBlock(new BinaryCostFunction(3, 3, 5), NULL, x, z);
+ problem.AddResidualBlock(new BinaryCostFunction(7, 5, 3), NULL, z, x);
+ problem.AddResidualBlock(new TernaryCostFunction(1, 5, 3, 4), NULL, z, x, y);
+
+ const int total_residuals = 2 + 6 + 3 + 7 + 1;
+ EXPECT_EQ(problem.NumResidualBlocks(), 5);
+ EXPECT_EQ(problem.NumResiduals(), total_residuals);
+}
+
+class DestructorCountingCostFunction : public SizedCostFunction<3, 4, 5> {
+ public:
+ explicit DestructorCountingCostFunction(int *counter)
+ : counter_(counter) {}
+
+ virtual ~DestructorCountingCostFunction() {
+ *counter_ += 1;
+ }
+
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ return true;
+ }
+
+ private:
+ int* counter_;
+};
+
+TEST(Problem, ReusedCostFunctionsAreOnlyDeletedOnce) {
+ double y[4], z[5];
+ int counter = 0;
+
+ // Add a cost function multiple times and check to make sure that
+ // the destructor on the cost function is only called once.
+ {
+ Problem problem;
+ problem.AddParameterBlock(y, 4);
+ problem.AddParameterBlock(z, 5);
+
+ CostFunction* cost = new DestructorCountingCostFunction(&counter);
+ problem.AddResidualBlock(cost, NULL, y, z);
+ problem.AddResidualBlock(cost, NULL, y, z);
+ problem.AddResidualBlock(cost, NULL, y, z);
+ }
+
+ // Check that the destructor was called only once.
+ CHECK_EQ(counter, 1);
+}
+
+} // namespace internal
+} // namespace ceres
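The ReusedCostFunctionsAreOnlyDeletedOnce test above relies on the default TAKE_OWNERSHIP behaviour implemented in ProblemImpl::~ProblemImpl(). Below is a hedged sketch of opting out of that ownership through Problem::Options; the field name comes from problem_impl.cc above, while the DO_NOT_TAKE_OWNERSHIP enumerator is assumed to live in ceres/types.h.

    #include "ceres/cost_function.h"
    #include "ceres/problem.h"
    #include "ceres/types.h"

    // The caller keeps ownership of 'shared_cost'; the problem's destructor
    // will then skip deleting it, even if it is reused across residual blocks.
    void ExampleUnownedCostFunction(ceres::CostFunction* shared_cost, double* x) {
      ceres::Problem::Options options;
      options.cost_function_ownership = ceres::DO_NOT_TAKE_OWNERSHIP;
      ceres::Problem problem(options);
      problem.AddResidualBlock(shared_cost, NULL, x);
    }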
diff --git a/internal/ceres/program.cc b/internal/ceres/program.cc
new file mode 100644
index 0000000..82d76d3
--- /dev/null
+++ b/internal/ceres/program.cc
@@ -0,0 +1,232 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/program.h"
+
+#include <map>
+#include <vector>
+#include "ceres/casts.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/cost_function.h"
+#include "ceres/evaluator.h"
+#include "ceres/internal/port.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/loss_function.h"
+#include "ceres/map_util.h"
+#include "ceres/parameter_block.h"
+#include "ceres/problem.h"
+#include "ceres/residual_block.h"
+#include "ceres/stl_util.h"
+
+namespace ceres {
+namespace internal {
+
+Program::Program() {}
+
+Program::Program(const Program& program)
+ : parameter_blocks_(program.parameter_blocks_),
+ residual_blocks_(program.residual_blocks_) {
+}
+
+const vector<ParameterBlock*>& Program::parameter_blocks() const {
+ return parameter_blocks_;
+}
+
+const vector<ResidualBlock*>& Program::residual_blocks() const {
+ return residual_blocks_;
+}
+
+vector<ParameterBlock*>* Program::mutable_parameter_blocks() {
+ return &parameter_blocks_;
+}
+
+vector<ResidualBlock*>* Program::mutable_residual_blocks() {
+ return &residual_blocks_;
+}
+
+bool Program::StateVectorToParameterBlocks(const double *state) {
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ if (!parameter_blocks_[i]->IsConstant() &&
+ !parameter_blocks_[i]->SetState(state)) {
+ return false;
+ }
+ state += parameter_blocks_[i]->Size();
+ }
+ return true;
+}
+
+void Program::ParameterBlocksToStateVector(double *state) const {
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ parameter_blocks_[i]->GetState(state);
+ state += parameter_blocks_[i]->Size();
+ }
+}
+
+void Program::CopyParameterBlockStateToUserState() {
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ parameter_blocks_[i]->GetState(parameter_blocks_[i]->mutable_user_state());
+ }
+}
+
+bool Program::SetParameterBlockStatePtrsToUserStatePtrs() {
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ if (!parameter_blocks_[i]->IsConstant() &&
+ !parameter_blocks_[i]->SetState(parameter_blocks_[i]->user_state())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool Program::Plus(const double* state,
+ const double* delta,
+ double* state_plus_delta) const {
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ if (!parameter_blocks_[i]->Plus(state, delta, state_plus_delta)) {
+ return false;
+ }
+ state += parameter_blocks_[i]->Size();
+ delta += parameter_blocks_[i]->LocalSize();
+ state_plus_delta += parameter_blocks_[i]->Size();
+ }
+ return true;
+}
+
+void Program::SetParameterOffsetsAndIndex() {
+  // Mark every parameter block that appears as an argument to a residual as
+  // not (yet) part of the program by setting its index to -1.
+ for (int i = 0; i < residual_blocks_.size(); ++i) {
+ ResidualBlock* residual_block = residual_blocks_[i];
+ for (int j = 0; j < residual_block->NumParameterBlocks(); ++j) {
+ residual_block->parameter_blocks()[j]->set_index(-1);
+ }
+ }
+ // For parameters that appear in the program, set their position and offset.
+ int state_offset = 0;
+ int delta_offset = 0;
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ parameter_blocks_[i]->set_index(i);
+ parameter_blocks_[i]->set_state_offset(state_offset);
+ parameter_blocks_[i]->set_delta_offset(delta_offset);
+ state_offset += parameter_blocks_[i]->Size();
+ delta_offset += parameter_blocks_[i]->LocalSize();
+ }
+}
+
+int Program::NumResidualBlocks() const {
+ return residual_blocks_.size();
+}
+
+int Program::NumParameterBlocks() const {
+ return parameter_blocks_.size();
+}
+
+int Program::NumResiduals() const {
+ int num_residuals = 0;
+ for (int i = 0; i < residual_blocks_.size(); ++i) {
+ num_residuals += residual_blocks_[i]->NumResiduals();
+ }
+ return num_residuals;
+}
+
+int Program::NumParameters() const {
+ int num_parameters = 0;
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ num_parameters += parameter_blocks_[i]->Size();
+ }
+ return num_parameters;
+}
+
+int Program::NumEffectiveParameters() const {
+ int num_parameters = 0;
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ num_parameters += parameter_blocks_[i]->LocalSize();
+ }
+ return num_parameters;
+}
+
+int Program::MaxScratchDoublesNeededForEvaluate() const {
+  // Compute the scratch space (in doubles) needed for evaluate.
+  int max_scratch_doubles_for_evaluate = 0;
+  for (int i = 0; i < residual_blocks_.size(); ++i) {
+    max_scratch_doubles_for_evaluate =
+        max(max_scratch_doubles_for_evaluate,
+            residual_blocks_[i]->NumScratchDoublesForEvaluate());
+  }
+  return max_scratch_doubles_for_evaluate;
+}
+
+int Program::MaxDerivativesPerResidualBlock() const {
+ int max_derivatives = 0;
+ for (int i = 0; i < residual_blocks_.size(); ++i) {
+ int derivatives = 0;
+ ResidualBlock* residual_block = residual_blocks_[i];
+ int num_parameters = residual_block->NumParameterBlocks();
+ for (int j = 0; j < num_parameters; ++j) {
+ derivatives += residual_block->NumResiduals() *
+ residual_block->parameter_blocks()[j]->LocalSize();
+ }
+ max_derivatives = max(max_derivatives, derivatives);
+ }
+ return max_derivatives;
+}
+
+int Program::MaxParametersPerResidualBlock() const {
+ int max_parameters = 0;
+ for (int i = 0; i < residual_blocks_.size(); ++i) {
+ max_parameters = max(max_parameters,
+ residual_blocks_[i]->NumParameterBlocks());
+ }
+ return max_parameters;
+}
+
+int Program::MaxResidualsPerResidualBlock() const {
+ int max_residuals = 0;
+ for (int i = 0; i < residual_blocks_.size(); ++i) {
+ max_residuals = max(max_residuals,
+ residual_blocks_[i]->NumResiduals());
+ }
+ return max_residuals;
+}
+
+string Program::ToString() const {
+ string ret = "Program dump\n";
+ ret += StringPrintf("Number of parameter blocks: %d\n", NumParameterBlocks());
+ ret += StringPrintf("Number of parameters: %d\n", NumParameters());
+ ret += "Parameters:\n";
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ ret += StringPrintf("%d: %s\n",
+ i, parameter_blocks_[i]->ToString().c_str());
+ }
+ return ret;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/program.h b/internal/ceres/program.h
new file mode 100644
index 0000000..5002b7e
--- /dev/null
+++ b/internal/ceres/program.h
@@ -0,0 +1,129 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_PROGRAM_H_
+#define CERES_INTERNAL_PROGRAM_H_
+
+#include <string>
+#include <vector>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+class ParameterBlock;
+class ProblemImpl;
+class ResidualBlock;
+
+// A nonlinear least squares optimization problem. This is different from the
+// similarly-named "Problem" object, which offers a mutation interface for
+// adding and modifying parameters and residuals. The Program contains the core
+// part of the Problem, which is the parameters and the residuals, stored in a
+// particular ordering. The ordering is critical, since it defines the mapping
+// between (residual, parameter) pairs and a position in the jacobian of the
+// objective function. Various parts of Ceres transform one Program into
+// another; for example, the first stage of solving involves stripping all
+// constant parameters and residuals. This is in contrast with Problem, which is
+// not built for transformation.
+class Program {
+ public:
+ Program();
+ explicit Program(const Program& program);
+
+ // The ordered parameter and residual blocks for the program.
+ const vector<ParameterBlock*>& parameter_blocks() const;
+ const vector<ResidualBlock*>& residual_blocks() const;
+ vector<ParameterBlock*>* mutable_parameter_blocks();
+ vector<ResidualBlock*>* mutable_residual_blocks();
+
+ // Serialize to/from the program and update states.
+ //
+ // NOTE: Setting the state of a parameter block can trigger the
+ // computation of the Jacobian of its local parameterization. If
+ // this computation fails for some reason, then this method returns
+ // false and the state of the parameter blocks cannot be trusted.
+ bool StateVectorToParameterBlocks(const double *state);
+ void ParameterBlocksToStateVector(double *state) const;
+
+ // Copy internal state to the user's parameters.
+ void CopyParameterBlockStateToUserState();
+
+  // Set the parameter block pointers to the user pointers. Since this
+  // internally calls ParameterBlock::SetState, which may invoke a local
+  // parameterization, it can fail. False is returned on failure.
+ bool SetParameterBlockStatePtrsToUserStatePtrs();
+
+ // Update a state vector for the program given a delta.
+ bool Plus(const double* state,
+ const double* delta,
+ double* state_plus_delta) const;
+
+ // Set the parameter indices and offsets. This permits mapping backward
+ // from a ParameterBlock* to an index in the parameter_blocks() vector. For
+ // any parameter block p, after calling SetParameterOffsetsAndIndex(), it
+ // is true that
+ //
+ // parameter_blocks()[p->index()] == p
+ //
+  // If a parameter block appears in a residual but not in parameter_blocks(),
+  // then it will have an index of -1.
+ //
+ // This also updates p->state_offset() and p->delta_offset(), which are the
+ // position of the parameter in the state and delta vector respectively.
+ void SetParameterOffsetsAndIndex();
+
+ // See problem.h for what these do.
+ int NumParameterBlocks() const;
+ int NumParameters() const;
+ int NumEffectiveParameters() const;
+ int NumResidualBlocks() const;
+ int NumResiduals() const;
+
+ int MaxScratchDoublesNeededForEvaluate() const;
+ int MaxDerivativesPerResidualBlock() const;
+ int MaxParametersPerResidualBlock() const;
+ int MaxResidualsPerResidualBlock() const;
+
+ // A human-readable dump of the parameter blocks for debugging.
+ // TODO(keir): If necessary, also dump the residual blocks.
+ string ToString() const;
+
+ private:
+ // The Program does not own the ParameterBlock or ResidualBlock objects.
+ vector<ParameterBlock*> parameter_blocks_;
+ vector<ResidualBlock*> residual_blocks_;
+
+ friend class ProblemImpl;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_PROGRAM_H_
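As a quick illustration of the invariant documented for SetParameterOffsetsAndIndex(), the sketch below walks the indexed parameter blocks. It is not part of the patch and assumes a ParameterBlock::index() accessor matching the set_index() call used in program.cc.

    #include <vector>
    #include "ceres/parameter_block.h"
    #include "ceres/program.h"
    #include "glog/logging.h"

    // After indexing, parameter_blocks()[p->index()] == p for every block p
    // that belongs to the program.
    void ExampleCheckIndexInvariant(ceres::internal::Program* program) {
      program->SetParameterOffsetsAndIndex();
      const std::vector<ceres::internal::ParameterBlock*>& blocks =
          program->parameter_blocks();
      for (int i = 0; i < blocks.size(); ++i) {
        CHECK_EQ(blocks[blocks[i]->index()], blocks[i]);
      }
    }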
diff --git a/internal/ceres/program_evaluator.h b/internal/ceres/program_evaluator.h
new file mode 100644
index 0000000..1fb44e1
--- /dev/null
+++ b/internal/ceres/program_evaluator.h
@@ -0,0 +1,335 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// The ProgramEvaluator runs the cost functions contained in each residual block
+// and stores the result into a jacobian. The particular type of jacobian is
+// abstracted out using two template parameters:
+//
+// - An "EvaluatePreparer" that is responsible for creating the array with
+// pointers to the jacobian blocks where the cost function evaluates to.
+// - A "JacobianWriter" that is responsible for storing the resulting
+// jacobian blocks in the passed sparse matrix.
+//
+// This abstraction affords an efficient evaluator implementation while still
+// supporting writing to multiple sparse matrix formats. For example, when the
+// ProgramEvaluator is parameterized for writing to block sparse matrices, the
+// residual jacobians are written directly into their final position in the
+// block sparse matrix by the user's CostFunction; there is no copying.
+//
+// The evaluation is threaded with OpenMP.
+//
+// The EvaluatePreparer and JacobianWriter interfaces are as follows:
+//
+// class EvaluatePreparer {
+// // Prepare the jacobians array for use as the destination of a call to
+// // a cost function's evaluate method.
+// void Prepare(const ResidualBlock* residual_block,
+// int residual_block_index,
+// SparseMatrix* jacobian,
+// double** jacobians);
+// }
+//
+// class JacobianWriter {
+// // Create a jacobian that this writer can write. Same as
+// // Evaluator::CreateJacobian.
+// SparseMatrix* CreateJacobian() const;
+//
+// // Create num_threads evaluate preparers. Caller owns result which must
+// // be freed with delete[]. Resulting preparers are valid while *this is.
+// EvaluatePreparer* CreateEvaluatePreparers(int num_threads);
+//
+// // Write the block jacobians from a residual block evaluation to the
+// // larger sparse jacobian.
+// void Write(int residual_id,
+// int residual_offset,
+// double** jacobians,
+// SparseMatrix* jacobian);
+// }
+//
+// Note: The ProgramEvaluator is not thread safe, since internally it maintains
+// some per-thread scratch space.
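+//
+// As an illustrative sketch only (the concrete preparer and writer type names
+// below are assumptions for the example, not requirements of this header), a
+// dense evaluator could be instantiated and used roughly as:
+//
+//   ProgramEvaluator<ScratchEvaluatePreparer, DenseJacobianWriter>
+//       evaluator(options, program);
+//   scoped_ptr<SparseMatrix> jacobian(evaluator.CreateJacobian());
+//   double cost;
+//   evaluator.Evaluate(state, &cost, residuals, gradient, jacobian.get());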
+
+#ifndef CERES_INTERNAL_PROGRAM_EVALUATOR_H_
+#define CERES_INTERNAL_PROGRAM_EVALUATOR_H_
+
+#ifdef CERES_USE_OPENMP
+#include <omp.h>
+#endif
+
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+
+namespace ceres {
+namespace internal {
+
+template<typename EvaluatePreparer, typename JacobianWriter>
+class ProgramEvaluator : public Evaluator {
+ public:
+ ProgramEvaluator(const Evaluator::Options &options, Program* program)
+ : options_(options),
+ program_(program),
+ jacobian_writer_(options, program),
+ evaluate_preparers_(
+ jacobian_writer_.CreateEvaluatePreparers(options.num_threads)) {
+#ifndef CERES_USE_OPENMP
+ CHECK_EQ(1, options_.num_threads)
+ << "OpenMP support is not compiled into this binary; "
+ << "only options.num_threads=1 is supported.";
+#endif
+
+ BuildResidualLayout(*program, &residual_layout_);
+ evaluate_scratch_.reset(CreateEvaluatorScratch(*program,
+ options.num_threads));
+ }
+
+ // Implementation of Evaluator interface.
+ SparseMatrix* CreateJacobian() const {
+ return jacobian_writer_.CreateJacobian();
+ }
+
+ bool Evaluate(const double* state,
+ double* cost,
+ double* residuals,
+ double* gradient,
+ SparseMatrix* jacobian) {
+ // The parameters are stateful, so set the state before evaluating.
+ if (!program_->StateVectorToParameterBlocks(state)) {
+ return false;
+ }
+
+ if (residuals != NULL) {
+ VectorRef(residuals, program_->NumResiduals()).setZero();
+ }
+
+ if (jacobian != NULL) {
+ jacobian->SetZero();
+ }
+
+ // Each thread gets its own cost and evaluate scratch space.
+ for (int i = 0; i < options_.num_threads; ++i) {
+ evaluate_scratch_[i].cost = 0.0;
+ }
+
+ // This bool is used to disable the loop if an error is encountered
+ // without breaking out of it. The remaining loop iterations are still run,
+ // but with an empty body, and so will finish quickly.
+ bool abort = false;
+ int num_residual_blocks = program_->NumResidualBlocks();
+#pragma omp parallel for num_threads(options_.num_threads)
+ for (int i = 0; i < num_residual_blocks; ++i) {
+// Disable the loop instead of breaking, as required by OpenMP.
+#pragma omp flush(abort)
+ if (abort) {
+ continue;
+ }
+
+#ifdef CERES_USE_OPENMP
+ int thread_id = omp_get_thread_num();
+#else
+ int thread_id = 0;
+#endif
+ EvaluatePreparer* preparer = &evaluate_preparers_[thread_id];
+ EvaluateScratch* scratch = &evaluate_scratch_[thread_id];
+
+ // Prepare block residuals if requested.
+ const ResidualBlock* residual_block = program_->residual_blocks()[i];
+ double* block_residuals = NULL;
+ if (residuals != NULL) {
+ block_residuals = residuals + residual_layout_[i];
+ } else if (gradient != NULL) {
+ block_residuals = scratch->residual_block_residuals.get();
+ }
+
+ // Prepare block jacobians if requested.
+ double** block_jacobians = NULL;
+ if (jacobian != NULL || gradient != NULL) {
+ preparer->Prepare(residual_block,
+ i,
+ jacobian,
+ scratch->jacobian_block_ptrs.get());
+ block_jacobians = scratch->jacobian_block_ptrs.get();
+ }
+
+ // Evaluate the cost, residuals, and jacobians.
+ double block_cost;
+ if (!residual_block->Evaluate(
+ &block_cost,
+ block_residuals,
+ block_jacobians,
+ scratch->residual_block_evaluate_scratch.get())) {
+ abort = true;
+// This ensures that the OpenMP threads have a consistent view of 'abort'. Do
+// the flush inside the failure case so that there is usually only one
+// synchronization point per loop iteration instead of two.
+#pragma omp flush(abort)
+ continue;
+ }
+
+ scratch->cost += block_cost;
+
+ // Store the jacobians, if they were requested.
+ if (jacobian != NULL) {
+ jacobian_writer_.Write(i,
+ residual_layout_[i],
+ block_jacobians,
+ jacobian);
+ }
+
+ // Compute and store the gradient, if it was requested.
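+ // Each residual block contributes J_i^T r_i to the gradient, where J_i is
+ // the jacobian in the local parameterization and r_i the residual of block
+ // i; the contribution is accumulated into the per-thread scratch gradient
+ // at each parameter block's delta offset.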
+ if (gradient != NULL) {
+ int num_residuals = residual_block->NumResiduals();
+ int num_parameter_blocks = residual_block->NumParameterBlocks();
+ for (int j = 0; j < num_parameter_blocks; ++j) {
+ const ParameterBlock* parameter_block =
+ residual_block->parameter_blocks()[j];
+ if (parameter_block->IsConstant()) {
+ continue;
+ }
+ MatrixRef block_jacobian(block_jacobians[j],
+ num_residuals,
+ parameter_block->LocalSize());
+ VectorRef block_gradient(scratch->gradient.get() +
+ parameter_block->delta_offset(),
+ parameter_block->LocalSize());
+ VectorRef block_residual(block_residuals, num_residuals);
+ block_gradient += block_residual.transpose() * block_jacobian;
+ }
+ }
+ }
+
+ if (!abort) {
+ // Sum the cost and gradient (if requested) from each thread.
+ (*cost) = 0.0;
+ int num_parameters = program_->NumEffectiveParameters();
+ if (gradient != NULL) {
+ VectorRef(gradient, num_parameters).setZero();
+ }
+ for (int i = 0; i < options_.num_threads; ++i) {
+ (*cost) += evaluate_scratch_[i].cost;
+ if (gradient != NULL) {
+ VectorRef(gradient, num_parameters) +=
+ VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters);
+ }
+ }
+ }
+ return !abort;
+ }
+
+ bool Plus(const double* state,
+ const double* delta,
+ double* state_plus_delta) const {
+ return program_->Plus(state, delta, state_plus_delta);
+ }
+
+ int NumParameters() const {
+ return program_->NumParameters();
+ }
+ int NumEffectiveParameters() const {
+ return program_->NumEffectiveParameters();
+ }
+
+ int NumResiduals() const {
+ return program_->NumResiduals();
+ }
+
+ private:
+ // Per-thread scratch space needed to evaluate and store each residual block.
+ struct EvaluateScratch {
+ void Init(int max_parameters_per_residual_block,
+ int max_scratch_doubles_needed_for_evaluate,
+ int max_residuals_per_residual_block,
+ int num_parameters) {
+ residual_block_evaluate_scratch.reset(
+ new double[max_scratch_doubles_needed_for_evaluate]);
+ gradient.reset(new double[num_parameters]);
+ VectorRef(gradient.get(), num_parameters).setZero();
+ residual_block_residuals.reset(
+ new double[max_residuals_per_residual_block]);
+ jacobian_block_ptrs.reset(
+ new double*[max_parameters_per_residual_block]);
+ }
+
+ double cost;
+ scoped_array<double> residual_block_evaluate_scratch;
+ // The gradient in the local parameterization.
+ scoped_array<double> gradient;
+ // Enough space to store the residual for the largest residual block.
+ scoped_array<double> residual_block_residuals;
+ scoped_array<double*> jacobian_block_ptrs;
+ };
+
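+ // Compute the offset of each residual block's residuals within the global
+ // residual vector. For example, residual blocks with 2, 3 and 1 residuals
+ // respectively produce the layout {0, 2, 5}.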
+ static void BuildResidualLayout(const Program& program,
+ vector<int>* residual_layout) {
+ const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
+ residual_layout->resize(program.NumResidualBlocks());
+ int residual_pos = 0;
+ for (int i = 0; i < residual_blocks.size(); ++i) {
+ const int num_residuals = residual_blocks[i]->NumResiduals();
+ (*residual_layout)[i] = residual_pos;
+ residual_pos += num_residuals;
+ }
+ }
+
+ // Create scratch space for each thread evaluating the program.
+ static EvaluateScratch* CreateEvaluatorScratch(const Program& program,
+ int num_threads) {
+ int max_parameters_per_residual_block =
+ program.MaxParametersPerResidualBlock();
+ int max_scratch_doubles_needed_for_evaluate =
+ program.MaxScratchDoublesNeededForEvaluate();
+ int max_residuals_per_residual_block =
+ program.MaxResidualsPerResidualBlock();
+ int num_parameters = program.NumEffectiveParameters();
+
+ EvaluateScratch* evaluate_scratch = new EvaluateScratch[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ evaluate_scratch[i].Init(max_parameters_per_residual_block,
+ max_scratch_doubles_needed_for_evaluate,
+ max_residuals_per_residual_block,
+ num_parameters);
+ }
+ return evaluate_scratch;
+ }
+
+ Evaluator::Options options_;
+ Program* program_;
+ JacobianWriter jacobian_writer_;
+ scoped_array<EvaluatePreparer> evaluate_preparers_;
+ scoped_array<EvaluateScratch> evaluate_scratch_;
+ vector<int> residual_layout_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_PROGRAM_EVALUATOR_H_
diff --git a/internal/ceres/random.h b/internal/ceres/random.h
new file mode 100644
index 0000000..352c003
--- /dev/null
+++ b/internal/ceres/random.h
@@ -0,0 +1,70 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+// sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_RANDOM_H_
+#define CERES_INTERNAL_RANDOM_H_
+
+#include <cmath>
+#include <cstdlib>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+
+inline void SetRandomState(int state) {
+ srand(state);
+}
+
+inline int Uniform(int n) {
+ return rand() % n;
+}
+
+inline double RandDouble() {
+ double r = static_cast<double>(rand());
+ return r / RAND_MAX;
+}
+
+// Box-Muller algorithm (polar form) for normal random number generation.
+// http://en.wikipedia.org/wiki/Box-Muller_transform
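+//
+// Given x1, x2 uniform in (-1, 1) with w = x1^2 + x2^2 in (0, 1), the polar
+// form below returns x1 * sqrt(-2 * log(w) / w), which is distributed as
+// N(0, 1).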
+inline double RandNormal() {
+ double x1, x2, w;
+ do {
+ x1 = 2.0 * RandDouble() - 1.0;
+ x2 = 2.0 * RandDouble() - 1.0;
+ w = x1 * x1 + x2 * x2;
+ } while ( w >= 1.0 || w == 0.0 );
+
+ w = sqrt((-2.0 * log(w)) / w);
+ return x1 * w;
+}
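+
+// A minimal usage sketch (the seed value here is arbitrary):
+//
+//   SetRandomState(5);
+//   int i = Uniform(10);       // uniform integer in [0, 10)
+//   double u = RandDouble();   // uniform double in [0, 1]
+//   double g = RandNormal();   // approximately N(0, 1)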
+
+} // namespace ceres
+
+#endif // CERES_INTERNAL_RANDOM_H_
diff --git a/internal/ceres/residual_block.cc b/internal/ceres/residual_block.cc
new file mode 100644
index 0000000..bdb88b1
--- /dev/null
+++ b/internal/ceres/residual_block.cc
@@ -0,0 +1,215 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+// sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/residual_block.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <vector>
+
+#include "ceres/corrector.h"
+#include "ceres/parameter_block.h"
+#include "ceres/residual_block_utils.h"
+#include "ceres/cost_function.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/fixed_array.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/loss_function.h"
+
+namespace ceres {
+namespace internal {
+
+ResidualBlock::ResidualBlock(const CostFunction* cost_function,
+ const LossFunction* loss_function,
+ const vector<ParameterBlock*>& parameter_blocks)
+ : cost_function_(cost_function),
+ loss_function_(loss_function),
+ parameter_blocks_(
+ new ParameterBlock* [
+ cost_function->parameter_block_sizes().size()]) {
+ std::copy(parameter_blocks.begin(),
+ parameter_blocks.end(),
+ parameter_blocks_.get());
+}
+
+bool ResidualBlock::Evaluate(double* cost,
+ double* residuals,
+ double** jacobians,
+ double* scratch) const {
+ const int num_parameter_blocks = NumParameterBlocks();
+ const int num_residuals = cost_function_->num_residuals();
+
+ // Collect the parameters from their blocks. This will rarely allocate, since
+ // residuals taking more than 8 parameter block arguments are rare.
+ FixedArray<const double*, 8> parameters(num_parameter_blocks);
+ for (int i = 0; i < num_parameter_blocks; ++i) {
+ parameters[i] = parameter_blocks_[i]->state();
+ }
+
+ // Point global_jacobians either into the scratch space (for parameters with
+ // a local parameterization) or at the caller's jacobian blocks directly.
+ FixedArray<double*, 8> global_jacobians(num_parameter_blocks);
+ if (jacobians != NULL) {
+ for (int i = 0; i < num_parameter_blocks; ++i) {
+ const ParameterBlock* parameter_block = parameter_blocks_[i];
+ if (jacobians[i] != NULL &&
+ parameter_block->LocalParameterizationJacobian() != NULL) {
+ global_jacobians[i] = scratch;
+ scratch += num_residuals * parameter_block->Size();
+ } else {
+ global_jacobians[i] = jacobians[i];
+ }
+ }
+ }
+
+ // If the caller didn't request residuals, use the scratch space for them.
+ bool outputting_residuals = (residuals != NULL);
+ if (!outputting_residuals) {
+ residuals = scratch;
+ }
+
+ // Invalidate the evaluation buffers so that we can check them after
+ // the CostFunction::Evaluate call, to see if all the return values
+ // that were required were written to and that they are finite.
+ double** eval_jacobians = (jacobians != NULL) ? global_jacobians.get() : NULL;
+
+ InvalidateEvaluation(*this, cost, residuals, eval_jacobians);
+
+ if (!cost_function_->Evaluate(parameters.get(), residuals, eval_jacobians)) {
+ return false;
+ }
+
+ if (!IsEvaluationValid(*this,
+ parameters.get(),
+ cost,
+ residuals,
+ eval_jacobians)) {
+ string message =
+ "\n\n"
+ "Error in evaluating the ResidualBlock.\n\n"
+ "There are two possible reasons. Either the CostFunction did not evaluate and fill all \n" // NOLINT
+ "residual and jacobians that were requested or there was a non-finite value (nan/infinite)\n" // NOLINT
+ "generated during the or jacobian computation. \n\n" +
+ EvaluationToString(*this,
+ parameters.get(),
+ cost,
+ residuals,
+ eval_jacobians);
+ LOG(WARNING) << message;
+ return false;
+ }
+
+ double squared_norm = VectorRef(residuals, num_residuals).squaredNorm();
+
+ // Update the jacobians with the local parameterizations.
+ if (jacobians != NULL) {
+ for (int i = 0; i < num_parameter_blocks; ++i) {
+ if (jacobians[i] != NULL) {
+ const ParameterBlock* parameter_block = parameter_blocks_[i];
+
+ // Apply local reparameterization to the jacobians.
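+ // By the chain rule, d(residual)/d(delta) =
+ // d(residual)/d(x) * d(x)/d(delta), i.e. the full-sized jacobian times
+ // the local parameterization's jacobian (local_to_global).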
+ if (parameter_block->LocalParameterizationJacobian() != NULL) {
+ ConstMatrixRef local_to_global(
+ parameter_block->LocalParameterizationJacobian(),
+ parameter_block->Size(),
+ parameter_block->LocalSize());
+ MatrixRef global_jacobian(global_jacobians[i],
+ num_residuals,
+ parameter_block->Size());
+ MatrixRef local_jacobian(jacobians[i],
+ num_residuals,
+ parameter_block->LocalSize());
+ local_jacobian.noalias() = global_jacobian * local_to_global;
+ }
+ }
+ }
+ }
+
+ if (loss_function_ == NULL) {
+ *cost = 0.5 * squared_norm;
+ return true;
+ }
+
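+ // The loss function evaluated at s = ||residuals||^2 yields
+ // rho = [rho(s), rho'(s), rho''(s)], which the Corrector below uses to
+ // rescale the residuals and jacobians.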
+ double rho[3];
+ loss_function_->Evaluate(squared_norm, rho);
+ *cost = 0.5 * rho[0];
+
+ // No jacobians and not outputting residuals? All done. Doing an early exit
+ // here avoids constructing the "Corrector" object below in a common case.
+ if (jacobians == NULL && !outputting_residuals) {
+ return true;
+ }
+
+ // Correct for the effects of the loss function. The jacobians need to be
+ // corrected before the residuals, since they use the uncorrected residuals.
+ Corrector correct(squared_norm, rho);
+ if (jacobians != NULL) {
+ for (int i = 0; i < num_parameter_blocks; ++i) {
+ if (jacobians[i] != NULL) {
+ const ParameterBlock* parameter_block = parameter_blocks_[i];
+
+ // Correct the jacobians for the loss function.
+ correct.CorrectJacobian(num_residuals,
+ parameter_block->LocalSize(),
+ residuals,
+ jacobians[i]);
+ }
+ }
+ }
+
+ // Correct the residuals with the loss function.
+ if (outputting_residuals) {
+ correct.CorrectResiduals(num_residuals, residuals);
+ }
+ return true;
+}
+
+int ResidualBlock::NumScratchDoublesForEvaluate() const {
+ // Compute the amount of scratch space needed to store the full-sized
+ // jacobians. For parameters that have no local parameterization, no storage
+ // is needed and the passed-in jacobian array is used directly. Also include
+ // space to store the residuals, which is needed for cost-only evaluations.
+ // This is slightly pessimistic, since both won't be needed all the time, but
+ // the amount of excess should not cause problems for the caller.
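+ //
+ // For example (an illustrative case, not drawn from a particular test): a
+ // block with 3 residuals and two parameter blocks, one of size 4 with a
+ // local parameterization and one of size 3 without, needs
+ // (1 + 4) * 3 = 15 scratch doubles.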
+ int num_parameters = NumParameterBlocks();
+ int scratch_doubles = 1;
+ for (int i = 0; i < num_parameters; ++i) {
+ const ParameterBlock* parameter_block = parameter_blocks_[i];
+ if (!parameter_block->IsConstant() &&
+ parameter_block->LocalParameterizationJacobian() != NULL) {
+ scratch_doubles += parameter_block->Size();
+ }
+ }
+ scratch_doubles *= NumResiduals();
+ return scratch_doubles;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/residual_block.h b/internal/ceres/residual_block.h
new file mode 100644
index 0000000..e0a06e7
--- /dev/null
+++ b/internal/ceres/residual_block.h
@@ -0,0 +1,124 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+// keir@google.com (Keir Mierle)
+//
+// Purpose : Class and struct definitions for parameter and residual blocks.
+
+#ifndef CERES_INTERNAL_RESIDUAL_BLOCK_H_
+#define CERES_INTERNAL_RESIDUAL_BLOCK_H_
+
+#include <vector>
+
+#include "ceres/cost_function.h"
+#include "ceres/internal/port.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+
+namespace ceres {
+
+class LossFunction;
+
+namespace internal {
+
+class ParameterBlock;
+
+// A term in the least squares problem. The mathematical form of each term in
+// the overall least-squares cost function is:
+//
+// 1
+// --- loss_function( || cost_function(block1, block2, ...) ||^2 ),
+// 2
+//
+// Storing the cost function and the loss function separately permits optimizing
+// the problem with standard non-linear least squares techniques, without
+// requiring a more general non-linear solver.
+//
+// The residual block stores pointers to but does not own the cost functions,
+// loss functions, and parameter blocks.
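+//
+// A minimal usage sketch (variable names here are illustrative):
+//
+//   ResidualBlock block(&cost_function, &loss_function, parameter_blocks);
+//   scoped_array<double> scratch(
+//       new double[block.NumScratchDoublesForEvaluate()]);
+//   double cost;
+//   block.Evaluate(&cost, residuals, jacobians, scratch.get());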
+class ResidualBlock {
+ public:
+ ResidualBlock(const CostFunction* cost_function,
+ const LossFunction* loss_function,
+ const vector<ParameterBlock*>& parameter_blocks);
+
+ // Evaluates the residual term, storing the scalar cost in *cost, the residual
+ // components in *residuals, and the jacobians between the parameters and
+ // residuals in jacobians[i], in row-major order. If residuals is NULL, the
+ // residuals are not computed. If jacobians is NULL, no jacobians are
+ // computed. If jacobians[i] is NULL, then the jacobian for that parameter is
+ // not computed.
+ //
+ // Evaluate needs scratch space which must be supplied by the caller via
+ // scratch. The array should have at least NumScratchDoublesForEvaluate()
+ // space available.
+ //
+ // The return value indicates success or failure. If the function returns
+ // false, the caller should expect the output memory locations to have
+ // been modified.
+ //
+ // The returned cost and jacobians have had robustification and local
+ // parameterizations applied already; for example, the jacobian for a
+ // 4-dimensional quaternion parameter using the "QuaternionParameterization"
+ // is num_residuals by 3 instead of num_residuals by 4.
+ bool Evaluate(double* cost,
+ double* residuals,
+ double** jacobians,
+ double* scratch) const;
+
+ const CostFunction* cost_function() const { return cost_function_; }
+ const LossFunction* loss_function() const { return loss_function_; }
+
+ // Access the parameter blocks for this residual. The array has size
+ // NumParameterBlocks().
+ ParameterBlock* const* parameter_blocks() const {
+ return parameter_blocks_.get();
+ }
+
+ // Number of variable blocks that this residual term depends on.
+ int NumParameterBlocks() const {
+ return cost_function_->parameter_block_sizes().size();
+ }
+
+ // The size of the residual vector returned by this residual function.
+ int NumResiduals() const { return cost_function_->num_residuals(); }
+
+ // The minimum amount of scratch space needed to pass to Evaluate().
+ int NumScratchDoublesForEvaluate() const;
+
+ private:
+ const CostFunction* cost_function_;
+ const LossFunction* loss_function_;
+ scoped_array<ParameterBlock*> parameter_blocks_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_RESIDUAL_BLOCK_H_
diff --git a/internal/ceres/residual_block_test.cc b/internal/ceres/residual_block_test.cc
new file mode 100644
index 0000000..92b79f6
--- /dev/null
+++ b/internal/ceres/residual_block_test.cc
@@ -0,0 +1,326 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/residual_block.h"
+
+#include "gtest/gtest.h"
+#include "ceres/parameter_block.h"
+#include "ceres/sized_cost_function.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/local_parameterization.h"
+
+namespace ceres {
+namespace internal {
+
+// Trivial cost function that accepts three arguments.
+class TernaryCostFunction: public CostFunction {
+ public:
+ TernaryCostFunction(int num_residuals,
+ int16 parameter_block1_size,
+ int16 parameter_block2_size,
+ int16 parameter_block3_size) {
+ set_num_residuals(num_residuals);
+ mutable_parameter_block_sizes()->push_back(parameter_block1_size);
+ mutable_parameter_block_sizes()->push_back(parameter_block2_size);
+ mutable_parameter_block_sizes()->push_back(parameter_block3_size);
+ }
+
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ for (int i = 0; i < num_residuals(); ++i) {
+ residuals[i] = i;
+ }
+ if (jacobians) {
+ for (int k = 0; k < 3; ++k) {
+ if (jacobians[k] != NULL) {
+ MatrixRef jacobian(jacobians[k],
+ num_residuals(),
+ parameter_block_sizes()[k]);
+ jacobian.setConstant(k);
+ }
+ }
+ }
+ return true;
+ }
+};
+
+TEST(ResidualBlock, EvaluateWithNoLossFunctionOrLocalParameterizations) {
+ double scratch[64];
+
+ // Prepare the parameter blocks.
+ double values_x[2];
+ ParameterBlock x(values_x, 2);
+
+ double values_y[3];
+ ParameterBlock y(values_y, 3);
+
+ double values_z[4];
+ ParameterBlock z(values_z, 4);
+
+ vector<ParameterBlock*> parameters;
+ parameters.push_back(&x);
+ parameters.push_back(&y);
+ parameters.push_back(&z);
+
+ TernaryCostFunction cost_function(3, 2, 3, 4);
+
+ // Create the object under tests.
+ ResidualBlock residual_block(&cost_function, NULL, parameters);
+
+ // Verify getters.
+ EXPECT_EQ(&cost_function, residual_block.cost_function());
+ EXPECT_EQ(NULL, residual_block.loss_function());
+ EXPECT_EQ(parameters[0], residual_block.parameter_blocks()[0]);
+ EXPECT_EQ(parameters[1], residual_block.parameter_blocks()[1]);
+ EXPECT_EQ(parameters[2], residual_block.parameter_blocks()[2]);
+ EXPECT_EQ(3, residual_block.NumScratchDoublesForEvaluate());
+
+ // Verify cost-only evaluation.
+ double cost;
+ residual_block.Evaluate(&cost, NULL, NULL, scratch);
+ EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+
+ // Verify cost and residual evaluation.
+ double residuals[3];
+ residual_block.Evaluate(&cost, residuals, NULL, scratch);
+ EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+ EXPECT_EQ(0.0, residuals[0]);
+ EXPECT_EQ(1.0, residuals[1]);
+ EXPECT_EQ(2.0, residuals[2]);
+
+ // Verify cost, residual, and jacobian evaluation.
+ cost = 0.0;
+ VectorRef(residuals, 3).setConstant(0.0);
+
+ Matrix jacobian_rx(3, 2);
+ Matrix jacobian_ry(3, 3);
+ Matrix jacobian_rz(3, 4);
+
+ jacobian_rx.setConstant(-1.0);
+ jacobian_ry.setConstant(-1.0);
+ jacobian_rz.setConstant(-1.0);
+
+ double *jacobian_ptrs[3] = {
+ jacobian_rx.data(),
+ jacobian_ry.data(),
+ jacobian_rz.data()
+ };
+
+ residual_block.Evaluate(&cost, residuals, jacobian_ptrs, scratch);
+ EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+ EXPECT_EQ(0.0, residuals[0]);
+ EXPECT_EQ(1.0, residuals[1]);
+ EXPECT_EQ(2.0, residuals[2]);
+
+ EXPECT_TRUE((jacobian_rx.array() == 0.0).all()) << "\n" << jacobian_rx;
+ EXPECT_TRUE((jacobian_ry.array() == 1.0).all()) << "\n" << jacobian_ry;
+ EXPECT_TRUE((jacobian_rz.array() == 2.0).all()) << "\n" << jacobian_rz;
+
+ // Verify cost, residual, and partial jacobian evaluation.
+ cost = 0.0;
+ VectorRef(residuals, 3).setConstant(0.0);
+ jacobian_rx.setConstant(-1.0);
+ jacobian_ry.setConstant(-1.0);
+ jacobian_rz.setConstant(-1.0);
+
+ jacobian_ptrs[1] = NULL; // Don't compute the jacobian for y.
+
+ residual_block.Evaluate(&cost, residuals, jacobian_ptrs, scratch);
+ EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+ EXPECT_EQ(0.0, residuals[0]);
+ EXPECT_EQ(1.0, residuals[1]);
+ EXPECT_EQ(2.0, residuals[2]);
+
+ EXPECT_TRUE((jacobian_rx.array() == 0.0).all()) << "\n" << jacobian_rx;
+ EXPECT_TRUE((jacobian_ry.array() == -1.0).all()) << "\n" << jacobian_ry;
+ EXPECT_TRUE((jacobian_rz.array() == 2.0).all()) << "\n" << jacobian_rz;
+}
+
+// Trivial cost function that accepts three arguments.
+class LocallyParameterizedCostFunction: public SizedCostFunction<3, 2, 3, 4> {
+ public:
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ for (int i = 0; i < num_residuals(); ++i) {
+ residuals[i] = i;
+ }
+ if (jacobians) {
+ for (int k = 0; k < 3; ++k) {
+ // The jacobians here are full sized, but they are transformed in the
+ // evaluator into the "local" jacobian. In the tests, the "subset
+ // constant" parameterization is used, which should pick out columns
+ // from these jacobians. Put values in the jacobian that make this
+ // obvious; in particular, make the jacobians like this:
+ //
+ // 0 1 2 3 4 ...
+ // 0 1 2 3 4 ...
+ // 0 1 2 3 4 ...
+ //
+ if (jacobians[k] != NULL) {
+ MatrixRef jacobian(jacobians[k],
+ num_residuals(),
+ parameter_block_sizes()[k]);
+ for (int j = 0; j < k + 2; ++j) {
+ jacobian.col(j).setConstant(j);
+ }
+ }
+ }
+ }
+ return true;
+ }
+};
+
+TEST(ResidualBlock, EvaluateWithLocalParameterizations) {
+ double scratch[64];
+
+ // Prepare the parameter blocks.
+ double values_x[2];
+ ParameterBlock x(values_x, 2);
+
+ double values_y[3];
+ ParameterBlock y(values_y, 3);
+
+ double values_z[4];
+ ParameterBlock z(values_z, 4);
+
+ vector<ParameterBlock*> parameters;
+ parameters.push_back(&x);
+ parameters.push_back(&y);
+ parameters.push_back(&z);
+
+ // Make x have the first component fixed.
+ vector<int> x_fixed;
+ x_fixed.push_back(0);
+ SubsetParameterization x_parameterization(2, x_fixed);
+ x.SetParameterization(&x_parameterization);
+
+ // Make z have its third component fixed.
+ vector<int> z_fixed;
+ z_fixed.push_back(2);
+ SubsetParameterization z_parameterization(4, z_fixed);
+ z.SetParameterization(&z_parameterization);
+
+ LocallyParameterizedCostFunction cost_function;
+
+ // Create the object under tests.
+ ResidualBlock residual_block(&cost_function, NULL, parameters);
+
+ // Verify getters.
+ EXPECT_EQ(&cost_function, residual_block.cost_function());
+ EXPECT_EQ(NULL, residual_block.loss_function());
+ EXPECT_EQ(parameters[0], residual_block.parameter_blocks()[0]);
+ EXPECT_EQ(parameters[1], residual_block.parameter_blocks()[1]);
+ EXPECT_EQ(parameters[2], residual_block.parameter_blocks()[2]);
+ EXPECT_EQ(3*(2 + 4) + 3, residual_block.NumScratchDoublesForEvaluate());
+
+ // Verify cost-only evaluation.
+ double cost;
+ residual_block.Evaluate(&cost, NULL, NULL, scratch);
+ EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+
+ // Verify cost and residual evaluation.
+ double residuals[3];
+ residual_block.Evaluate(&cost, residuals, NULL, scratch);
+ EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+ EXPECT_EQ(0.0, residuals[0]);
+ EXPECT_EQ(1.0, residuals[1]);
+ EXPECT_EQ(2.0, residuals[2]);
+
+ // Verify cost, residual, and jacobian evaluation.
+ cost = 0.0;
+ VectorRef(residuals, 3).setConstant(0.0);
+
+ Matrix jacobian_rx(3, 1); // Since the first element is fixed.
+ Matrix jacobian_ry(3, 3);
+ Matrix jacobian_rz(3, 3); // Since the third element is fixed.
+
+ jacobian_rx.setConstant(-1.0);
+ jacobian_ry.setConstant(-1.0);
+ jacobian_rz.setConstant(-1.0);
+
+ double *jacobian_ptrs[3] = {
+ jacobian_rx.data(),
+ jacobian_ry.data(),
+ jacobian_rz.data()
+ };
+
+ residual_block.Evaluate(&cost, residuals, jacobian_ptrs, scratch);
+ EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+ EXPECT_EQ(0.0, residuals[0]);
+ EXPECT_EQ(1.0, residuals[1]);
+ EXPECT_EQ(2.0, residuals[2]);
+
+ Matrix expected_jacobian_rx(3, 1);
+ expected_jacobian_rx << 1.0, 1.0, 1.0;
+
+ Matrix expected_jacobian_ry(3, 3);
+ expected_jacobian_ry << 0.0, 1.0, 2.0,
+ 0.0, 1.0, 2.0,
+ 0.0, 1.0, 2.0;
+
+ Matrix expected_jacobian_rz(3, 3);
+ expected_jacobian_rz << 0.0, 1.0, /* 2.0, */ 3.0, // 3rd parameter constant.
+ 0.0, 1.0, /* 2.0, */ 3.0,
+ 0.0, 1.0, /* 2.0, */ 3.0;
+
+ EXPECT_EQ(expected_jacobian_rx, jacobian_rx)
+ << "\nExpected:\n" << expected_jacobian_rx
+ << "\nActual:\n" << jacobian_rx;
+ EXPECT_EQ(expected_jacobian_ry, jacobian_ry)
+ << "\nExpected:\n" << expected_jacobian_ry
+ << "\nActual:\n" << jacobian_ry;
+ EXPECT_EQ(expected_jacobian_rz, jacobian_rz)
+ << "\nExpected:\n " << expected_jacobian_rz
+ << "\nActual:\n" << jacobian_rz;
+
+ // Verify cost, residual, and partial jacobian evaluation.
+ cost = 0.0;
+ VectorRef(residuals, 3).setConstant(0.0);
+ jacobian_rx.setConstant(-1.0);
+ jacobian_ry.setConstant(-1.0);
+ jacobian_rz.setConstant(-1.0);
+
+ jacobian_ptrs[1] = NULL; // Don't compute the jacobian for y.
+
+ residual_block.Evaluate(&cost, residuals, jacobian_ptrs, scratch);
+ EXPECT_EQ(0.5 * (0*0 + 1*1 + 2*2), cost);
+ EXPECT_EQ(0.0, residuals[0]);
+ EXPECT_EQ(1.0, residuals[1]);
+ EXPECT_EQ(2.0, residuals[2]);
+
+ EXPECT_EQ(expected_jacobian_rx, jacobian_rx);
+ EXPECT_TRUE((jacobian_ry.array() == -1.0).all()) << "\n" << jacobian_ry;
+ EXPECT_EQ(expected_jacobian_rz, jacobian_rz);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/residual_block_utils.cc b/internal/ceres/residual_block_utils.cc
new file mode 100644
index 0000000..ff18e21
--- /dev/null
+++ b/internal/ceres/residual_block_utils.cc
@@ -0,0 +1,156 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/residual_block_utils.h"
+
+#include <cmath>
+#include <cstddef>
+#include <limits>
+#include "ceres/array_utils.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/parameter_block.h"
+#include "ceres/residual_block.h"
+#include "ceres/stringprintf.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+void InvalidateEvaluation(const ResidualBlock& block,
+ double* cost,
+ double* residuals,
+ double** jacobians) {
+ const int num_parameter_blocks = block.NumParameterBlocks();
+ const int num_residuals = block.NumResiduals();
+
+ InvalidateArray(1, cost);
+ InvalidateArray(num_residuals, residuals);
+ if (jacobians != NULL) {
+ for (int i = 0; i < num_parameter_blocks; ++i) {
+ const int parameter_block_size = block.parameter_blocks()[i]->Size();
+ InvalidateArray(num_residuals * parameter_block_size, jacobians[i]);
+ }
+ }
+}
+
+// Utility routine to print an array of doubles to a string. If the
+// array pointer is NULL, the entries are printed as 'Not Computed'.
+void AppendArrayToString(const int size, const double* x, string* result) {
+ for (int i = 0; i < size; ++i) {
+ if (x == NULL) {
+ StringAppendF(result, "Not Computed ");
+ } else {
+ if (x[i] == kImpossibleValue) {
+ StringAppendF(result, "Uninitialized ");
+ } else {
+ StringAppendF(result, "%12g ", x[i]);
+ }
+ }
+ }
+}
+
+string EvaluationToString(const ResidualBlock& block,
+ double const* const* parameters,
+ double* cost,
+ double* residuals,
+ double** jacobians) {
+ CHECK_NOTNULL(cost);
+ CHECK_NOTNULL(residuals);
+
+ const int num_parameter_blocks = block.NumParameterBlocks();
+ const int num_residuals = block.NumResiduals();
+ string result = "";
+
+ StringAppendF(&result,
+ "Residual Block size: %d parameter blocks x %d residuals\n\n",
+ num_parameter_blocks, num_residuals);
+ result +=
+ "For each parameter block, the value of the parameters are printed in the first column \n" // NOLINT
+ "and the value of the jacobian under the corresponding residual. If a ParameterBlock was \n" // NOLINT
+ "held constant then the corresponding jacobian is printed as 'Not Computed'. If an entry \n" // NOLINT
+ "of the Jacobian/residual array was requested but was not written to by user code, it is \n" // NOLINT
+ "indicated by 'Uninitialized'. This is an error. Residuals or Jacobian values evaluating \n" // NOLINT
+ "to Inf or NaN is also an error. \n\n"; // NOLINT
+
+ string space = "Residuals: ";
+ result += space;
+ AppendArrayToString(num_residuals, residuals, &result);
+ StringAppendF(&result, "\n\n");
+
+ for (int i = 0; i < num_parameter_blocks; ++i) {
+ const int parameter_block_size = block.parameter_blocks()[i]->Size();
+ StringAppendF(
+ &result, "Parameter Block %d, size: %d\n", i, parameter_block_size);
+ StringAppendF(&result, "\n");
+ for (int j = 0; j < parameter_block_size; ++j) {
+ AppendArrayToString(1, parameters[i] + j, &result);
+ StringAppendF(&result, "| ");
+ for (int k = 0; k < num_residuals; ++k) {
+ AppendArrayToString(1,
+ (jacobians != NULL && jacobians[i] != NULL)
+ ? jacobians[i] + k * parameter_block_size + j
+ : NULL,
+ &result);
+ }
+ StringAppendF(&result, "\n");
+ }
+ StringAppendF(&result, "\n");
+ }
+ StringAppendF(&result, "\n");
+ return result;
+}
+
+bool IsEvaluationValid(const ResidualBlock& block,
+ double const* const* parameters,
+ double* cost,
+ double* residuals,
+ double** jacobians) {
+ const int num_parameter_blocks = block.NumParameterBlocks();
+ const int num_residuals = block.NumResiduals();
+
+ if (!IsArrayValid(num_residuals, residuals)) {
+ return false;
+ }
+
+ if (jacobians != NULL) {
+ for (int i = 0; i < num_parameter_blocks; ++i) {
+ const int parameter_block_size = block.parameter_blocks()[i]->Size();
+ if (!IsArrayValid(num_residuals * parameter_block_size, jacobians[i])) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/residual_block_utils.h b/internal/ceres/residual_block_utils.h
new file mode 100644
index 0000000..7051c21
--- /dev/null
+++ b/internal/ceres/residual_block_utils.h
@@ -0,0 +1,80 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Utility routines for ResidualBlock evaluation.
+//
+// These are useful for detecting two common classes of errors.
+//
+// 1. Uninitialized memory - where the user for some reason did not
+// compute part of a cost/residual/jacobian.
+//
+// 2. Numerical failure while computing the cost/residual/jacobian,
+// e.g. NaN, infinities etc. This is particularly useful since the
+// automatic differentiation code does computations that are not
+// evident to the user and can silently generate hard to debug errors.
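+//
+// The typical usage pattern, mirroring ResidualBlock::Evaluate, is roughly:
+//
+//   InvalidateEvaluation(block, cost, residuals, jacobians);
+//   if (!cost_function->Evaluate(parameters, residuals, jacobians) ||
+//       !IsEvaluationValid(block, parameters, cost, residuals, jacobians)) {
+//     LOG(WARNING) << EvaluationToString(
+//         block, parameters, cost, residuals, jacobians);
+//   }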
+
+#ifndef CERES_INTERNAL_RESIDUAL_BLOCK_UTILS_H_
+#define CERES_INTERNAL_RESIDUAL_BLOCK_UTILS_H_
+
+#include <string>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+class ResidualBlock;
+
+// Invalidate the cost, residual and jacobian arrays (if not NULL).
+void InvalidateEvaluation(const ResidualBlock& block,
+ double* cost,
+ double* residuals,
+ double** jacobians);
+
+// Check that the cost, residuals and jacobians arrays are free of NaNs and
+// infinities; returns true if the evaluation is valid.
+bool IsEvaluationValid(const ResidualBlock& block,
+ double const* const* parameters,
+ double* cost,
+ double* residuals,
+ double** jacobians);
+
+// Create a string representation of the Residual block containing the
+// value of the parameters, residuals and jacobians if present.
+// Useful for debugging output.
+string EvaluationToString(const ResidualBlock& block,
+ double const* const* parameters,
+ double* cost,
+ double* residuals,
+ double** jacobians);
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_RESIDUAL_BLOCK_UTILS_H_
diff --git a/internal/ceres/residual_block_utils_test.cc b/internal/ceres/residual_block_utils_test.cc
new file mode 100644
index 0000000..db9ad6d
--- /dev/null
+++ b/internal/ceres/residual_block_utils_test.cc
@@ -0,0 +1,165 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <cmath>
+#include <limits>
+#include "gtest/gtest.h"
+#include "ceres/parameter_block.h"
+#include "ceres/residual_block.h"
+#include "ceres/residual_block_utils.h"
+#include "ceres/cost_function.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/sized_cost_function.h"
+
+namespace ceres {
+namespace internal {
+
+// Routine to check that ResidualBlock::Evaluate, for a unary CostFunction
+// with one residual, returns the expected success value (is_good).
+void CheckEvaluation(const CostFunction& cost_function, bool is_good) {
+ double x = 1.0;
+ ParameterBlock parameter_block(&x, 1);
+ vector<ParameterBlock*> parameter_blocks;
+ parameter_blocks.push_back(&parameter_block);
+
+ ResidualBlock residual_block(&cost_function,
+ NULL,
+ parameter_blocks);
+
+ scoped_array<double> scratch(
+ new double[residual_block.NumScratchDoublesForEvaluate()]);
+
+ double cost;
+ double residuals;
+ double jacobian;
+ double* jacobians[] = { &jacobian };
+
+ EXPECT_EQ(residual_block.Evaluate(&cost,
+ &residuals,
+ jacobians,
+ scratch.get()), is_good);
+}
+
+// A CostFunction that behaves normally, i.e., it computes numerically
+// valid residuals and jacobians.
+class GoodCostFunction: public SizedCostFunction<1, 1> {
+ public:
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ residuals[0] = 1;
+ if (jacobians != NULL && jacobians[0] != NULL) {
+ jacobians[0][0] = 0.0;
+ }
+ return true;
+ }
+};
+
+// The following four CostFunctions simulate the different ways in
+// which user code can cause ResidualBlock::Evaluate to fail.
+class NoResidualUpdateCostFunction: public SizedCostFunction<1, 1> {
+ public:
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ // Forget to update the residuals.
+ // residuals[0] = 1;
+ if (jacobians != NULL && jacobians[0] != NULL) {
+ jacobians[0][0] = 0.0;
+ }
+ return true;
+ }
+};
+
+class NoJacobianUpdateCostFunction: public SizedCostFunction<1, 1> {
+ public:
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ residuals[0] = 1;
+ if (jacobians != NULL && jacobians[0] != NULL) {
+ // Forget to update the jacobians.
+ // jacobians[0][0] = 0.0;
+ }
+ return true;
+ }
+};
+
+class BadResidualCostFunction: public SizedCostFunction<1, 1> {
+ public:
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ residuals[0] = std::numeric_limits<double>::infinity();
+ if (jacobians != NULL && jacobians[0] != NULL) {
+ jacobians[0][0] = 0.0;
+ }
+ return true;
+ }
+};
+
+class BadJacobianCostFunction: public SizedCostFunction<1, 1> {
+ public:
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ residuals[0] = 1.0;
+ if (jacobians != NULL && jacobians[0] != NULL) {
+ jacobians[0][0] = std::numeric_limits<double>::quiet_NaN();
+ }
+ return true;
+ }
+};
+
+// Note: It is preferable to write the below test as:
+//
+// CheckEvaluation(GoodCostFunction(), true);
+// CheckEvaluation(NoResidualUpdateCostFunction(), false);
+// CheckEvaluation(NoJacobianUpdateCostFunction(), false);
+// ...
+//
+// however, there is a bug in the version of GCC on Mac OS X we tested, which
+// requires the objects to be bound to named local variables instead of being
+// constructed as temporaries.
+TEST(ResidualBlockUtils, CheckAllCombinationsOfBadness) {
+ GoodCostFunction good_fun;
+ CheckEvaluation(good_fun, true);
+ NoResidualUpdateCostFunction no_residual;
+ CheckEvaluation(no_residual, false);
+ NoJacobianUpdateCostFunction no_jacobian;
+ CheckEvaluation(no_jacobian, false);
+ BadResidualCostFunction bad_residual;
+ CheckEvaluation(bad_residual, false);
+ BadJacobianCostFunction bad_jacobian;
+ CheckEvaluation(bad_jacobian, false);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/rotation_test.cc b/internal/ceres/rotation_test.cc
new file mode 100644
index 0000000..8e40507
--- /dev/null
+++ b/internal/ceres/rotation_test.cc
@@ -0,0 +1,970 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <cmath>
+#include <limits>
+#include <string>
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/jet.h"
+#include "ceres/rotation.h"
+#include "ceres/stringprintf.h"
+#include "ceres/test_util.h"
+#include "glog/logging.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+const double kPi = 3.14159265358979323846;
+const double kHalfSqrt2 = 0.707106781186547524401;
+
+double RandDouble() {
+ double r = rand();
+ return r / RAND_MAX;
+}
+
+// A tolerance value for floating-point comparisons.
+static double const kTolerance = numeric_limits<double>::epsilon() * 10;
+
+// Looser tolerance used for numerically unstable conversions.
+static double const kLooseTolerance = 1e-9;
+
+// Use as:
+// double quaternion[4];
+// EXPECT_THAT(quaternion, IsNormalizedQuaternion());
+MATCHER(IsNormalizedQuaternion, "") {
+ if (arg == NULL) {
+ *result_listener << "Null quaternion";
+ return false;
+ }
+
+ double norm2 = arg[0] * arg[0] + arg[1] * arg[1] +
+ arg[2] * arg[2] + arg[3] * arg[3];
+ if (fabs(norm2 - 1.0) > kTolerance) {
+ *result_listener << "squared norm is " << norm2;
+ return false;
+ }
+
+ return true;
+}
+
+// Use as:
+// double expected_quaternion[4];
+// double actual_quaternion[4];
+// EXPECT_THAT(actual_quaternion, IsNearQuaternion(expected_quaternion));
+MATCHER_P(IsNearQuaternion, expected, "") {
+ if (arg == NULL) {
+ *result_listener << "Null quaternion";
+ return false;
+ }
+
+ // Quaternions are equivalent up to a sign change, so we compare
+ // both signs before declaring failure.
+ bool near = true;
+ for (int i = 0; i < 4; i++) {
+ if (fabs(arg[i] - expected[i]) > kTolerance) {
+ near = false;
+ break;
+ }
+ }
+
+ if (near) {
+ return true;
+ }
+
+ near = true;
+ for (int i = 0; i < 4; i++) {
+ if (fabs(arg[i] + expected[i]) > kTolerance) {
+ near = false;
+ break;
+ }
+ }
+
+ if (near) {
+ return true;
+ }
+
+ *result_listener << "expected : "
+ << expected[0] << " "
+ << expected[1] << " "
+ << expected[2] << " "
+ << expected[3] << " "
+ << "actual : "
+ << arg[0] << " "
+ << arg[1] << " "
+ << arg[2] << " "
+ << arg[3];
+ return false;
+}
+
+// Use as:
+// double expected_axis_angle[4];
+// double actual_axis_angle[4];
+// EXPECT_THAT(actual_axis_angle, IsNearAngleAxis(expected_axis_angle));
+MATCHER_P(IsNearAngleAxis, expected, "") {
+ if (arg == NULL) {
+ *result_listener << "Null axis/angle";
+ return false;
+ }
+
+ for (int i = 0; i < 3; i++) {
+ if (fabs(arg[i] - expected[i]) > kTolerance) {
+ *result_listener << "component " << i << " should be " << expected[i];
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Use as:
+// double matrix[9];
+// EXPECT_THAT(matrix, IsOrthonormal());
+MATCHER(IsOrthonormal, "") {
+ if (arg == NULL) {
+ *result_listener << "Null matrix";
+ return false;
+ }
+
+ for (int c1 = 0; c1 < 3; c1++) {
+ for (int c2 = 0; c2 < 3; c2++) {
+ double v = 0;
+ for (int i = 0; i < 3; i++) {
+ v += arg[i + 3 * c1] * arg[i + 3 * c2];
+ }
+ double expected = (c1 == c2) ? 1 : 0;
+ if (fabs(expected - v) > kTolerance) {
+ *result_listener << "Columns " << c1 << " and " << c2
+ << " should have dot product " << expected
+ << " but have " << v;
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+// Use as:
+// double matrix1[9];
+// double matrix2[9];
+// EXPECT_THAT(matrix1, IsNear3x3Matrix(matrix2));
+MATCHER_P(IsNear3x3Matrix, expected, "") {
+ if (arg == NULL) {
+ *result_listener << "Null matrix";
+ return false;
+ }
+
+ for (int i = 0; i < 9; i++) {
+ if (fabs(arg[i] - expected[i]) > kTolerance) {
+ *result_listener << "component " << i << " should be " << expected[i];
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Transforms a zero axis/angle to a quaternion.
+TEST(Rotation, ZeroAngleAxisToQuaternion) {
+ double axis_angle[3] = { 0, 0, 0 };
+ double quaternion[4];
+ double expected[4] = { 1, 0, 0, 0 };
+ AngleAxisToQuaternion(axis_angle, quaternion);
+ EXPECT_THAT(quaternion, IsNormalizedQuaternion());
+ EXPECT_THAT(quaternion, IsNearQuaternion(expected));
+}
+
+// Test that exact conversion works for small angles.
+TEST(Rotation, SmallAngleAxisToQuaternion) {
+ // Small, finite value to test.
+ double theta = 1.0e-2;
+ double axis_angle[3] = { theta, 0, 0 };
+ double quaternion[4];
+ double expected[4] = { cos(theta/2), sin(theta/2.0), 0, 0 };
+ AngleAxisToQuaternion(axis_angle, quaternion);
+ EXPECT_THAT(quaternion, IsNormalizedQuaternion());
+ EXPECT_THAT(quaternion, IsNearQuaternion(expected));
+}
+
+// Test that approximate conversion works for very small angles.
+TEST(Rotation, TinyAngleAxisToQuaternion) {
+ // Very small value that could potentially cause underflow.
+ double theta = pow(numeric_limits<double>::min(), 0.75);
+ double axis_angle[3] = { theta, 0, 0 };
+ double quaternion[4];
+ double expected[4] = { cos(theta/2), sin(theta/2.0), 0, 0 };
+ AngleAxisToQuaternion(axis_angle, quaternion);
+ EXPECT_THAT(quaternion, IsNormalizedQuaternion());
+ EXPECT_THAT(quaternion, IsNearQuaternion(expected));
+}
+
+// Transforms a rotation by pi/2 around X to a quaternion.
+TEST(Rotation, XRotationToQuaternion) {
+ double axis_angle[3] = { kPi / 2, 0, 0 };
+ double quaternion[4];
+ double expected[4] = { kHalfSqrt2, kHalfSqrt2, 0, 0 };
+ AngleAxisToQuaternion(axis_angle, quaternion);
+ EXPECT_THAT(quaternion, IsNormalizedQuaternion());
+ EXPECT_THAT(quaternion, IsNearQuaternion(expected));
+}
+
+// Transforms a unit quaternion to an axis angle.
+TEST(Rotation, UnitQuaternionToAngleAxis) {
+ double quaternion[4] = { 1, 0, 0, 0 };
+ double axis_angle[3];
+ double expected[3] = { 0, 0, 0 };
+ QuaternionToAngleAxis(quaternion, axis_angle);
+ EXPECT_THAT(axis_angle, IsNearAngleAxis(expected));
+}
+
+// Transforms a quaternion that rotates by pi about the Y axis to an axis angle.
+TEST(Rotation, YRotationQuaternionToAngleAxis) {
+ double quaternion[4] = { 0, 0, 1, 0 };
+ double axis_angle[3];
+ double expected[3] = { 0, kPi, 0 };
+ QuaternionToAngleAxis(quaternion, axis_angle);
+ EXPECT_THAT(axis_angle, IsNearAngleAxis(expected));
+}
+
+// Transforms a quaternion that rotates by pi/3 about the Z axis to an axis
+// angle.
+TEST(Rotation, ZRotationQuaternionToAngleAxis) {
+ double quaternion[4] = { sqrt(3) / 2, 0, 0, 0.5 };
+ double axis_angle[3];
+ double expected[3] = { 0, 0, kPi / 3 };
+ QuaternionToAngleAxis(quaternion, axis_angle);
+ EXPECT_THAT(axis_angle, IsNearAngleAxis(expected));
+}
+
+// Test that exact conversion works for small angles.
+TEST(Rotation, SmallQuaternionToAngleAxis) {
+ // Small, finite value to test.
+ double theta = 1.0e-2;
+ double quaternion[4] = { cos(theta/2), sin(theta/2.0), 0, 0 };
+ double axis_angle[3];
+ double expected[3] = { theta, 0, 0 };
+ QuaternionToAngleAxis(quaternion, axis_angle);
+ EXPECT_THAT(axis_angle, IsNearAngleAxis(expected));
+}
+
+// Test that approximate conversion works for very small angles.
+TEST(Rotation, TinyQuaternionToAngleAxis) {
+ // Very small value that could potentially cause underflow.
+ double theta = pow(numeric_limits<double>::min(), 0.75);
+ double quaternion[4] = { cos(theta/2), sin(theta/2.0), 0, 0 };
+ double axis_angle[3];
+ double expected[3] = { theta, 0, 0 };
+ QuaternionToAngleAxis(quaternion, axis_angle);
+ EXPECT_THAT(axis_angle, IsNearAngleAxis(expected));
+}
+
+TEST(Rotation, QuaternionToAngleAxisAngleIsLessThanPi) {
+ double quaternion[4];
+ double angle_axis[3];
+
+ const double half_theta = 0.75 * kPi;
+
+ quaternion[0] = cos(half_theta);
+ quaternion[1] = 1.0 * sin(half_theta);
+ quaternion[2] = 0.0;
+ quaternion[3] = 0.0;
+ QuaternionToAngleAxis(quaternion, angle_axis);
+ const double angle = sqrt(angle_axis[0] * angle_axis[0] +
+ angle_axis[1] * angle_axis[1] +
+ angle_axis[2] * angle_axis[2]);
+ EXPECT_LE(angle, kPi);
+}
+
+static const int kNumTrials = 10000;
+
+// Takes a bunch of random axis/angle values, converts them to quaternions,
+// and back again.
+TEST(Rotation, AngleAxisToQuaternionAndBack) {
+ srand(5);
+ for (int i = 0; i < kNumTrials; i++) {
+ double axis_angle[3];
+ // Make an axis by choosing three random numbers in [-1, 1) and
+ // normalizing.
+ double norm = 0;
+ for (int i = 0; i < 3; i++) {
+ axis_angle[i] = RandDouble() * 2 - 1;
+ norm += axis_angle[i] * axis_angle[i];
+ }
+ norm = sqrt(norm);
+
+ // Angle in [-pi, pi).
+ double theta = kPi * 2 * RandDouble() - kPi;
+ for (int i = 0; i < 3; i++) {
+ axis_angle[i] = axis_angle[i] * theta / norm;
+ }
+
+ double quaternion[4];
+ double round_trip[3];
+ // We use ASSERTs here because if there's one failure, there are
+ // probably many and spewing a million failures doesn't make anyone's
+ // day.
+ AngleAxisToQuaternion(axis_angle, quaternion);
+ ASSERT_THAT(quaternion, IsNormalizedQuaternion());
+ QuaternionToAngleAxis(quaternion, round_trip);
+ ASSERT_THAT(round_trip, IsNearAngleAxis(axis_angle));
+ }
+}
+
+// Takes a bunch of random quaternions, converts them to axis/angle,
+// and back again.
+TEST(Rotation, QuaternionToAngleAxisAndBack) {
+ srand(5);
+ for (int i = 0; i < kNumTrials; i++) {
+ double quaternion[4];
+ // Choose four random numbers in [-1, 1) and normalize.
+ double norm = 0;
+ for (int i = 0; i < 4; i++) {
+ quaternion[i] = RandDouble() * 2 - 1;
+ norm += quaternion[i] * quaternion[i];
+ }
+ norm = sqrt(norm);
+
+ for (int i = 0; i < 4; i++) {
+ quaternion[i] = quaternion[i] / norm;
+ }
+
+ double axis_angle[3];
+ double round_trip[4];
+ QuaternionToAngleAxis(quaternion, axis_angle);
+ AngleAxisToQuaternion(axis_angle, round_trip);
+ ASSERT_THAT(round_trip, IsNormalizedQuaternion());
+ ASSERT_THAT(round_trip, IsNearQuaternion(quaternion));
+ }
+}
+
+// Transforms a zero axis/angle to a rotation matrix.
+TEST(Rotation, ZeroAngleAxisToRotationMatrix) {
+ double axis_angle[3] = { 0, 0, 0 };
+ double matrix[9];
+ double expected[9] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
+ AngleAxisToRotationMatrix(axis_angle, matrix);
+ EXPECT_THAT(matrix, IsOrthonormal());
+ EXPECT_THAT(matrix, IsNear3x3Matrix(expected));
+}
+
+TEST(Rotation, NearZeroAngleAxisToRotationMatrix) {
+ double axis_angle[3] = { 1e-24, 2e-24, 3e-24 };
+ double matrix[9];
+ double expected[9] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
+ AngleAxisToRotationMatrix(axis_angle, matrix);
+ EXPECT_THAT(matrix, IsOrthonormal());
+ EXPECT_THAT(matrix, IsNear3x3Matrix(expected));
+}
+
+// Transforms a rotation by pi/2 around X to a rotation matrix and back.
+TEST(Rotation, XRotationToRotationMatrix) {
+ double axis_angle[3] = { kPi / 2, 0, 0 };
+ double matrix[9];
+ // The rotation matrices are stored column-major.
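+ // Read column-major, it maps e_y -> e_z and e_z -> -e_y, i.e. a rotation
+ // by +pi/2 about the X axis.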
+ double expected[9] = { 1, 0, 0, 0, 0, 1, 0, -1, 0 };
+ AngleAxisToRotationMatrix(axis_angle, matrix);
+ EXPECT_THAT(matrix, IsOrthonormal());
+ EXPECT_THAT(matrix, IsNear3x3Matrix(expected));
+ double round_trip[3];
+ RotationMatrixToAngleAxis(matrix, round_trip);
+ EXPECT_THAT(round_trip, IsNearAngleAxis(axis_angle));
+}
+
+// Transforms an axis angle that rotates by pi about the Y axis to a
+// rotation matrix and back.
+TEST(Rotation, YRotationToRotationMatrix) {
+ double axis_angle[3] = { 0, kPi, 0 };
+ double matrix[9];
+ double expected[9] = { -1, 0, 0, 0, 1, 0, 0, 0, -1 };
+ AngleAxisToRotationMatrix(axis_angle, matrix);
+ EXPECT_THAT(matrix, IsOrthonormal());
+ EXPECT_THAT(matrix, IsNear3x3Matrix(expected));
+
+ double round_trip[3];
+ RotationMatrixToAngleAxis(matrix, round_trip);
+ EXPECT_THAT(round_trip, IsNearAngleAxis(axis_angle));
+}
+
+TEST(Rotation, NearPiAngleAxisRoundTrip) {
+ double in_axis_angle[3];
+ double matrix[9];
+ double out_axis_angle[3];
+
+ srand(5);
+ for (int i = 0; i < kNumTrials; i++) {
+ // Make an axis by choosing three random numbers in [-1, 1) and
+ // normalizing.
+ double norm = 0;
+ for (int i = 0; i < 3; i++) {
+ in_axis_angle[i] = RandDouble() * 2 - 1;
+ norm += in_axis_angle[i] * in_axis_angle[i];
+ }
+ norm = sqrt(norm);
+
+ // Angle in [pi - kMaxSmallAngle, pi).
+ const double kMaxSmallAngle = 1e-2;
+ double theta = kPi - kMaxSmallAngle * RandDouble();
+
+ for (int i = 0; i < 3; i++) {
+ in_axis_angle[i] *= (theta / norm);
+ }
+ AngleAxisToRotationMatrix(in_axis_angle, matrix);
+ RotationMatrixToAngleAxis(matrix, out_axis_angle);
+
+ for (int i = 0; i < 3; ++i) {
+ EXPECT_NEAR(out_axis_angle[i], in_axis_angle[i], kLooseTolerance);
+ }
+ }
+}
+
+TEST(Rotation, AtPiAngleAxisRoundTrip) {
+ // A rotation of kPi about the X axis.
+ static const double kMatrix[3][3] = {
+ {1.0, 0.0, 0.0},
+ {0.0, -1.0, 0.0},
+ {0.0, 0.0, -1.0}
+ };
+
+ double in_matrix[9];
+ // Fill it from kMatrix in col-major order.
+ for (int j = 0, k = 0; j < 3; ++j) {
+ for (int i = 0; i < 3; ++i, ++k) {
+ in_matrix[k] = kMatrix[i][j];
+ }
+ }
+
+ const double expected_axis_angle[3] = { kPi, 0, 0 };
+
+ double out_matrix[9];
+ double axis_angle[3];
+ RotationMatrixToAngleAxis(in_matrix, axis_angle);
+ AngleAxisToRotationMatrix(axis_angle, out_matrix);
+
+ LOG(INFO) << "AngleAxis = " << axis_angle[0] << " " << axis_angle[1]
+ << " " << axis_angle[2];
+ LOG(INFO) << "Expected AngleAxis = " << kPi << " 0 0";
+ double out_rowmajor[3][3];
+ for (int j = 0, k = 0; j < 3; ++j) {
+ for (int i = 0; i < 3; ++i, ++k) {
+ out_rowmajor[i][j] = out_matrix[k];
+ }
+ }
+ LOG(INFO) << "Rotation:";
+ LOG(INFO) << "EXPECTED | ACTUAL";
+ for (int i = 0; i < 3; ++i) {
+ string line;
+ for (int j = 0; j < 3; ++j) {
+ StringAppendF(&line, "%g ", kMatrix[i][j]);
+ }
+ line += " | ";
+ for (int j = 0; j < 3; ++j) {
+ StringAppendF(&line, "%g ", out_rowmajor[i][j]);
+ }
+ LOG(INFO) << line;
+ }
+
+ EXPECT_THAT(axis_angle, IsNearAngleAxis(expected_axis_angle));
+ EXPECT_THAT(out_matrix, IsNear3x3Matrix(in_matrix));
+}
+
+// Transforms an axis angle that rotates by pi/3 about the Z axis to a
+// rotation matrix.
+TEST(Rotation, ZRotationToRotationMatrix) {
+ double axis_angle[3] = { 0, 0, kPi / 3 };
+ double matrix[9];
+ // This is laid out row-major on the screen but is actually stored
+ // column-major.
+ double expected[9] = { 0.5, sqrt(3) / 2, 0, // Column 1
+ -sqrt(3) / 2, 0.5, 0, // Column 2
+ 0, 0, 1 }; // Column 3
+ AngleAxisToRotationMatrix(axis_angle, matrix);
+ EXPECT_THAT(matrix, IsOrthonormal());
+ EXPECT_THAT(matrix, IsNear3x3Matrix(expected));
+ double round_trip[3];
+ RotationMatrixToAngleAxis(matrix, round_trip);
+ EXPECT_THAT(round_trip, IsNearAngleAxis(axis_angle));
+}
+
+// Takes a bunch of random axis/angle values, converts them to rotation
+// matrices, and back again.
+TEST(Rotation, AngleAxisToRotationMatrixAndBack) {
+ srand(5);
+ for (int i = 0; i < kNumTrials; i++) {
+ double axis_angle[3];
+ // Make an axis by choosing three random numbers in [-1, 1) and
+ // normalizing.
+ double norm = 0;
+ for (int i = 0; i < 3; i++) {
+ axis_angle[i] = RandDouble() * 2 - 1;
+ norm += axis_angle[i] * axis_angle[i];
+ }
+ norm = sqrt(norm);
+
+ // Angle in [-pi, pi).
+ double theta = kPi * 2 * RandDouble() - kPi;
+ for (int i = 0; i < 3; i++) {
+ axis_angle[i] = axis_angle[i] * theta / norm;
+ }
+
+ double matrix[9];
+ double round_trip[3];
+ AngleAxisToRotationMatrix(axis_angle, matrix);
+ ASSERT_THAT(matrix, IsOrthonormal());
+ RotationMatrixToAngleAxis(matrix, round_trip);
+
+ for (int i = 0; i < 3; ++i) {
+ EXPECT_NEAR(round_trip[i], axis_angle[i], kLooseTolerance);
+ }
+ }
+}
+
+// Transposes a 3x3 matrix.
+static void Transpose3x3(double m[9]) {
+ std::swap(m[1], m[3]);
+ std::swap(m[2], m[6]);
+ std::swap(m[5], m[7]);
+}
+
+// Convert Euler angles from radians to degrees.
+static void ToDegrees(double ea[3]) {
+ for (int i = 0; i < 3; ++i)
+ ea[i] *= 180.0 / kPi;
+}
+
+// Compare the 3x3 rotation matrices produced by the axis-angle
+// rotation 'aa' and the Euler angle rotation 'ea' (in radians).
+static void CompareEulerToAngleAxis(double aa[3], double ea[3]) {
+ double aa_matrix[9];
+ AngleAxisToRotationMatrix(aa, aa_matrix);
+ Transpose3x3(aa_matrix); // Column to row major order.
+
+ double ea_matrix[9];
+ ToDegrees(ea); // Radians to degrees.
+ const int kRowStride = 3;
+ EulerAnglesToRotationMatrix(ea, kRowStride, ea_matrix);
+
+ EXPECT_THAT(aa_matrix, IsOrthonormal());
+ EXPECT_THAT(ea_matrix, IsOrthonormal());
+ EXPECT_THAT(ea_matrix, IsNear3x3Matrix(aa_matrix));
+}
+
+// Test with rotation axis along the x/y/z axes.
+// Also test zero rotation.
+TEST(EulerAnglesToRotationMatrix, OnAxis) {
+ int n_tests = 0;
+ for (double x = -1.0; x <= 1.0; x += 1.0) {
+ for (double y = -1.0; y <= 1.0; y += 1.0) {
+ for (double z = -1.0; z <= 1.0; z += 1.0) {
+ if ((x != 0) + (y != 0) + (z != 0) > 1)
+ continue;
+ double axis_angle[3] = {x, y, z};
+ double euler_angles[3] = {x, y, z};
+ CompareEulerToAngleAxis(axis_angle, euler_angles);
+ ++n_tests;
+ }
+ }
+ }
+ CHECK_EQ(7, n_tests);
+}
+
+// Test that a random rotation produces an orthonormal rotation
+// matrix.
+TEST(EulerAnglesToRotationMatrix, IsOrthonormal) {
+ srand(5);
+ for (int trial = 0; trial < kNumTrials; ++trial) {
+ double ea[3];
+ for (int i = 0; i < 3; ++i)
+ ea[i] = 360.0 * (RandDouble() * 2.0 - 1.0); // Angles in degrees.
+ double ea_matrix[9];
+ EulerAnglesToRotationMatrix(ea, 3, ea_matrix);
+ EXPECT_THAT(ea_matrix, IsOrthonormal());
+ }
+}
+
+// Tests using Jets for specific behavior involving auto differentiation
+// near singularity points.
+
+typedef Jet<double, 3> J3;
+typedef Jet<double, 4> J4;
+
+J3 MakeJ3(double a, double v0, double v1, double v2) {
+ J3 j;
+ j.a = a;
+ j.v[0] = v0;
+ j.v[1] = v1;
+ j.v[2] = v2;
+ return j;
+}
+
+J4 MakeJ4(double a, double v0, double v1, double v2, double v3) {
+ J4 j;
+ j.a = a;
+ j.v[0] = v0;
+ j.v[1] = v1;
+ j.v[2] = v2;
+ j.v[3] = v3;
+ return j;
+}
+
+
+bool IsClose(double x, double y) {
+ EXPECT_FALSE(IsNaN(x));
+ EXPECT_FALSE(IsNaN(y));
+ double absdiff = fabs(x - y);
+ if (x == 0 || y == 0) {
+ return absdiff <= kTolerance;
+ }
+ double reldiff = absdiff / max(fabs(x), fabs(y));
+ return reldiff <= kTolerance;
+}
+
+template <int N>
+bool IsClose(const Jet<double, N> &x, const Jet<double, N> &y) {
+ if (!IsClose(x.a, y.a)) {
+ return false;
+ }
+ for (int i = 0; i < N; i++) {
+ if (!IsClose(x.v[i], y.v[i])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+template <int M, int N>
+void ExpectJetArraysClose(const Jet<double, N> *x, const Jet<double, N> *y) {
+ for (int i = 0; i < M; i++) {
+ if (!IsClose(x[i], y[i])) {
+ LOG(ERROR) << "Jet " << i << "/" << M << " not equal";
+ LOG(ERROR) << "x[" << i << "]: " << x[i];
+ LOG(ERROR) << "y[" << i << "]: " << y[i];
+ Jet<double, N> d;
+ d.a = y[i].a - x[i].a;
+ for (int j = 0; j < N; j++) {
+ d.v[j] = y[i].v[j] - x[i].v[j];
+ }
+ LOG(ERROR) << "diff: " << d;
+ EXPECT_TRUE(IsClose(x[i], y[i]));
+ }
+ }
+}
+
+// Log-10 of a value well below machine precision.
+static const int kSmallTinyCutoff =
+ static_cast<int>(2 * log(numeric_limits<double>::epsilon())/log(10.0));
+
+// Log-10 of a value just below values representable by double.
+static const int kTinyZeroLimit =
+ static_cast<int>(1 + log(numeric_limits<double>::min())/log(10.0));
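+// (With IEEE doubles these evaluate to roughly -31 and -306 respectively.)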
+
+// Test that exact conversion works for small angles when jets are used.
+TEST(Rotation, SmallAngleAxisToQuaternionForJets) {
+ // Examine small x rotations that are still large enough
+ // to be well within the range represented by doubles.
+ for (int i = -2; i >= kSmallTinyCutoff; i--) {
+ double theta = pow(10.0, i);
+ J3 axis_angle[3] = { J3(theta, 0), J3(0, 1), J3(0, 2) };
+ J3 quaternion[4];
+ J3 expected[4] = {
+ MakeJ3(cos(theta/2), -sin(theta/2)/2, 0, 0),
+ MakeJ3(sin(theta/2), cos(theta/2)/2, 0, 0),
+ MakeJ3(0, 0, sin(theta/2)/theta, 0),
+ MakeJ3(0, 0, 0, sin(theta/2)/theta),
+ };
+ AngleAxisToQuaternion(axis_angle, quaternion);
+ ExpectJetArraysClose<4, 3>(quaternion, expected);
+ }
+}
+
+
+// Test that conversion works for very small angles when jets are used.
+TEST(Rotation, TinyAngleAxisToQuaternionForJets) {
+ // Examine tiny x rotations that extend all the way to where
+ // underflow occurs.
+ for (int i = kSmallTinyCutoff; i >= kTinyZeroLimit; i--) {
+ double theta = pow(10.0, i);
+ J3 axis_angle[3] = { J3(theta, 0), J3(0, 1), J3(0, 2) };
+ J3 quaternion[4];
+ // To avoid loss of precision in the test itself,
+ // a finite expansion is used here, which will
+ // be exact up to machine precision for the test values used.
+ J3 expected[4] = {
+ MakeJ3(1.0, 0, 0, 0),
+ MakeJ3(0, 0.5, 0, 0),
+ MakeJ3(0, 0, 0.5, 0),
+ MakeJ3(0, 0, 0, 0.5),
+ };
+ AngleAxisToQuaternion(axis_angle, quaternion);
+ ExpectJetArraysClose<4, 3>(quaternion, expected);
+ }
+}
+
+// Test that derivatives are correct for zero rotation.
+TEST(Rotation, ZeroAngleAxisToQuaternionForJets) {
+ J3 axis_angle[3] = { J3(0, 0), J3(0, 1), J3(0, 2) };
+ J3 quaternion[4];
+ J3 expected[4] = {
+ MakeJ3(1.0, 0, 0, 0),
+ MakeJ3(0, 0.5, 0, 0),
+ MakeJ3(0, 0, 0.5, 0),
+ MakeJ3(0, 0, 0, 0.5),
+ };
+ AngleAxisToQuaternion(axis_angle, quaternion);
+ ExpectJetArraysClose<4, 3>(quaternion, expected);
+}
+
+// Test that exact conversion works for small angles.
+TEST(Rotation, SmallQuaternionToAngleAxisForJets) {
+ // Examine small x rotations that are still large enough
+ // to be well within the range represented by doubles.
+ for (int i = -2; i >= kSmallTinyCutoff; i--) {
+ double theta = pow(10.0, i);
+ double s = sin(theta);
+ double c = cos(theta);
+ J4 quaternion[4] = { J4(c, 0), J4(s, 1), J4(0, 2), J4(0, 3) };
+ J4 axis_angle[3];
+ J4 expected[3] = {
+ MakeJ4(s, -2*theta, 2*theta*c, 0, 0),
+ MakeJ4(0, 0, 0, 2*theta/s, 0),
+ MakeJ4(0, 0, 0, 0, 2*theta/s),
+ };
+ QuaternionToAngleAxis(quaternion, axis_angle);
+ ExpectJetArraysClose<3, 4>(axis_angle, expected);
+ }
+}
+
+// Test that conversion works for very small angles.
+TEST(Rotation, TinyQuaternionToAngleAxisForJets) {
+ // Examine tiny x rotations that extend all the way to where
+ // underflow occurs.
+ for (int i = kSmallTinyCutoff; i >= kTinyZeroLimit; i--) {
+ double theta = pow(10.0, i);
+ double s = sin(theta);
+ double c = cos(theta);
+ J4 quaternion[4] = { J4(c, 0), J4(s, 1), J4(0, 2), J4(0, 3) };
+ J4 axis_angle[3];
+ // To avoid loss of precision in the test itself,
+ // a finite expansion is used here, which will
+ // be exact up to machine precision for the test values used.
+ J4 expected[3] = {
+ MakeJ4(theta, -2*theta, 2.0, 0, 0),
+ MakeJ4(0, 0, 0, 2.0, 0),
+ MakeJ4(0, 0, 0, 0, 2.0),
+ };
+ QuaternionToAngleAxis(quaternion, axis_angle);
+ ExpectJetArraysClose<3, 4>(axis_angle, expected);
+ }
+}
+
+// Test that conversion works for no rotation.
+TEST(Rotation, ZeroQuaternionToAngleAxisForJets) {
+ J4 quaternion[4] = { J4(1, 0), J4(0, 1), J4(0, 2), J4(0, 3) };
+ J4 axis_angle[3];
+ J4 expected[3] = {
+ MakeJ4(0, 0, 2.0, 0, 0),
+ MakeJ4(0, 0, 0, 2.0, 0),
+ MakeJ4(0, 0, 0, 0, 2.0),
+ };
+ QuaternionToAngleAxis(quaternion, axis_angle);
+ ExpectJetArraysClose<3, 4>(axis_angle, expected);
+}
+
+TEST(Quaternion, RotatePointGivesSameAnswerAsRotationByMatrixCanned) {
+ // Canned data generated in octave.
+ double const q[4] = {
+ +0.1956830471754074,
+ -0.0150618562474847,
+ +0.7634572982788086,
+ -0.3019454777240753,
+ };
+ double const Q[3][3] = { // Scaled rotation matrix.
+ { -0.6355194033477252, 0.0951730541682254, 0.3078870197911186 },
+ { -0.1411693904792992, 0.5297609702153905, -0.4551502574482019 },
+ { -0.2896955822708862, -0.4669396571547050, -0.4536309793389248 },
+ };
+ double const R[3][3] = { // With unit rows and columns.
+ { -0.8918859164053080, 0.1335655625725649, 0.4320876677394745 },
+ { -0.1981166751680096, 0.7434648665444399, -0.6387564287225856 },
+ { -0.4065578619806013, -0.6553016349046693, -0.6366242786393164 },
+ };
+
+ // Compute R from q and compare to known answer.
+ double Rq[3][3];
+ QuaternionToScaledRotation<double>(q, Rq[0]);
+ ExpectArraysClose(9, Q[0], Rq[0], kTolerance);
+
+ // Now do the same but compute R with normalization.
+ QuaternionToRotation<double>(q, Rq[0]);
+ ExpectArraysClose(9, R[0], Rq[0], kTolerance);
+}
+
+
+TEST(Quaternion, RotatePointGivesSameAnswerAsRotationByMatrix) {
+ // Rotation defined by a unit quaternion.
+ double const q[4] = {
+ 0.2318160216097109,
+ -0.0178430356832060,
+ 0.9044300776717159,
+ -0.3576998641394597,
+ };
+ double const p[3] = {
+ +0.11,
+ -13.15,
+ 1.17,
+ };
+
+ double R[3 * 3];
+ QuaternionToRotation(q, R);
+
+ double result1[3];
+ UnitQuaternionRotatePoint(q, p, result1);
+
+ double result2[3];
+ VectorRef(result2, 3) = ConstMatrixRef(R, 3, 3) * ConstVectorRef(p, 3);
+ ExpectArraysClose(3, result1, result2, kTolerance);
+}
+
+
+// Verify that (a * b) * c == a * (b * c).
+TEST(Quaternion, MultiplicationIsAssociative) {
+ double a[4];
+ double b[4];
+ double c[4];
+ for (int i = 0; i < 4; ++i) {
+ a[i] = 2 * RandDouble() - 1;
+ b[i] = 2 * RandDouble() - 1;
+ c[i] = 2 * RandDouble() - 1;
+ }
+
+ double ab[4];
+ double ab_c[4];
+ QuaternionProduct(a, b, ab);
+ QuaternionProduct(ab, c, ab_c);
+
+ double bc[4];
+ double a_bc[4];
+ QuaternionProduct(b, c, bc);
+ QuaternionProduct(a, bc, a_bc);
+
+ ASSERT_NEAR(ab_c[0], a_bc[0], kTolerance);
+ ASSERT_NEAR(ab_c[1], a_bc[1], kTolerance);
+ ASSERT_NEAR(ab_c[2], a_bc[2], kTolerance);
+ ASSERT_NEAR(ab_c[3], a_bc[3], kTolerance);
+}
+
+
+TEST(AngleAxis, RotatePointGivesSameAnswerAsRotationMatrix) {
+ double angle_axis[3];
+ double R[9];
+ double p[3];
+ double angle_axis_rotated_p[3];
+ double rotation_matrix_rotated_p[3];
+
+ for (int i = 0; i < 10000; ++i) {
+ double theta = (2.0 * i * 0.0011 - 1.0) * kPi;
+ for (int j = 0; j < 50; ++j) {
+ double norm2 = 0.0;
+ for (int k = 0; k < 3; ++k) {
+ angle_axis[k] = 2.0 * RandDouble() - 1.0;
+ p[k] = 2.0 * RandDouble() - 1.0;
+ norm2 += angle_axis[k] * angle_axis[k];
+ }
+
+ const double inv_norm = theta / sqrt(norm2);
+ for (int k = 0; k < 3; ++k) {
+ angle_axis[k] *= inv_norm;
+ }
+
+ AngleAxisToRotationMatrix(angle_axis, R);
+ rotation_matrix_rotated_p[0] = R[0] * p[0] + R[3] * p[1] + R[6] * p[2];
+ rotation_matrix_rotated_p[1] = R[1] * p[0] + R[4] * p[1] + R[7] * p[2];
+ rotation_matrix_rotated_p[2] = R[2] * p[0] + R[5] * p[1] + R[8] * p[2];
+
+ AngleAxisRotatePoint(angle_axis, p, angle_axis_rotated_p);
+ for (int k = 0; k < 3; ++k) {
+ EXPECT_NEAR(rotation_matrix_rotated_p[k],
+ angle_axis_rotated_p[k],
+ kTolerance) << "p: " << p[0]
+ << " " << p[1]
+ << " " << p[2]
+ << " angle_axis: " << angle_axis[0]
+ << " " << angle_axis[1]
+ << " " << angle_axis[2];
+ }
+ }
+ }
+}
+
+TEST(AngleAxis, NearZeroRotatePointGivesSameAnswerAsRotationMatrix) {
+ double angle_axis[3];
+ double R[9];
+ double p[3];
+ double angle_axis_rotated_p[3];
+ double rotation_matrix_rotated_p[3];
+
+ for (int i = 0; i < 10000; ++i) {
+ double norm2 = 0.0;
+ for (int k = 0; k < 3; ++k) {
+ angle_axis[k] = 2.0 * RandDouble() - 1.0;
+ p[k] = 2.0 * RandDouble() - 1.0;
+ norm2 += angle_axis[k] * angle_axis[k];
+ }
+
+ double theta = (2.0 * i * 0.0001 - 1.0) * 1e-16;
+ const double inv_norm = theta / sqrt(norm2);
+ for (int k = 0; k < 3; ++k) {
+ angle_axis[k] *= inv_norm;
+ }
+
+ AngleAxisToRotationMatrix(angle_axis, R);
+ rotation_matrix_rotated_p[0] = R[0] * p[0] + R[3] * p[1] + R[6] * p[2];
+ rotation_matrix_rotated_p[1] = R[1] * p[0] + R[4] * p[1] + R[7] * p[2];
+ rotation_matrix_rotated_p[2] = R[2] * p[0] + R[5] * p[1] + R[8] * p[2];
+
+ AngleAxisRotatePoint(angle_axis, p, angle_axis_rotated_p);
+ for (int k = 0; k < 3; ++k) {
+ EXPECT_NEAR(rotation_matrix_rotated_p[k],
+ angle_axis_rotated_p[k],
+ kTolerance) << "p: " << p[0]
+ << " " << p[1]
+ << " " << p[2]
+ << " angle_axis: " << angle_axis[0]
+ << " " << angle_axis[1]
+ << " " << angle_axis[2];
+ }
+ }
+}
+
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/runtime_numeric_diff_cost_function.cc b/internal/ceres/runtime_numeric_diff_cost_function.cc
new file mode 100644
index 0000000..7af275c
--- /dev/null
+++ b/internal/ceres/runtime_numeric_diff_cost_function.cc
@@ -0,0 +1,217 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Based on the templated version in public/numeric_diff_cost_function.h.
+
+#include "ceres/runtime_numeric_diff_cost_function.h"
+
+#include <algorithm>
+#include <numeric>
+#include <vector>
+#include "Eigen/Dense"
+#include "ceres/cost_function.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
+bool EvaluateJacobianForParameterBlock(const CostFunction* function,
+ int parameter_block_size,
+ int parameter_block,
+ RuntimeNumericDiffMethod method,
+ double relative_step_size,
+ double const* residuals_at_eval_point,
+ double** parameters,
+ double** jacobians) {
+ using Eigen::Map;
+ using Eigen::Matrix;
+ using Eigen::Dynamic;
+ using Eigen::RowMajor;
+
+ typedef Matrix<double, Dynamic, 1> ResidualVector;
+ typedef Matrix<double, Dynamic, 1> ParameterVector;
+ typedef Matrix<double, Dynamic, Dynamic, RowMajor> JacobianMatrix;
+
+ int num_residuals = function->num_residuals();
+
+ Map<JacobianMatrix> parameter_jacobian(jacobians[parameter_block],
+ num_residuals,
+ parameter_block_size);
+
+ // Mutate one element at a time and then restore.
+ Map<ParameterVector> x_plus_delta(parameters[parameter_block],
+ parameter_block_size);
+ ParameterVector x(x_plus_delta);
+ ParameterVector step_size = x.array().abs() * relative_step_size;
+
+ // To handle cases where a parameter is exactly zero, instead use the mean
+ // step_size of the other dimensions.
+ double fallback_step_size = step_size.sum() / step_size.rows();
+ if (fallback_step_size == 0.0) {
+ // If all the parameters are zero, there's no good answer. Use the given
+ // relative step_size as absolute step_size and hope for the best.
+ fallback_step_size = relative_step_size;
+ }
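+ // For example, with x = (0, 2, 4) and relative_step_size = 1e-6, step_size
+ // starts out as (0, 2e-6, 4e-6); the zero entry is replaced below by the
+ // fallback (mean) value 2e-6.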
+
+ // For each parameter in the parameter block, use finite differences to
+ // compute the derivative for that parameter.
+ for (int j = 0; j < parameter_block_size; ++j) {
+ if (step_size(j) == 0.0) {
+ // The parameter is exactly zero, so compromise and use the mean step_size
+ // from the other parameters. This can break in many cases, but it's hard
+ // to pick a good number without problem specific knowledge.
+ step_size(j) = fallback_step_size;
+ }
+ x_plus_delta(j) = x(j) + step_size(j);
+
+ ResidualVector residuals(num_residuals);
+ if (!function->Evaluate(parameters, &residuals[0], NULL)) {
+ // Something went wrong; bail.
+ return false;
+ }
+
+ // Compute this column of the jacobian in 3 steps:
+ // 1. Store residuals for the forward part.
+ // 2. Subtract residuals for the backward (or 0) part.
+ // 3. Divide out the run.
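+ // For CENTRAL this evaluates to (f(x + h e_j) - f(x - h e_j)) / (2 h), and
+ // for FORWARD to (f(x + h e_j) - f(x)) / h, where h = step_size(j).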
+ parameter_jacobian.col(j) = residuals;
+
+ double one_over_h = 1 / step_size(j);
+ if (method == CENTRAL) {
+ // Compute the function on the other side of x(j).
+ x_plus_delta(j) = x(j) - step_size(j);
+
+ if (!function->Evaluate(parameters, &residuals[0], NULL)) {
+ // Something went wrong; bail.
+ return false;
+ }
+ parameter_jacobian.col(j) -= residuals;
+ one_over_h /= 2;
+ } else {
+ // Forward difference only; reuse existing residuals evaluation.
+ parameter_jacobian.col(j) -=
+ Map<const ResidualVector>(residuals_at_eval_point, num_residuals);
+ }
+ x_plus_delta(j) = x(j); // Restore x_plus_delta.
+
+ // Divide out the run to get slope.
+ parameter_jacobian.col(j) *= one_over_h;
+ }
+ return true;
+}
+
+class RuntimeNumericDiffCostFunction : public CostFunction {
+ public:
+ RuntimeNumericDiffCostFunction(const CostFunction* function,
+ RuntimeNumericDiffMethod method,
+ double relative_step_size)
+ : function_(function),
+ method_(method),
+ relative_step_size_(relative_step_size) {
+ *mutable_parameter_block_sizes() = function->parameter_block_sizes();
+ set_num_residuals(function->num_residuals());
+ }
+
+ virtual ~RuntimeNumericDiffCostFunction() { }
+
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ // Get the function value (residuals) at the point to evaluate.
+ bool success = function_->Evaluate(parameters, residuals, NULL);
+ if (!success) {
+ // Something went wrong; ignore the jacobian.
+ return false;
+ }
+ if (!jacobians) {
+ // Nothing to do; just forward.
+ return true;
+ }
+
+ const vector<int16>& block_sizes = function_->parameter_block_sizes();
+ CHECK(!block_sizes.empty());
+
+ // Create local space for a copy of the parameters which will get mutated.
+ int parameters_size = accumulate(block_sizes.begin(), block_sizes.end(), 0);
+ vector<double> parameters_copy(parameters_size);
+ vector<double*> parameters_references_copy(block_sizes.size());
+ parameters_references_copy[0] = &parameters_copy[0];
+ for (int block = 1; block < block_sizes.size(); ++block) {
+ parameters_references_copy[block] = parameters_references_copy[block - 1]
+ + block_sizes[block - 1];
+ }
+
+ // Copy the parameters into the local temp space.
+ for (int block = 0; block < block_sizes.size(); ++block) {
+ memcpy(parameters_references_copy[block],
+ parameters[block],
+ block_sizes[block] * sizeof(*parameters[block]));
+ }
+
+ for (int block = 0; block < block_sizes.size(); ++block) {
+ if (!jacobians[block]) {
+ // No jacobian requested for this parameter / residual pair.
+ continue;
+ }
+ if (!EvaluateJacobianForParameterBlock(function_,
+ block_sizes[block],
+ block,
+ method_,
+ relative_step_size_,
+ residuals,
+ &parameters_references_copy[0],
+ jacobians)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ private:
+ const CostFunction* function_;
+ RuntimeNumericDiffMethod method_;
+ double relative_step_size_;
+};
+
+} // namespace
+
+CostFunction* CreateRuntimeNumericDiffCostFunction(
+ const CostFunction* cost_function,
+ RuntimeNumericDiffMethod method,
+ double relative_step_size) {
+ return new RuntimeNumericDiffCostFunction(cost_function,
+ method,
+ relative_step_size);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/runtime_numeric_diff_cost_function.h b/internal/ceres/runtime_numeric_diff_cost_function.h
new file mode 100644
index 0000000..01b57f9
--- /dev/null
+++ b/internal/ceres/runtime_numeric_diff_cost_function.h
@@ -0,0 +1,87 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Create CostFunctions as needed by the least squares framework with jacobians
+// computed via numeric differentiation.
+//
+// To get a numerically differentiated cost function, define a subclass of
+// CostFunction such that the Evaluate() function ignores the jacobian
+// parameter. The numeric differentiation wrapper will fill in the jacobian
+// parameter if necessary by repeatedly calling the Evaluate() function with
+// small changes to the appropriate parameters, and computing the slope. This
+// implementation is not templated (hence the "Runtime" prefix), which makes it
+// a bit slower than, but more convenient than, the templated version in
+// numeric_diff_cost_function.h.
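+//
+// As a minimal, hypothetical sketch (MyCostFunction, its single residual and
+// its one 2-dimensional parameter block are made up for illustration), such a
+// subclass might look like:
+//
+//   class MyCostFunction : public CostFunction {
+//    public:
+//     MyCostFunction() {
+//       set_num_residuals(1);
+//       mutable_parameter_block_sizes()->push_back(2);  // One 2-vector block.
+//     }
+//     virtual bool Evaluate(double const* const* parameters,
+//                           double* residuals,
+//                           double** jacobians) const {
+//       (void) jacobians;  // Ignored; the wrapper fills the jacobians in.
+//       residuals[0] = parameters[0][0] * parameters[0][0] +
+//                      parameters[0][1] - 3.0;
+//       return true;
+//     }
+//   };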
+//
+// The numerically differentiated version of a cost function can be
+// constructed as follows:
+//
+// CostFunction* cost_function
+// = CreateRuntimeNumericDiffCostFunction(new MyCostFunction(...),
+// CENTRAL,
+// 1e-6 /* relative step size */);
+//
+// The central difference method is considerably more accurate; consider using
+// it to start with, and only once that works, try switching to forward
+// differences.
+//
+// TODO(keir): Characterize accuracy; mention pitfalls; provide alternatives.
+
+#ifndef CERES_INTERNAL_RUNTIME_NUMERIC_DIFF_COST_FUNCTION_H_
+#define CERES_INTERNAL_RUNTIME_NUMERIC_DIFF_COST_FUNCTION_H_
+
+#include "ceres/cost_function.h"
+
+namespace ceres {
+namespace internal {
+
+enum RuntimeNumericDiffMethod {
+ CENTRAL,
+ FORWARD,
+};
+
+// Create a cost function that evaluates the derivative with finite differences.
+// The base cost_function's implementation of Evaluate() only needs to fill in
+// the "residuals" argument and not the "jacobians". Any data written to the
+// jacobians by the base cost_function is overwritten.
+//
+// Forward or central differencing is selected with FORWARD or CENTRAL. The
+// relative_eps argument determines the step size, relative to each parameter's
+// magnitude, used for the differencing. Caller owns the resulting cost
+// function, and the resulting cost function does not own the base cost
+// function.
+CostFunction *CreateRuntimeNumericDiffCostFunction(
+ const CostFunction *cost_function,
+ RuntimeNumericDiffMethod method,
+ double relative_eps);
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_RUNTIME_NUMERIC_DIFF_COST_FUNCTION_H_
diff --git a/internal/ceres/runtime_numeric_diff_cost_function_test.cc b/internal/ceres/runtime_numeric_diff_cost_function_test.cc
new file mode 100644
index 0000000..71469ea
--- /dev/null
+++ b/internal/ceres/runtime_numeric_diff_cost_function_test.cc
@@ -0,0 +1,222 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Based on the tests in numeric_diff_cost_function.cc.
+//
+// TODO(keir): See about code duplication.
+
+#include "ceres/runtime_numeric_diff_cost_function.h"
+
+#include <algorithm>
+#include <cmath>
+#include <string>
+#include <vector>
+#include "ceres/cost_function.h"
+#include "ceres/internal/macros.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/stringprintf.h"
+#include "ceres/test_util.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+const double kRelativeEps = 1e-6;
+
+// y1 = x1'x2 -> dy1/dx1 = x2, dy1/dx2 = x1
+// y2 = (x1'x2)^2 -> dy2/dx1 = 2 * x2 * (x1'x2), dy2/dx2 = 2 * x1 * (x1'x2)
+// y3 = x2'x2 -> dy3/dx1 = 0, dy3/dx2 = 2 * x2
+class TestCostFunction : public CostFunction {
+ public:
+ TestCostFunction() {
+ set_num_residuals(3);
+ mutable_parameter_block_sizes()->push_back(5); // x1.
+ mutable_parameter_block_sizes()->push_back(5); // x2.
+ }
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ (void) jacobians; // Ignored.
+
+ residuals[0] = residuals[1] = residuals[2] = 0;
+ for (int i = 0; i < 5; ++i) {
+ residuals[0] += parameters[0][i] * parameters[1][i];
+ residuals[2] += parameters[1][i] * parameters[1][i];
+ }
+ residuals[1] = residuals[0] * residuals[0];
+ return true;
+ }
+};
+
+TEST(NumericDiffCostFunction, EasyCase) {
+ // Try both central and forward difference.
+ TestCostFunction term;
+ scoped_ptr<CostFunction> cfs[2];
+ cfs[0].reset(
+ CreateRuntimeNumericDiffCostFunction(&term, CENTRAL, kRelativeEps));
+
+ cfs[1].reset(
+ CreateRuntimeNumericDiffCostFunction(&term, FORWARD, kRelativeEps));
+
+
+ for (int c = 0; c < 2; ++c) {
+ CostFunction *cost_function = cfs[c].get();
+
+ double x1[] = { 1.0, 2.0, 3.0, 4.0, 5.0 };
+ double x2[] = { 9.0, 9.0, 5.0, 5.0, 1.0 };
+ double *parameters[] = { &x1[0], &x2[0] };
+
+ double dydx1[15]; // 3 x 5, row major.
+ double dydx2[15]; // 3 x 5, row major.
+ double *jacobians[2] = { &dydx1[0], &dydx2[0] };
+
+ double residuals[3] = {-1e-100, -2e-100, -3e-100 };
+
+ ASSERT_TRUE(cost_function->Evaluate(&parameters[0],
+ &residuals[0],
+ &jacobians[0]));
+
+ EXPECT_EQ(residuals[0], 67);
+ EXPECT_EQ(residuals[1], 4489);
+ EXPECT_EQ(residuals[2], 213);
+
+ for (int i = 0; i < 5; ++i) {
+ LOG(INFO) << "c = " << c << " i = " << i;
+ const double kEps = c == 0 ? /* central */ 3e-9 : /* forward */ 2e-5;
+
+ ExpectClose(x2[i], dydx1[5 * 0 + i], kEps); // y1
+ ExpectClose(x1[i], dydx2[5 * 0 + i], kEps);
+ ExpectClose(2 * x2[i] * residuals[0], dydx1[5 * 1 + i], kEps); // y2
+ ExpectClose(2 * x1[i] * residuals[0], dydx2[5 * 1 + i], kEps);
+ ExpectClose(0.0, dydx1[5 * 2 + i], kEps); // y3
+ ExpectClose(2 * x2[i], dydx2[5 * 2 + i], kEps);
+ }
+ }
+}
+
+// y1 = sin(x1'x2)
+// y2 = exp(-x1'x2 / 10)
+//
+// dy1/dx1 = x2 * cos(x1'x2), dy1/dx2 = x1 * cos(x1'x2)
+// dy2/dx1 = -x2 * exp(-x1'x2 / 10) / 10, dy2/dx2 = -x1 * exp(-x1'x2 / 10) / 10
+class TranscendentalTestCostFunction : public CostFunction {
+ public:
+ TranscendentalTestCostFunction() {
+ set_num_residuals(2);
+ mutable_parameter_block_sizes()->push_back(5); // x1.
+ mutable_parameter_block_sizes()->push_back(5); // x2.
+ }
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ (void) jacobians; // Ignored.
+
+ double x1x2 = 0;
+ for (int i = 0; i < 5; ++i) {
+ x1x2 += parameters[0][i] * parameters[1][i];
+ }
+ residuals[0] = sin(x1x2);
+ residuals[1] = exp(-x1x2 / 10);
+ return true;
+ }
+};
+
+TEST(NumericDiffCostFunction, TranscendentalOperationsInCostFunction) {
+ // Try both central and forward difference.
+ TranscendentalTestCostFunction term;
+ scoped_ptr<CostFunction> cfs[2];
+ cfs[0].reset(
+ CreateRuntimeNumericDiffCostFunction(&term, CENTRAL, kRelativeEps));
+
+ cfs[1].reset(
+ CreateRuntimeNumericDiffCostFunction(&term, FORWARD, kRelativeEps));
+
+ for (int c = 0; c < 2; ++c) {
+ CostFunction *cost_function = cfs[c].get();
+
+ struct {
+ double x1[5];
+ double x2[5];
+ } kTests[] = {
+ { { 1.0, 2.0, 3.0, 4.0, 5.0 }, // No zeros.
+ { 9.0, 9.0, 5.0, 5.0, 1.0 },
+ },
+ { { 0.0, 2.0, 3.0, 0.0, 5.0 }, // Some zeros x1.
+ { 9.0, 9.0, 5.0, 5.0, 1.0 },
+ },
+ { { 1.0, 2.0, 3.0, 1.0, 5.0 }, // Some zeros x2.
+ { 0.0, 9.0, 0.0, 5.0, 0.0 },
+ },
+ { { 0.0, 0.0, 0.0, 0.0, 0.0 }, // All zeros x1.
+ { 9.0, 9.0, 5.0, 5.0, 1.0 },
+ },
+ { { 1.0, 2.0, 3.0, 4.0, 5.0 }, // All zeros x2.
+ { 0.0, 0.0, 0.0, 0.0, 0.0 },
+ },
+ { { 0.0, 0.0, 0.0, 0.0, 0.0 }, // All zeros.
+ { 0.0, 0.0, 0.0, 0.0, 0.0 },
+ },
+ };
+ for (int k = 0; k < CERES_ARRAYSIZE(kTests); ++k) {
+ double *x1 = &(kTests[k].x1[0]);
+ double *x2 = &(kTests[k].x2[0]);
+ double *parameters[] = { x1, x2 };
+
+ double dydx1[10];
+ double dydx2[10];
+ double *jacobians[2] = { &dydx1[0], &dydx2[0] };
+
+ double residuals[2];
+
+ ASSERT_TRUE(cost_function->Evaluate(&parameters[0],
+ &residuals[0],
+ &jacobians[0]));
+ LOG(INFO) << "Ran evaluate for test k=" << k << " c=" << c;
+
+ double x1x2 = 0;
+ for (int i = 0; i < 5; ++i) {
+ x1x2 += x1[i] * x2[i];
+ }
+
+ for (int i = 0; i < 5; ++i) {
+ const double kEps = (c == 0 ? /* central */ 3e-9 : /* forward */ 2e-5);
+
+ ExpectClose( x2[i] * cos(x1x2), dydx1[5 * 0 + i], kEps); // NOLINT
+ ExpectClose( x1[i] * cos(x1x2), dydx2[5 * 0 + i], kEps); // NOLINT
+ ExpectClose(-x2[i] * exp(-x1x2 / 10.) / 10., dydx1[5 * 1 + i], kEps);
+ ExpectClose(-x1[i] * exp(-x1x2 / 10.) / 10., dydx2[5 * 1 + i], kEps);
+ }
+ }
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/schur_complement_solver.cc b/internal/ceres/schur_complement_solver.cc
new file mode 100644
index 0000000..9b7d4e5
--- /dev/null
+++ b/internal/ceres/schur_complement_solver.cc
@@ -0,0 +1,387 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <algorithm>
+#include <ctime>
+#include <set>
+#include <vector>
+
+#ifndef CERES_NO_CXSPARSE
+#include "cs.h"
+#endif // CERES_NO_CXSPARSE
+
+#include "Eigen/Dense"
+#include "ceres/block_random_access_dense_matrix.h"
+#include "ceres/block_random_access_matrix.h"
+#include "ceres/block_random_access_sparse_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/detect_structure.h"
+#include "ceres/linear_solver.h"
+#include "ceres/schur_complement_solver.h"
+#include "ceres/suitesparse.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+
+
+namespace ceres {
+namespace internal {
+
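+// A rough sketch of the flow in SolveImpl() below (ignoring the optional
+// per-solve diagonal D): with the Jacobian partitioned as J = [E F], the
+// eliminator forms the Schur complement of the normal equations,
+// S = F'F - F'E (E'E)^-1 E'F, and the matching right hand side r;
+// SolveReducedLinearSystem() then solves S y = r for the f-block variables,
+// and BackSubstitute() recovers the eliminated e-block variables.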
+LinearSolver::Summary SchurComplementSolver::SolveImpl(
+ BlockSparseMatrixBase* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x) {
+ const time_t start_time = time(NULL);
+ if (eliminator_.get() == NULL) {
+ InitStorage(A->block_structure());
+ DetectStructure(*A->block_structure(),
+ options_.elimination_groups[0],
+ &options_.row_block_size,
+ &options_.e_block_size,
+ &options_.f_block_size);
+ eliminator_.reset(CHECK_NOTNULL(SchurEliminatorBase::Create(options_)));
+ eliminator_->Init(options_.elimination_groups[0], A->block_structure());
+ }
+ const time_t init_time = time(NULL);
+ fill(x, x + A->num_cols(), 0.0);
+
+ LinearSolver::Summary summary;
+ summary.num_iterations = 1;
+ summary.termination_type = FAILURE;
+ eliminator_->Eliminate(A, b, per_solve_options.D, lhs_.get(), rhs_.get());
+ const time_t eliminate_time = time(NULL);
+
+ double* reduced_solution = x + A->num_cols() - lhs_->num_cols();
+ const bool status = SolveReducedLinearSystem(reduced_solution);
+ const time_t solve_time = time(NULL);
+
+ if (!status) {
+ return summary;
+ }
+
+ eliminator_->BackSubstitute(A, b, per_solve_options.D, reduced_solution, x);
+ const time_t backsubstitute_time = time(NULL);
+ summary.termination_type = TOLERANCE;
+
+ VLOG(2) << "time (sec) total: " << (backsubstitute_time - start_time)
+ << " init: " << (init_time - start_time)
+ << " eliminate: " << (eliminate_time - init_time)
+ << " solve: " << (solve_time - eliminate_time)
+ << " backsubstitute: " << (backsubstitute_time - solve_time);
+ return summary;
+}
+
+// Initialize a BlockRandomAccessDenseMatrix to store the Schur
+// complement.
+void DenseSchurComplementSolver::InitStorage(
+ const CompressedRowBlockStructure* bs) {
+ const int num_eliminate_blocks = options().elimination_groups[0];
+ const int num_col_blocks = bs->cols.size();
+
+ vector<int> blocks(num_col_blocks - num_eliminate_blocks, 0);
+ for (int i = num_eliminate_blocks, j = 0;
+ i < num_col_blocks;
+ ++i, ++j) {
+ blocks[j] = bs->cols[i].size;
+ }
+
+ set_lhs(new BlockRandomAccessDenseMatrix(blocks));
+ set_rhs(new double[lhs()->num_rows()]);
+}
+
+// Solve the system Sx = r, assuming that the matrix S is stored in a
+// BlockRandomAccessDenseMatrix. The linear system is solved using
+// Eigen's Cholesky factorization.
+bool DenseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
+ const BlockRandomAccessDenseMatrix* m =
+ down_cast<const BlockRandomAccessDenseMatrix*>(lhs());
+ const int num_rows = m->num_rows();
+
+ // The case where there are no f blocks, and the system is block
+ // diagonal.
+ if (num_rows == 0) {
+ return true;
+ }
+
+ // TODO(sameeragarwal): Add proper error handling; this completely ignores
+ // the quality of the solution to the solve.
+ VectorRef(solution, num_rows) =
+ ConstMatrixRef(m->values(), num_rows, num_rows)
+ .selfadjointView<Eigen::Upper>()
+ .ldlt()
+ .solve(ConstVectorRef(rhs(), num_rows));
+
+ return true;
+}
+
+
+SparseSchurComplementSolver::SparseSchurComplementSolver(
+ const LinearSolver::Options& options)
+ : SchurComplementSolver(options) {
+#ifndef CERES_NO_SUITESPARSE
+ factor_ = NULL;
+#endif // CERES_NO_SUITESPARSE
+
+#ifndef CERES_NO_CXSPARSE
+ cxsparse_factor_ = NULL;
+#endif // CERES_NO_CXSPARSE
+}
+
+SparseSchurComplementSolver::~SparseSchurComplementSolver() {
+#ifndef CERES_NO_SUITESPARSE
+ if (factor_ != NULL) {
+ ss_.Free(factor_);
+ factor_ = NULL;
+ }
+#endif // CERES_NO_SUITESPARSE
+
+#ifndef CERES_NO_CXSPARSE
+ if (cxsparse_factor_ != NULL) {
+ cxsparse_.Free(cxsparse_factor_);
+ cxsparse_factor_ = NULL;
+ }
+#endif // CERES_NO_CXSPARSE
+}
+
+// Determine the non-zero blocks in the Schur Complement matrix, and
+// initialize a BlockRandomAccessSparseMatrix object.
+void SparseSchurComplementSolver::InitStorage(
+ const CompressedRowBlockStructure* bs) {
+ const int num_eliminate_blocks = options().elimination_groups[0];
+ const int num_col_blocks = bs->cols.size();
+ const int num_row_blocks = bs->rows.size();
+
+ blocks_.resize(num_col_blocks - num_eliminate_blocks, 0);
+ for (int i = num_eliminate_blocks; i < num_col_blocks; ++i) {
+ blocks_[i - num_eliminate_blocks] = bs->cols[i].size;
+ }
+
+ set<pair<int, int> > block_pairs;
+ for (int i = 0; i < blocks_.size(); ++i) {
+ block_pairs.insert(make_pair(i, i));
+ }
+
+ int r = 0;
+ while (r < num_row_blocks) {
+ int e_block_id = bs->rows[r].cells.front().block_id;
+ if (e_block_id >= num_eliminate_blocks) {
+ break;
+ }
+ vector<int> f_blocks;
+
+ // Add to the chunk until the first block in the row is
+ // different from the one in the first row of the chunk.
+ for (; r < num_row_blocks; ++r) {
+ const CompressedRow& row = bs->rows[r];
+ if (row.cells.front().block_id != e_block_id) {
+ break;
+ }
+
+ // Iterate over the blocks in the row, ignoring the first
+ // block since it is the one to be eliminated.
+ for (int c = 1; c < row.cells.size(); ++c) {
+ const Cell& cell = row.cells[c];
+ f_blocks.push_back(cell.block_id - num_eliminate_blocks);
+ }
+ }
+
+ sort(f_blocks.begin(), f_blocks.end());
+ f_blocks.erase(unique(f_blocks.begin(), f_blocks.end()), f_blocks.end());
+ for (int i = 0; i < f_blocks.size(); ++i) {
+ for (int j = i + 1; j < f_blocks.size(); ++j) {
+ block_pairs.insert(make_pair(f_blocks[i], f_blocks[j]));
+ }
+ }
+ }
+
+ // Remaining rows do not contribute to the chunks and go directly
+ // into the Schur complement via an outer product.
+ for (; r < num_row_blocks; ++r) {
+ const CompressedRow& row = bs->rows[r];
+ CHECK_GE(row.cells.front().block_id, num_eliminate_blocks);
+ for (int i = 0; i < row.cells.size(); ++i) {
+ int r_block1_id = row.cells[i].block_id - num_eliminate_blocks;
+ for (int j = 0; j < row.cells.size(); ++j) {
+ int r_block2_id = row.cells[j].block_id - num_eliminate_blocks;
+ if (r_block1_id <= r_block2_id) {
+ block_pairs.insert(make_pair(r_block1_id, r_block2_id));
+ }
+ }
+ }
+ }
+
+ set_lhs(new BlockRandomAccessSparseMatrix(blocks_, block_pairs));
+ set_rhs(new double[lhs()->num_rows()]);
+}
+
+bool SparseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
+ switch (options().sparse_linear_algebra_library) {
+ case SUITE_SPARSE:
+ return SolveReducedLinearSystemUsingSuiteSparse(solution);
+ case CX_SPARSE:
+ return SolveReducedLinearSystemUsingCXSparse(solution);
+ default:
+ LOG(FATAL) << "Unknown sparse linear algebra library : "
+ << options().sparse_linear_algebra_library;
+ }
+
+ LOG(FATAL) << "Unknown sparse linear algebra library : "
+ << options().sparse_linear_algebra_library;
+ return false;
+}
+
+#ifndef CERES_NO_SUITESPARSE
+// Solve the system Sx = r, assuming that the matrix S is stored in a
+// BlockRandomAccessSparseMatrix. The linear system is solved using
+// CHOLMOD's sparse cholesky factorization routines.
+bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
+ double* solution) {
+ const time_t start_time = time(NULL);
+
+ TripletSparseMatrix* tsm =
+ const_cast<TripletSparseMatrix*>(
+ down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix());
+
+ const int num_rows = tsm->num_rows();
+
+ // The case where there are no f blocks, and the system is block
+ // diagonal.
+ if (num_rows == 0) {
+ return true;
+ }
+
+ cholmod_sparse* cholmod_lhs = ss_.CreateSparseMatrix(tsm);
+ // The matrix is symmetric, and the upper triangular part of the
+ // matrix contains the values.
+ cholmod_lhs->stype = 1;
+ const time_t lhs_time = time(NULL);
+
+ cholmod_dense* cholmod_rhs =
+ ss_.CreateDenseVector(const_cast<double*>(rhs()), num_rows, num_rows);
+ const time_t rhs_time = time(NULL);
+
+ // Symbolic factorization is computed if we don't already have one handy.
+ if (factor_ == NULL) {
+ if (options().use_block_amd) {
+ factor_ = ss_.BlockAnalyzeCholesky(cholmod_lhs, blocks_, blocks_);
+ } else {
+ factor_ = ss_.AnalyzeCholesky(cholmod_lhs);
+ }
+
+ if (VLOG_IS_ON(2)) {
+ cholmod_print_common("Symbolic Analysis", ss_.mutable_cc());
+ }
+ }
+
+ CHECK_NOTNULL(factor_);
+
+ const time_t symbolic_time = time(NULL);
+ cholmod_dense* cholmod_solution =
+ ss_.SolveCholesky(cholmod_lhs, factor_, cholmod_rhs);
+
+ const time_t solve_time = time(NULL);
+
+ ss_.Free(cholmod_lhs);
+ cholmod_lhs = NULL;
+ ss_.Free(cholmod_rhs);
+ cholmod_rhs = NULL;
+
+ if (cholmod_solution == NULL) {
+ LOG(WARNING) << "CHOLMOD solve failed.";
+ return false;
+ }
+
+ VectorRef(solution, num_rows)
+ = VectorRef(static_cast<double*>(cholmod_solution->x), num_rows);
+ ss_.Free(cholmod_solution);
+ const time_t final_time = time(NULL);
+ VLOG(2) << "time: " << (final_time - start_time)
+ << " lhs : " << (lhs_time - start_time)
+ << " rhs: " << (rhs_time - lhs_time)
+ << " analyze: " << (symbolic_time - rhs_time)
+ << " factor_and_solve: " << (solve_time - symbolic_time)
+ << " cleanup: " << (final_time - solve_time);
+ return true;
+}
+#else
+bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
+ double* solution) {
+ LOG(FATAL) << "No SuiteSparse support in Ceres.";
+ return false;
+}
+#endif // CERES_NO_SUITESPARSE
+
+#ifndef CERES_NO_CXSPARSE
+// Solve the system Sx = r, assuming that the matrix S is stored in a
+// BlockRandomAccessSparseMatrix. The linear system is solved using
+// CXSparse's sparse cholesky factorization routines.
+bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
+ double* solution) {
+ // Extract the TripletSparseMatrix that is used for actually storing S.
+ TripletSparseMatrix* tsm =
+ const_cast<TripletSparseMatrix*>(
+ down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix());
+
+ const int num_rows = tsm->num_rows();
+
+ // The case where there are no f blocks, and the system is block
+ // diagonal.
+ if (num_rows == 0) {
+ return true;
+ }
+
+ cs_di* lhs = CHECK_NOTNULL(cxsparse_.CreateSparseMatrix(tsm));
+ VectorRef(solution, num_rows) = ConstVectorRef(rhs(), num_rows);
+
+ // Compute symbolic factorization if not available.
+ if (cxsparse_factor_ == NULL) {
+ cxsparse_factor_ = CHECK_NOTNULL(cxsparse_.AnalyzeCholesky(lhs));
+ }
+
+ // Solve the linear system.
+ bool ok = cxsparse_.SolveCholesky(lhs, cxsparse_factor_, solution);
+
+ cxsparse_.Free(lhs);
+ return ok;
+}
+#else
+bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
+ double* solution) {
+ LOG(FATAL) << "No CXSparse support in Ceres.";
+ return false;
+}
+#endif // CERES_NO_CXSPARSE
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/schur_complement_solver.h b/internal/ceres/schur_complement_solver.h
new file mode 100644
index 0000000..c382f95
--- /dev/null
+++ b/internal/ceres/schur_complement_solver.h
@@ -0,0 +1,187 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_SCHUR_COMPLEMENT_SOLVER_H_
+#define CERES_INTERNAL_SCHUR_COMPLEMENT_SOLVER_H_
+
+#include <set>
+#include <utility>
+
+#include "ceres/block_random_access_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/cxsparse.h"
+#include "ceres/linear_solver.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/suitesparse.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockSparseMatrixBase;
+
+// Base class for Schur complement based linear least squares
+// solvers. It assumes that the input linear system Ax = b can be
+// partitioned into
+//
+// E y + F z = b
+//
+// Where x = [y;z] is a partition of the variables. The partitioning
+// of the variables is such that E'E is a block diagonal
+// matrix. Further, the rows of A are ordered so that for every
+// variable block in y, all the rows containing that variable block
+// occur as a vertically contiguous block, i.e. the matrix A looks like
+//
+// E F
+// A = [ y1 0 0 0 | z1 0 0 0 z5]
+// [ y1 0 0 0 | z1 z2 0 0 0]
+// [ 0 y2 0 0 | 0 0 z3 0 0]
+// [ 0 0 y3 0 | z1 z2 z3 z4 z5]
+// [ 0 0 y3 0 | z1 0 0 0 z5]
+// [ 0 0 0 y4 | 0 0 0 0 z5]
+// [ 0 0 0 y4 | 0 z2 0 0 0]
+// [ 0 0 0 y4 | 0 0 0 0 0]
+// [ 0 0 0 0 | z1 0 0 0 0]
+// [ 0 0 0 0 | 0 0 z3 z4 z5]
+//
+// This structure should be reflected in the corresponding
+// CompressedRowBlockStructure object associated with A. The linear
+// system Ax = b should either be well posed or the array D below
+// should be non-null and the diagonal matrix corresponding to it
+// should be non-singular.
+//
+// SchurComplementSolver has two sub-classes.
+//
+// DenseSchurComplementSolver: For problems where the Schur complement
+// matrix is small and dense, or if CHOLMOD/SuiteSparse is not
+// installed. For structure from motion problems, this solver can
+// be used for problems with up to a few hundred cameras.
+//
+// SparseSchurComplementSolver: For problems where the Schur
+// complement matrix is large and sparse. It requires that
+// CHOLMOD/SuiteSparse be installed, as it uses CHOLMOD to find a
+// sparse Cholesky factorization of the Schur complement. This solver
+// can be used for solving structure from motion problems with tens of
+// thousands of cameras, though depending on the exact sparsity
+// structure, it may be better to use an iterative solver.
+//
+// The two solvers can be instantiated by calling
+// LinearSolver::Create with LinearSolver::Options::type
+// set to DENSE_SCHUR and SPARSE_SCHUR
+// respectively. LinearSolver::Options::elimination_groups[0] should be
+// at least 1.
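+//
+// A minimal usage sketch, mirroring schur_complement_solver_test.cc in
+// this change; here A is assumed to be a BlockSparseMatrix*, b and x
+// arrays of doubles, and num_eliminate_blocks the number of e blocks,
+// all set up by the caller:
+//
+//   LinearSolver::Options options;
+//   options.type = SPARSE_SCHUR;  // or DENSE_SCHUR
+//   options.sparse_linear_algebra_library = SUITE_SPARSE;
+//   options.elimination_groups.push_back(num_eliminate_blocks);
+//   options.elimination_groups.push_back(
+//       A->block_structure()->cols.size() - num_eliminate_blocks);
+//
+//   scoped_ptr<LinearSolver> solver(LinearSolver::Create(options));
+//   LinearSolver::PerSolveOptions per_solve_options;
+//   LinearSolver::Summary summary =
+//       solver->Solve(A, b, per_solve_options, x);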
+class SchurComplementSolver : public BlockSparseMatrixBaseSolver {
+ public:
+ explicit SchurComplementSolver(const LinearSolver::Options& options)
+ : options_(options) {
+ CHECK_GT(options.elimination_groups.size(), 1);
+ CHECK_GT(options.elimination_groups[0], 0);
+ }
+
+ // LinearSolver methods
+ virtual ~SchurComplementSolver() {}
+ virtual LinearSolver::Summary SolveImpl(
+ BlockSparseMatrixBase* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x);
+
+ protected:
+ const LinearSolver::Options& options() const { return options_; }
+
+ const BlockRandomAccessMatrix* lhs() const { return lhs_.get(); }
+ void set_lhs(BlockRandomAccessMatrix* lhs) { lhs_.reset(lhs); }
+ const double* rhs() const { return rhs_.get(); }
+ void set_rhs(double* rhs) { rhs_.reset(rhs); }
+
+ private:
+ virtual void InitStorage(const CompressedRowBlockStructure* bs) = 0;
+ virtual bool SolveReducedLinearSystem(double* solution) = 0;
+
+ LinearSolver::Options options_;
+
+ scoped_ptr<SchurEliminatorBase> eliminator_;
+ scoped_ptr<BlockRandomAccessMatrix> lhs_;
+ scoped_array<double> rhs_;
+
+ CERES_DISALLOW_COPY_AND_ASSIGN(SchurComplementSolver);
+};
+
+// Dense Cholesky factorization based solver.
+class DenseSchurComplementSolver : public SchurComplementSolver {
+ public:
+ explicit DenseSchurComplementSolver(const LinearSolver::Options& options)
+ : SchurComplementSolver(options) {}
+ virtual ~DenseSchurComplementSolver() {}
+
+ private:
+ virtual void InitStorage(const CompressedRowBlockStructure* bs);
+ virtual bool SolveReducedLinearSystem(double* solution);
+
+ CERES_DISALLOW_COPY_AND_ASSIGN(DenseSchurComplementSolver);
+};
+
+
+// Sparse Cholesky factorization based solver.
+class SparseSchurComplementSolver : public SchurComplementSolver {
+ public:
+ explicit SparseSchurComplementSolver(const LinearSolver::Options& options);
+ virtual ~SparseSchurComplementSolver();
+
+ private:
+ virtual void InitStorage(const CompressedRowBlockStructure* bs);
+ virtual bool SolveReducedLinearSystem(double* solution);
+ bool SolveReducedLinearSystemUsingSuiteSparse(double* solution);
+ bool SolveReducedLinearSystemUsingCXSparse(double* solution);
+
+ // Size of the blocks in the Schur complement.
+ vector<int> blocks_;
+
+#ifndef CERES_NO_SUITESPARSE
+ SuiteSparse ss_;
+ // Symbolic factorization of the reduced linear system. Precomputed
+ // once and reused in subsequent calls.
+ cholmod_factor* factor_;
+#endif // CERES_NO_SUITESPARSE
+
+#ifndef CERES_NO_CXSPARSE
+ CXSparse cxsparse_;
+ // Cached factorization
+ cs_dis* cxsparse_factor_;
+#endif // CERES_NO_CXSPARSE
+ CERES_DISALLOW_COPY_AND_ASSIGN(SparseSchurComplementSolver);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_SCHUR_COMPLEMENT_SOLVER_H_
diff --git a/internal/ceres/schur_complement_solver_test.cc b/internal/ceres/schur_complement_solver_test.cc
new file mode 100644
index 0000000..71c6cfd
--- /dev/null
+++ b/internal/ceres/schur_complement_solver_test.cc
@@ -0,0 +1,168 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <cstddef>
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/casts.h"
+#include "ceres/dense_sparse_matrix.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/linear_solver.h"
+#include "ceres/schur_complement_solver.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+class SchurComplementSolverTest : public ::testing::Test {
+ protected:
+ void SetUpFromProblemId(int problem_id) {
+ scoped_ptr<LinearLeastSquaresProblem> problem(
+ CreateLinearLeastSquaresProblemFromId(problem_id));
+
+ CHECK_NOTNULL(problem.get());
+ A.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
+ b.reset(problem->b.release());
+ D.reset(problem->D.release());
+
+ num_cols = A->num_cols();
+ num_rows = A->num_rows();
+ num_eliminate_blocks = problem->num_eliminate_blocks;
+
+ x.reset(new double[num_cols]);
+ sol.reset(new double[num_cols]);
+ sol_d.reset(new double[num_cols]);
+
+ LinearSolver::Options options;
+ options.type = DENSE_QR;
+
+ scoped_ptr<LinearSolver> qr(LinearSolver::Create(options));
+
+ TripletSparseMatrix triplet_A(A->num_rows(),
+ A->num_cols(),
+ A->num_nonzeros());
+ A->ToTripletSparseMatrix(&triplet_A);
+
+ // Gold standard solutions using dense QR factorization.
+ DenseSparseMatrix dense_A(triplet_A);
+ LinearSolver::Summary summary1 =
+ qr->Solve(&dense_A,
+ b.get(),
+ LinearSolver::PerSolveOptions(),
+ sol.get());
+
+ // Gold standard solution with appended diagonal.
+ LinearSolver::PerSolveOptions per_solve_options;
+ per_solve_options.D = D.get();
+ LinearSolver::Summary summary2 =
+ qr->Solve(&dense_A,
+ b.get(),
+ per_solve_options,
+ sol_d.get());
+ }
+
+ void ComputeAndCompareSolutions(
+ int problem_id,
+ bool regularization,
+ ceres::LinearSolverType linear_solver_type,
+ ceres::SparseLinearAlgebraLibraryType sparse_linear_algebra_library) {
+ SetUpFromProblemId(problem_id);
+ LinearSolver::Options options;
+ options.elimination_groups.push_back(num_eliminate_blocks);
+ options.elimination_groups.push_back(
+ A->block_structure()->cols.size() - num_eliminate_blocks);
+ options.type = linear_solver_type;
+ options.sparse_linear_algebra_library = sparse_linear_algebra_library;
+
+ scoped_ptr<LinearSolver> solver(LinearSolver::Create(options));
+
+ LinearSolver::PerSolveOptions per_solve_options;
+ LinearSolver::Summary summary;
+ if (regularization) {
+ per_solve_options.D = D.get();
+ }
+
+ summary = solver->Solve(A.get(), b.get(), per_solve_options, x.get());
+
+ if (regularization) {
+ for (int i = 0; i < num_cols; ++i) {
+ ASSERT_NEAR(sol_d.get()[i], x[i], 1e-10);
+ }
+ } else {
+ for (int i = 0; i < num_cols; ++i) {
+ ASSERT_NEAR(sol.get()[i], x[i], 1e-10);
+ }
+ }
+ }
+
+ int num_rows;
+ int num_cols;
+ int num_eliminate_blocks;
+
+ scoped_ptr<BlockSparseMatrix> A;
+ scoped_array<double> b;
+ scoped_array<double> x;
+ scoped_array<double> D;
+ scoped_array<double> sol;
+ scoped_array<double> sol_d;
+};
+
+#ifndef CERES_NO_SUITESPARSE
+TEST_F(SchurComplementSolverTest, SparseSchurWithSuiteSparse) {
+ ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, SUITE_SPARSE);
+ ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, SUITE_SPARSE);
+ ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, SUITE_SPARSE);
+ ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, SUITE_SPARSE);
+}
+#endif // CERES_NO_SUITESPARSE
+
+#ifndef CERES_NO_CXSPARSE
+TEST_F(SchurComplementSolverTest, SparseSchurWithCXSparse) {
+ ComputeAndCompareSolutions(2, false, SPARSE_SCHUR, CX_SPARSE);
+ ComputeAndCompareSolutions(3, false, SPARSE_SCHUR, CX_SPARSE);
+ ComputeAndCompareSolutions(2, true, SPARSE_SCHUR, CX_SPARSE);
+ ComputeAndCompareSolutions(3, true, SPARSE_SCHUR, CX_SPARSE);
+}
+#endif // CERES_NO_CXSPARSE
+
+TEST_F(SchurComplementSolverTest, DenseSchur) {
+ // The sparse linear algebra library type is ignored for
+ // DENSE_SCHUR.
+ ComputeAndCompareSolutions(2, false, DENSE_SCHUR, SUITE_SPARSE);
+ ComputeAndCompareSolutions(3, false, DENSE_SCHUR, SUITE_SPARSE);
+ ComputeAndCompareSolutions(2, true, DENSE_SCHUR, SUITE_SPARSE);
+ ComputeAndCompareSolutions(3, true, DENSE_SCHUR, SUITE_SPARSE);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/schur_eliminator.cc b/internal/ceres/schur_eliminator.cc
new file mode 100644
index 0000000..44f5be3
--- /dev/null
+++ b/internal/ceres/schur_eliminator.cc
@@ -0,0 +1,141 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+// Editing it manually is not recommended.
+
+#include "ceres/linear_solver.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+SchurEliminatorBase*
+SchurEliminatorBase::Create(const LinearSolver::Options& options) {
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2) &&
+ (options.f_block_size == 2)) {
+ return new SchurEliminator<2, 2, 2>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2) &&
+ (options.f_block_size == 3)) {
+ return new SchurEliminator<2, 2, 3>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2) &&
+ (options.f_block_size == 4)) {
+ return new SchurEliminator<2, 2, 4>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2) &&
+ (options.f_block_size == Dynamic)) {
+ return new SchurEliminator<2, 2, Dynamic>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 3)) {
+ return new SchurEliminator<2, 3, 3>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 4)) {
+ return new SchurEliminator<2, 3, 4>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 9)) {
+ return new SchurEliminator<2, 3, 9>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == Dynamic)) {
+ return new SchurEliminator<2, 3, Dynamic>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 3)) {
+ return new SchurEliminator<2, 4, 3>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 4)) {
+ return new SchurEliminator<2, 4, 4>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == Dynamic)) {
+ return new SchurEliminator<2, 4, Dynamic>(options);
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 2)) {
+ return new SchurEliminator<4, 4, 2>(options);
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 3)) {
+ return new SchurEliminator<4, 4, 3>(options);
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 4)) {
+ return new SchurEliminator<4, 4, 4>(options);
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == Dynamic)) {
+ return new SchurEliminator<4, 4, Dynamic>(options);
+ }
+ if ((options.row_block_size == Dynamic) &&
+ (options.e_block_size == Dynamic) &&
+ (options.f_block_size == Dynamic)) {
+ return new SchurEliminator<Dynamic, Dynamic, Dynamic>(options);
+ }
+
+#endif
+ VLOG(1) << "Template specializations not found for <"
+ << options.row_block_size << ","
+ << options.e_block_size << ","
+ << options.f_block_size << ">";
+ return new SchurEliminator<Dynamic, Dynamic, Dynamic>(options);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/schur_eliminator.h b/internal/ceres/schur_eliminator.h
new file mode 100644
index 0000000..c24fe43
--- /dev/null
+++ b/internal/ceres/schur_eliminator.h
@@ -0,0 +1,339 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_SCHUR_ELIMINATOR_H_
+#define CERES_INTERNAL_SCHUR_ELIMINATOR_H_
+
+#include <map>
+#include <vector>
+#include "ceres/mutex.h"
+#include "ceres/block_random_access_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/linear_solver.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+
+namespace ceres {
+namespace internal {
+
+// Classes implementing the SchurEliminatorBase interface implement
+// variable elimination for linear least squares problems. Assuming
+// that the input linear system Ax = b can be partitioned into
+//
+// E y + F z = b
+//
+// Where x = [y;z] is a partition of the variables. The partitioning
+// of the variables is such that E'E is a block diagonal matrix. Or
+// in other words, the parameter blocks in E form an independent set
+// of the graph implied by the block matrix A'A. Then, this
+// class provides the functionality to compute the Schur complement
+// system
+//
+// S z = r
+//
+// where
+//
+// S = F'F - F'E (E'E)^{-1} E'F and r = F'b - F'E(E'E)^(-1) E'b
+//
+// This is the Eliminate operation, i.e., construct the linear system
+// obtained by eliminating the variables in E.
+//
+// The eliminator also provides the reverse functionality, i.e. given
+// values for z it can back substitute for the values of y, by solving the
+// linear system
+//
+// Ey = b - F z
+//
+// which is done by observing that
+//
+// y = (E'E)^(-1) [E'b - E'F z]
+//
+// The eliminator has a number of requirements.
+//
+// The rows of A are ordered so that for every variable block in y,
+// all the rows containing that variable block occur as a vertically
+// contiguous block, i.e. the matrix A looks like
+//
+// E F chunk
+// A = [ y1 0 0 0 | z1 0 0 0 z5] 1
+// [ y1 0 0 0 | z1 z2 0 0 0] 1
+// [ 0 y2 0 0 | 0 0 z3 0 0] 2
+// [ 0 0 y3 0 | z1 z2 z3 z4 z5] 3
+// [ 0 0 y3 0 | z1 0 0 0 z5] 3
+// [ 0 0 0 y4 | 0 0 0 0 z5] 4
+// [ 0 0 0 y4 | 0 z2 0 0 0] 4
+// [ 0 0 0 y4 | 0 0 0 0 0] 4
+// [ 0 0 0 0 | z1 0 0 0 0] non chunk blocks
+// [ 0 0 0 0 | 0 0 z3 z4 z5] non chunk blocks
+//
+// This structure should be reflected in the corresponding
+// CompressedRowBlockStructure object associated with A. The linear
+// system Ax = b should either be well posed or the array D below
+// should be non-null and the diagonal matrix corresponding to it
+// should be non-singular. For simplicity of exposition only the case
+// with a null D is described.
+//
+// The usual way to do the elimination is as follows. Starting with
+//
+// E y + F z = b
+//
+// we can form the normal equations,
+//
+// E'E y + E'F z = E'b
+// F'E y + F'F z = F'b
+//
+// multiplying both sides of the first equation by (E'E)^(-1) and then
+// by F'E we get
+//
+// F'E y + F'E (E'E)^(-1) E'F z = F'E (E'E)^(-1) E'b
+// F'E y + F'F z = F'b
+//
+// now subtracting the two equations we get
+//
+// [F'F - F'E (E'E)^(-1) E'F] z = F'b - F'E(E'E)^(-1) E'b
+//
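+// As a small concrete illustration, take
+//
+//   E = [1]  F = [1]  b = [1]
+//       [0]      [1]      [1]
+//
+// so that E'E = 1, E'F = 1, F'F = 2, E'b = 1 and F'b = 2. The reduced
+// system is (2 - 1) z = (2 - 1), i.e. z = 1, and back substitution
+// gives y = (E'E)^(-1) [E'b - E'F z] = 1 - 1 = 0, which indeed
+// satisfies E y + F z = b.
+//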
+// Instead of forming the normal equations and operating on them as
+// general sparse matrices, the algorithm here deals with one
+// parameter block in y at a time. The rows corresponding to a single
+// parameter block yi are known as a chunk, and the algorithm operates
+// on one chunk at a time. The mathematics remains the same since the
+// reduced linear system can be shown to be the sum of the reduced
+// linear systems for each chunk. This can be seen by observing two
+// things.
+//
+// 1. E'E is a block diagonal matrix.
+//
+// 2. When E'F is computed, only the terms within a single chunk
+// interact, i.e for y1 column blocks when transposed and multiplied
+// with F, the only non-zero contribution comes from the blocks in
+// chunk1.
+//
+// Thus, the reduced linear system
+//
+// F'F - F'E (E'E)^{-1} E'F
+//
+// can be re-written as
+//
+// sum_k F_k' F_k - F_k'E_k (E_k'E_k)^(-1) E_k' F_k
+//
+// Where the sum is over chunks and E_k'E_k is a dense matrix of size y1
+// x y1.
+//
+// Advanced usage. Until now it has been assumed that the user would
+// be interested in all of the Schur Complement S. However, it is also
+// possible to use this eliminator to obtain an arbitrary submatrix of
+// the full Schur complement. When the eliminator is generating the
+// blocks of S, it asks the BlockRandomAccessMatrix instance passed to
+// it if it has storage for that block. If it does, the eliminator
+// computes/updates it; if not, it is skipped. This is useful when one
+// is interested in constructing a preconditioner based on the Schur
+// Complement, e.g., computing the block diagonal of S so that it can
+// be used as a preconditioner for an Iterative Substructuring based
+// solver [See Agarwal et al, Bundle Adjustment in the Large, ECCV
+// 2010 for an example of such use].
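+//
+// For instance (a sketch of the idea, not a prescription): an lhs
+// constructed as a BlockRandomAccessSparseMatrix with only the diagonal
+// (i, i) block pairs would make the eliminator compute just the block
+// diagonal of S, since GetCell returns NULL for every other block and
+// those updates are skipped.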
+//
+// Example usage: Please see schur_complement_solver.cc
+class SchurEliminatorBase {
+ public:
+ virtual ~SchurEliminatorBase() {}
+
+ // Initialize the eliminator. It is the user's responsibility to call
+ // this function before calling Eliminate or BackSubstitute. It is
+ // also the caller's responsibility to ensure that the
+ // CompressedRowBlockStructure object passed to this method is the
+ // same one (or is equivalent to) the one associated with the
+ // BlockSparseMatrixBase objects below.
+ virtual void Init(int num_eliminate_blocks,
+ const CompressedRowBlockStructure* bs) = 0;
+
+ // Compute the Schur complement system from the augmented linear
+ // least squares problem [A;D] x = [b;0]. The left hand side and the
+ // right hand side of the reduced linear system are returned in lhs
+ // and rhs respectively.
+ //
+ // It is the caller's responsibility to construct and initialize
+ // lhs. Depending upon the structure of the lhs object passed here,
+ // the full or a submatrix of the Schur complement will be computed.
+ //
+ // Since the Schur complement is a symmetric matrix, only the upper
+ // triangular part of the Schur complement is computed.
+ virtual void Eliminate(const BlockSparseMatrixBase* A,
+ const double* b,
+ const double* D,
+ BlockRandomAccessMatrix* lhs,
+ double* rhs) = 0;
+
+ // Given values for the variables z in the F block of A, solve for
+ // the optimal values of the variables y corresponding to the E
+ // block in A.
+ virtual void BackSubstitute(const BlockSparseMatrixBase* A,
+ const double* b,
+ const double* D,
+ const double* z,
+ double* y) = 0;
+ // Factory
+ static SchurEliminatorBase* Create(const LinearSolver::Options& options);
+};
+
+// Templated implementation of the SchurEliminatorBase interface. The
+// templating is on the sizes of the row, e and f blocks in the
+// input matrix. In many problems, the sizes of one or more of these
+// blocks are constant; in that case, it is worth passing these
+// parameters as template arguments so that they are visible to the
+// compiler and can be used for compile time optimization of the low
+// level linear algebra routines.
+//
+// This implementation is multithreaded using OpenMP. The level of
+// parallelism is controlled by LinearSolver::Options::num_threads.
+template <int kRowBlockSize = Dynamic,
+ int kEBlockSize = Dynamic,
+ int kFBlockSize = Dynamic >
+class SchurEliminator : public SchurEliminatorBase {
+ public:
+ explicit SchurEliminator(const LinearSolver::Options& options)
+ : num_threads_(options.num_threads) {
+ }
+
+ // SchurEliminatorBase Interface
+ virtual ~SchurEliminator();
+ virtual void Init(int num_eliminate_blocks,
+ const CompressedRowBlockStructure* bs);
+ virtual void Eliminate(const BlockSparseMatrixBase* A,
+ const double* b,
+ const double* D,
+ BlockRandomAccessMatrix* lhs,
+ double* rhs);
+ virtual void BackSubstitute(const BlockSparseMatrixBase* A,
+ const double* b,
+ const double* D,
+ const double* z,
+ double* y);
+
+ private:
+ // Chunk objects store combinatorial information needed to
+ // efficiently eliminate a whole chunk out of the least squares
+ // problem. Consider the first chunk in the example matrix above.
+ //
+ // [ y1 0 0 0 | z1 0 0 0 z5]
+ // [ y1 0 0 0 | z1 z2 0 0 0]
+ //
+ // One of the intermediate quantities that needs to be calculated is
+ // for each row the product of the y block transposed with the
+ // non-zero z block, and the sum of these blocks across rows. A
+ // temporary array "buffer_" is used for computing and storing them
+ // and the buffer_layout maps the indices of the z-blocks to
+ // position in the buffer_ array. The size of the chunk is the
+ // number of row blocks/residual blocks for the particular y block
+ // being considered.
+ //
+ // For the example chunk shown above,
+ //
+ // size = 2
+ //
+ // The entries of buffer_layout will be filled in the following order.
+ //
+ // buffer_layout[z1] = 0
+ // buffer_layout[z5] = y1 * z1
+ // buffer_layout[z2] = y1 * z1 + y1 * z5
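+ //
+ // As a purely illustrative example, if the blocks y1, z1, z2 and z5
+ // had sizes 3, 4, 4 and 2 respectively, the offsets would be
+ //
+ // buffer_layout[z1] = 0
+ // buffer_layout[z5] = 3 * 4 = 12
+ // buffer_layout[z2] = 3 * 4 + 3 * 2 = 18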
+ typedef map<int, int> BufferLayoutType;
+ struct Chunk {
+ Chunk() : size(0) {}
+ int size;
+ int start;
+ BufferLayoutType buffer_layout;
+ };
+
+ void ChunkDiagonalBlockAndGradient(
+ const Chunk& chunk,
+ const BlockSparseMatrixBase* A,
+ const double* b,
+ int row_block_counter,
+ typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* eet,
+ typename EigenTypes<kEBlockSize>::Vector* g,
+ double* buffer,
+ BlockRandomAccessMatrix* lhs);
+
+ void UpdateRhs(const Chunk& chunk,
+ const BlockSparseMatrixBase* A,
+ const double* b,
+ int row_block_counter,
+ const Vector& inverse_ete_g,
+ double* rhs);
+
+ void ChunkOuterProduct(const CompressedRowBlockStructure* bs,
+ const Matrix& inverse_eet,
+ const double* buffer,
+ const BufferLayoutType& buffer_layout,
+ BlockRandomAccessMatrix* lhs);
+ void EBlockRowOuterProduct(const BlockSparseMatrixBase* A,
+ int row_block_index,
+ BlockRandomAccessMatrix* lhs);
+
+
+ void NoEBlockRowsUpdate(const BlockSparseMatrixBase* A,
+ const double* b,
+ int row_block_counter,
+ BlockRandomAccessMatrix* lhs,
+ double* rhs);
+
+ void NoEBlockRowOuterProduct(const BlockSparseMatrixBase* A,
+ int row_block_index,
+ BlockRandomAccessMatrix* lhs);
+
+ int num_eliminate_blocks_;
+
+ // Block layout of the columns of the reduced linear system. Since
+ // the f blocks can be of varying size, this vector stores the
+ // position of each f block in the row/col of the reduced linear
+ // system. Thus lhs_row_layout_[i] is the row/col position of the
+ // i^th f block.
+ vector<int> lhs_row_layout_;
+
+ // Combinatorial structure of the chunks in A. For more information
+ // see the documentation of the Chunk object above.
+ vector<Chunk> chunks_;
+
+ // Buffer to store the products of the y and z blocks generated
+ // during the elimination phase.
+ scoped_array<double> buffer_;
+ int buffer_size_;
+ int num_threads_;
+ int uneliminated_row_begins_;
+
+ // Locks for the blocks in the right hand side of the reduced linear
+ // system.
+ vector<Mutex*> rhs_locks_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_SCHUR_ELIMINATOR_H_
diff --git a/internal/ceres/schur_eliminator_impl.h b/internal/ceres/schur_eliminator_impl.h
new file mode 100644
index 0000000..6120db9
--- /dev/null
+++ b/internal/ceres/schur_eliminator_impl.h
@@ -0,0 +1,724 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// TODO(sameeragarwal): row_block_counter can perhaps be replaced by
+// Chunk::start ?
+
+#ifndef CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
+#define CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
+
+#ifdef CERES_USE_OPENMP
+#include <omp.h>
+#endif
+
+// Eigen has an internal threshold switching between different matrix
+// multiplication algorithms. In particular for matrices larger than
+// EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD it uses a cache friendly
+// matrix matrix product algorithm that has a higher setup cost. For
+// matrix sizes close to this threshold, especially when the matrices
+// are thin and long, the default choice may not be optimal. This is
+// the case for us, as the default choice causes a 30% performance
+// regression when we moved from Eigen2 to Eigen3.
+#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10
+
+#include <algorithm>
+#include <map>
+#include <glog/logging.h>
+#include "Eigen/Dense"
+#include "ceres/block_random_access_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/block_structure.h"
+#include "ceres/map_util.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/stl_util.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+
+namespace ceres {
+namespace internal {
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::~SchurEliminator() {
+ STLDeleteElements(&rhs_locks_);
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+Init(int num_eliminate_blocks, const CompressedRowBlockStructure* bs) {
+ CHECK_GT(num_eliminate_blocks, 0)
+ << "SchurComplementSolver cannot be initialized with "
+ << "num_eliminate_blocks = 0.";
+
+ num_eliminate_blocks_ = num_eliminate_blocks;
+
+ const int num_col_blocks = bs->cols.size();
+ const int num_row_blocks = bs->rows.size();
+
+ buffer_size_ = 1;
+ chunks_.clear();
+ lhs_row_layout_.clear();
+
+ int lhs_num_rows = 0;
+ // Add a map object for each block in the reduced linear system
+ // and build the row/column block structure of the reduced linear
+ // system.
+ lhs_row_layout_.resize(num_col_blocks - num_eliminate_blocks_);
+ for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
+ lhs_row_layout_[i - num_eliminate_blocks_] = lhs_num_rows;
+ lhs_num_rows += bs->cols[i].size;
+ }
+
+ int r = 0;
+ // Iterate over the row blocks of A, and detect the chunks. The
+ // matrix should already have been ordered so that all rows
+ // containing the same y block are vertically contiguous. Along
+ // the way also compute the amount of space each chunk will need
+ // to perform the elimination.
+ while (r < num_row_blocks) {
+ const int chunk_block_id = bs->rows[r].cells.front().block_id;
+ if (chunk_block_id >= num_eliminate_blocks_) {
+ break;
+ }
+
+ chunks_.push_back(Chunk());
+ Chunk& chunk = chunks_.back();
+ chunk.size = 0;
+ chunk.start = r;
+ int buffer_size = 0;
+ const int e_block_size = bs->cols[chunk_block_id].size;
+
+ // Add to the chunk until the first block in the row is
+ // different than the one in the first row for the chunk.
+ while (r + chunk.size < num_row_blocks) {
+ const CompressedRow& row = bs->rows[r + chunk.size];
+ if (row.cells.front().block_id != chunk_block_id) {
+ break;
+ }
+
+ // Iterate over the blocks in the row, ignoring the first
+ // block since it is the one to be eliminated.
+ for (int c = 1; c < row.cells.size(); ++c) {
+ const Cell& cell = row.cells[c];
+ if (InsertIfNotPresent(
+ &(chunk.buffer_layout), cell.block_id, buffer_size)) {
+ buffer_size += e_block_size * bs->cols[cell.block_id].size;
+ }
+ }
+
+ buffer_size_ = max(buffer_size, buffer_size_);
+ ++chunk.size;
+ }
+
+ CHECK_GT(chunk.size, 0);
+ r += chunk.size;
+ }
+ const Chunk& chunk = chunks_.back();
+
+ uneliminated_row_begins_ = chunk.start + chunk.size;
+ if (num_threads_ > 1) {
+ random_shuffle(chunks_.begin(), chunks_.end());
+ }
+
+ buffer_.reset(new double[buffer_size_ * num_threads_]);
+
+ STLDeleteElements(&rhs_locks_);
+ rhs_locks_.resize(num_col_blocks - num_eliminate_blocks_);
+ for (int i = 0; i < num_col_blocks - num_eliminate_blocks_; ++i) {
+ rhs_locks_[i] = new Mutex;
+ }
+
+ VLOG(1) << "Eliminator threads: " << num_threads_;
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+Eliminate(const BlockSparseMatrixBase* A,
+ const double* b,
+ const double* D,
+ BlockRandomAccessMatrix* lhs,
+ double* rhs) {
+ if (lhs->num_rows() > 0) {
+ lhs->SetZero();
+ VectorRef(rhs, lhs->num_rows()).setZero();
+ }
+
+ const CompressedRowBlockStructure* bs = A->block_structure();
+ const int num_col_blocks = bs->cols.size();
+
+ // Add the diagonal to the schur complement.
+ if (D != NULL) {
+#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
+ for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
+ const int block_id = i - num_eliminate_blocks_;
+ int r, c, row_stride, col_stride;
+ CellInfo* cell_info = lhs->GetCell(block_id, block_id,
+ &r, &c,
+ &row_stride, &col_stride);
+ if (cell_info != NULL) {
+ const int block_size = bs->cols[i].size;
+ typename EigenTypes<kFBlockSize>::ConstVectorRef
+ diag(D + bs->cols[i].position, block_size);
+
+ CeresMutexLock l(&cell_info->m);
+ MatrixRef m(cell_info->values, row_stride, col_stride);
+ m.block(r, c, block_size, block_size).diagonal()
+ += diag.array().square().matrix();
+ }
+ }
+ }
+
+ // Eliminate y blocks one chunk at a time. For each chunk,
+ // compute the entries of the normal equations and the gradient
+ // vector block corresponding to the y block and then apply
+ // Gaussian elimination to them. The matrix ete stores the normal
+ // matrix corresponding to the block being eliminated and array
+ // buffer_ contains the non-zero blocks in the row corresponding
+ // to this y block in the normal equations. This computation is
+ // done in ChunkDiagonalBlockAndGradient. UpdateRhs then applies
+ // Gaussian elimination to the rhs of the normal equations,
+ // updating the rhs of the reduced linear system by modifying rhs
+ // blocks for all the z blocks that share a row block/residual
+ // term with the y block. ChunkOuterProduct does the
+ // corresponding operation for the lhs of the reduced linear
+ // system.
+#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
+ for (int i = 0; i < chunks_.size(); ++i) {
+#ifdef CERES_USE_OPENMP
+ int thread_id = omp_get_thread_num();
+#else
+ int thread_id = 0;
+#endif
+ double* buffer = buffer_.get() + thread_id * buffer_size_;
+ const Chunk& chunk = chunks_[i];
+ const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
+ const int e_block_size = bs->cols[e_block_id].size;
+
+ VectorRef(buffer, buffer_size_).setZero();
+
+ typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
+ ete(e_block_size, e_block_size);
+
+ if (D != NULL) {
+ const typename EigenTypes<kEBlockSize>::ConstVectorRef
+ diag(D + bs->cols[e_block_id].position, e_block_size);
+ ete = diag.array().square().matrix().asDiagonal();
+ } else {
+ ete.setZero();
+ }
+
+ typename EigenTypes<kEBlockSize>::Vector g(e_block_size);
+ g.setZero();
+
+ // We are going to be computing
+ //
+ // S += F'F - F'E(E'E)^{-1}E'F
+ //
+ // for each Chunk. The computation is broken down into a number of
+ // function calls as below.
+
+ // Compute the outer product of the e_blocks with themselves (ete
+ // = E'E). Compute the product of the e_blocks with the
+ // corresponding f_blocks (buffer = E'F), the gradient of the terms
+ // in this chunk (g) and add the outer product of the f_blocks to
+ // Schur complement (S += F'F).
+ ChunkDiagonalBlockAndGradient(
+ chunk, A, b, chunk.start, &ete, &g, buffer, lhs);
+
+ // Normally one wouldn't compute the inverse explicitly, but
+ // e_block_size will typically be a small number like 3, in
+ // which case it is much faster to compute the inverse once and
+ // use it to multiply other matrices/vectors instead of doing a
+ // Solve call over and over again.
+ typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix inverse_ete =
+ ete
+ .template selfadjointView<Eigen::Upper>()
+ .ldlt()
+ .solve(Matrix::Identity(e_block_size, e_block_size));
+
+ // For the current chunk compute and update the rhs of the reduced
+ // linear system.
+ //
+ // rhs = F'b - F'E(E'E)^(-1) E'b
+ UpdateRhs(chunk, A, b, chunk.start, inverse_ete * g, rhs);
+
+ // S -= F'E(E'E)^{-1}E'F
+ ChunkOuterProduct(bs, inverse_ete, buffer, chunk.buffer_layout, lhs);
+ }
+
+ // For rows with no e_blocks, the schur complement update reduces to
+ // S += F'F.
+ NoEBlockRowsUpdate(A, b, uneliminated_row_begins_, lhs, rhs);
+}
+
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+BackSubstitute(const BlockSparseMatrixBase* A,
+ const double* b,
+ const double* D,
+ const double* z,
+ double* y) {
+ const CompressedRowBlockStructure* bs = A->block_structure();
+#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
+ for (int i = 0; i < chunks_.size(); ++i) {
+ const Chunk& chunk = chunks_[i];
+ const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
+ const int e_block_size = bs->cols[e_block_id].size;
+
+ typename EigenTypes<kEBlockSize>::VectorRef y_block(
+ y + bs->cols[e_block_id].position, e_block_size);
+
+ typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
+ ete(e_block_size, e_block_size);
+ if (D != NULL) {
+ const typename EigenTypes<kEBlockSize>::ConstVectorRef
+ diag(D + bs->cols[e_block_id].position, e_block_size);
+ ete = diag.array().square().matrix().asDiagonal();
+ } else {
+ ete.setZero();
+ }
+
+ for (int j = 0; j < chunk.size; ++j) {
+ const CompressedRow& row = bs->rows[chunk.start + j];
+ const double* row_values = A->RowBlockValues(chunk.start + j);
+ const Cell& e_cell = row.cells.front();
+ DCHECK_EQ(e_block_id, e_cell.block_id);
+ const typename EigenTypes<kRowBlockSize, kEBlockSize>::ConstMatrixRef
+ e_block(row_values + e_cell.position,
+ row.block.size,
+ e_block_size);
+
+ typename EigenTypes<kRowBlockSize>::Vector
+ sj =
+ typename EigenTypes<kRowBlockSize>::ConstVectorRef
+ (b + bs->rows[chunk.start + j].block.position,
+ row.block.size);
+
+ for (int c = 1; c < row.cells.size(); ++c) {
+ const int f_block_id = row.cells[c].block_id;
+ const int f_block_size = bs->cols[f_block_id].size;
+ const typename EigenTypes<kRowBlockSize, kFBlockSize>::ConstMatrixRef
+ f_block(row_values + row.cells[c].position,
+ row.block.size, f_block_size);
+ const int r_block = f_block_id - num_eliminate_blocks_;
+
+ sj -= f_block *
+ typename EigenTypes<kFBlockSize>::ConstVectorRef
+ (z + lhs_row_layout_[r_block], f_block_size);
+ }
+
+ y_block += e_block.transpose() * sj;
+ ete.template selfadjointView<Eigen::Upper>()
+ .rankUpdate(e_block.transpose(), 1.0);
+ }
+
+ y_block =
+ ete
+ .template selfadjointView<Eigen::Upper>()
+ .ldlt()
+ .solve(y_block);
+ }
+}
+
+// Update the rhs of the reduced linear system. Compute
+//
+// F'b - F'E(E'E)^(-1) E'b
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+UpdateRhs(const Chunk& chunk,
+ const BlockSparseMatrixBase* A,
+ const double* b,
+ int row_block_counter,
+ const Vector& inverse_ete_g,
+ double* rhs) {
+ const CompressedRowBlockStructure* bs = A->block_structure();
+ const int e_block_size = inverse_ete_g.rows();
+ int b_pos = bs->rows[row_block_counter].block.position;
+ for (int j = 0; j < chunk.size; ++j) {
+ const CompressedRow& row = bs->rows[row_block_counter + j];
+ const double *row_values = A->RowBlockValues(row_block_counter + j);
+ const Cell& e_cell = row.cells.front();
+
+ const typename EigenTypes<kRowBlockSize, kEBlockSize>::ConstMatrixRef
+ e_block(row_values + e_cell.position,
+ row.block.size,
+ e_block_size);
+
+ const typename EigenTypes<kRowBlockSize>::Vector
+ sj =
+ typename EigenTypes<kRowBlockSize>::ConstVectorRef
+ (b + b_pos, row.block.size) - e_block * (inverse_ete_g);
+
+ for (int c = 1; c < row.cells.size(); ++c) {
+ const int block_id = row.cells[c].block_id;
+ const int block_size = bs->cols[block_id].size;
+ const typename EigenTypes<kRowBlockSize, kFBlockSize>::ConstMatrixRef
+ b(row_values + row.cells[c].position,
+ row.block.size, block_size);
+
+ const int block = block_id - num_eliminate_blocks_;
+ CeresMutexLock l(rhs_locks_[block]);
+ typename EigenTypes<kFBlockSize>::VectorRef
+ (rhs + lhs_row_layout_[block], block_size).noalias()
+ += b.transpose() * sj;
+ }
+ b_pos += row.block.size;
+ }
+}
+
+// Given a Chunk - set of rows with the same e_block, e.g. in the
+// following Chunk with two rows.
+//
+// E F
+// [ y11 0 0 0 | z11 0 0 0 z51]
+// [ y12 0 0 0 | z12 z22 0 0 0]
+//
+// this function computes two matrices. The diagonal block matrix
+//
+// ete = y11' * y11 + y12' * y12
+//
+// and the off diagonal blocks in the Gauss-Newton Hessian.
+//
+// buffer = [y11' * z11 + y12' * z12, y12' * z22, y11' * z51]
+//
+// which are zero compressed versions of the block sparse matrices E'E
+// and E'F.
+//
+// and the gradient of the e_block, E'b.
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+ChunkDiagonalBlockAndGradient(
+ const Chunk& chunk,
+ const BlockSparseMatrixBase* A,
+ const double* b,
+ int row_block_counter,
+ typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* ete,
+ typename EigenTypes<kEBlockSize>::Vector* g,
+ double* buffer,
+ BlockRandomAccessMatrix* lhs) {
+ const CompressedRowBlockStructure* bs = A->block_structure();
+
+ int b_pos = bs->rows[row_block_counter].block.position;
+ const int e_block_size = ete->rows();
+
+ // Iterate over the rows in this chunk, for each row, compute the
+ // contribution of its F blocks to the Schur complement, the
+ // contribution of its E block to the matrix E'E (ete), and the
+ // corresponding block in the gradient vector.
+ for (int j = 0; j < chunk.size; ++j) {
+ const CompressedRow& row = bs->rows[row_block_counter + j];
+ const double *row_values = A->RowBlockValues(row_block_counter + j);
+
+ if (row.cells.size() > 1) {
+ EBlockRowOuterProduct(A, row_block_counter + j, lhs);
+ }
+
+ // Extract the e_block, ETE += E_i' E_i
+ const Cell& e_cell = row.cells.front();
+ const typename EigenTypes<kRowBlockSize, kEBlockSize>::ConstMatrixRef
+ e_block(row_values + e_cell.position,
+ row.block.size,
+ e_block_size);
+
+ ete->template selfadjointView<Eigen::Upper>()
+ .rankUpdate(e_block.transpose(), 1.0);
+
+ // g += E_i' b_i
+ g->noalias() += e_block.transpose() *
+ typename EigenTypes<kRowBlockSize>::ConstVectorRef
+ (b + b_pos, row.block.size);
+
+ // buffer = E'F. This computation is done by iterating over the
+ // f_blocks for each row in the chunk.
+ for (int c = 1; c < row.cells.size(); ++c) {
+ const int f_block_id = row.cells[c].block_id;
+ const int f_block_size = bs->cols[f_block_id].size;
+ const typename EigenTypes<kRowBlockSize, kFBlockSize>::ConstMatrixRef
+ f_block(row_values + row.cells[c].position,
+ row.block.size, f_block_size);
+
+ double* buffer_ptr =
+ buffer + FindOrDie(chunk.buffer_layout, f_block_id);
+
+ typename EigenTypes<kEBlockSize, kFBlockSize>::MatrixRef
+ (buffer_ptr, e_block_size, f_block_size).noalias()
+ += e_block.transpose() * f_block;
+ }
+ b_pos += row.block.size;
+ }
+}
+
+// Compute the outer product F'E(E'E)^{-1}E'F and subtract it from the
+// Schur complement matrix, i.e.
+//
+// S -= F'E(E'E)^{-1}E'F.
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+ChunkOuterProduct(const CompressedRowBlockStructure* bs,
+ const Matrix& inverse_ete,
+ const double* buffer,
+ const BufferLayoutType& buffer_layout,
+ BlockRandomAccessMatrix* lhs) {
+ // This is the most computationally expensive part of this
+ // code. Profiling experiments reveal that the bottleneck is not the
+ // computation of the right-hand matrix product, but memory
+ // references to the left hand side.
+ const int e_block_size = inverse_ete.rows();
+ BufferLayoutType::const_iterator it1 = buffer_layout.begin();
+ // S(i,j) -= bi' * ete^{-1} b_j
+ for (; it1 != buffer_layout.end(); ++it1) {
+ const int block1 = it1->first - num_eliminate_blocks_;
+ const int block1_size = bs->cols[it1->first].size;
+
+ const typename EigenTypes<kEBlockSize, kFBlockSize>::ConstMatrixRef
+ b1(buffer + it1->second, e_block_size, block1_size);
+ const typename EigenTypes<kFBlockSize, kEBlockSize>::Matrix
+ b1_transpose_inverse_ete = b1.transpose() * inverse_ete;
+
+ BufferLayoutType::const_iterator it2 = it1;
+ for (; it2 != buffer_layout.end(); ++it2) {
+ const int block2 = it2->first - num_eliminate_blocks_;
+
+ int r, c, row_stride, col_stride;
+ CellInfo* cell_info = lhs->GetCell(block1, block2,
+ &r, &c,
+ &row_stride, &col_stride);
+ if (cell_info == NULL) {
+ continue;
+ }
+
+ const int block2_size = bs->cols[it2->first].size;
+ const typename EigenTypes<kEBlockSize, kFBlockSize>::ConstMatrixRef
+ b2(buffer + it2->second, e_block_size, block2_size);
+
+ CeresMutexLock l(&cell_info->m);
+ MatrixRef m(cell_info->values, row_stride, col_stride);
+
+ // We explicitly construct a block object here instead of using
+ // m.block(), as m.block() variant of the constructor does not
+ // allow mixing of template sizing and runtime sizing parameters
+ // like the Matrix class does.
+ Eigen::Block<MatrixRef, kFBlockSize, kFBlockSize>
+ block(m, r, c, block1_size, block2_size);
+#ifdef CERES_WORK_AROUND_ANDROID_NDK_COMPILER_BUG
+ // Removing the ".noalias()" annotation on the following statement is
+ // necessary to produce a correct build with the Android NDK, including
+ // versions 6, 7, 8, and 8b, when built with STLPort and the
+ // non-standalone toolchain (i.e. ndk-build). This appears to be a
+ // compiler bug; if the workaround is not in place, the line
+ //
+ // block.noalias() -= b1_transpose_inverse_ete * b2;
+ //
+ // gets compiled to
+ //
+ // block.noalias() += b1_transpose_inverse_ete * b2;
+ //
+ // which breaks schur elimination. Introducing a temporary by removing the
+ // .noalias() annotation causes the issue to disappear. Tracking this
+ // issue down was tricky, since the test suite doesn't run when built with
+ // the non-standalone toolchain.
+ //
+ // TODO(keir): Make a reproduction case for this and send it upstream.
+ block -= b1_transpose_inverse_ete * b2;
+#else
+ block.noalias() -= b1_transpose_inverse_ete * b2;
+#endif // CERES_WORK_AROUND_ANDROID_NDK_COMPILER_BUG
+ }
+ }
+}
+
+// For rows with no e_blocks, the schur complement update reduces to S
+// += F'F. This function iterates over the rows of A with no e_block,
+// and calls NoEBlockRowOuterProduct on each row.
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+NoEBlockRowsUpdate(const BlockSparseMatrixBase* A,
+ const double* b,
+ int row_block_counter,
+ BlockRandomAccessMatrix* lhs,
+ double* rhs) {
+ const CompressedRowBlockStructure* bs = A->block_structure();
+ for (; row_block_counter < bs->rows.size(); ++row_block_counter) {
+ const CompressedRow& row = bs->rows[row_block_counter];
+ const double *row_values = A->RowBlockValues(row_block_counter);
+ for (int c = 0; c < row.cells.size(); ++c) {
+ const int block_id = row.cells[c].block_id;
+ const int block_size = bs->cols[block_id].size;
+ const int block = block_id - num_eliminate_blocks_;
+ VectorRef(rhs + lhs_row_layout_[block], block_size).noalias()
+ += (ConstMatrixRef(row_values + row.cells[c].position,
+ row.block.size, block_size).transpose() *
+ ConstVectorRef(b + row.block.position, row.block.size));
+ }
+ NoEBlockRowOuterProduct(A, row_block_counter, lhs);
+ }
+}
+
+
+// A row r of A, which has no e_blocks gets added to the Schur
+// Complement as S += r r'. This function is responsible for computing
+// the contribution of a single row r to the Schur complement. It is
+// very similar in structure to EBlockRowOuterProduct except for
+// one difference. It does not use any of the template
+// parameters. This is because the algorithm used for detecting the
+// static structure of the matrix A only pays attention to rows with
+// e_blocks. This is because rows without e_blocks are rare and
+// typically arise from regularization terms in the original
+// optimization problem, and have a very different structure than the
+// rows with e_blocks. Including them in the static structure
+// detection will lead to most template parameters being set to
+// dynamic. Since the number of rows without e_blocks is small, the
+// lack of templating is not an issue.
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+NoEBlockRowOuterProduct(const BlockSparseMatrixBase* A,
+ int row_block_index,
+ BlockRandomAccessMatrix* lhs) {
+ const CompressedRowBlockStructure* bs = A->block_structure();
+ const CompressedRow& row = bs->rows[row_block_index];
+ const double *row_values = A->RowBlockValues(row_block_index);
+ for (int i = 0; i < row.cells.size(); ++i) {
+ const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
+ DCHECK_GE(block1, 0);
+
+ const int block1_size = bs->cols[row.cells[i].block_id].size;
+ const ConstMatrixRef b1(row_values + row.cells[i].position,
+ row.block.size, block1_size);
+ int r, c, row_stride, col_stride;
+ CellInfo* cell_info = lhs->GetCell(block1, block1,
+ &r, &c,
+ &row_stride, &col_stride);
+ if (cell_info != NULL) {
+ CeresMutexLock l(&cell_info->m);
+ MatrixRef m(cell_info->values, row_stride, col_stride);
+ m.block(r, c, block1_size, block1_size)
+ .selfadjointView<Eigen::Upper>()
+ .rankUpdate(b1.transpose(), 1.0);
+ }
+
+ for (int j = i + 1; j < row.cells.size(); ++j) {
+ const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
+ DCHECK_GE(block2, 0);
+ DCHECK_LT(block1, block2);
+ int r, c, row_stride, col_stride;
+ CellInfo* cell_info = lhs->GetCell(block1, block2,
+ &r, &c,
+ &row_stride, &col_stride);
+ if (cell_info == NULL) {
+ continue;
+ }
+
+ const int block2_size = bs->cols[row.cells[j].block_id].size;
+ CeresMutexLock l(&cell_info->m);
+ MatrixRef m(cell_info->values, row_stride, col_stride);
+ m.block(r, c, block1_size, block2_size).noalias() +=
+ b1.transpose() * ConstMatrixRef(row_values + row.cells[j].position,
+ row.block.size,
+ block2_size);
+ }
+ }
+}
+
+// For a row with an e_block, compute the contribution S += F'F. This
+// function has the same structure as NoEBlockRowOuterProduct, except
+// that this function uses the template parameters.
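+// When kRowBlockSize and kFBlockSize are known at compile time, the
+// EigenTypes typedefs and the Eigen::Block expressions below are
+// fixed-size, which lets Eigen generate specialized code for the common
+// block sizes instead of falling back to dynamically sized operations.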
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
+void
+SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+EBlockRowOuterProduct(const BlockSparseMatrixBase* A,
+ int row_block_index,
+ BlockRandomAccessMatrix* lhs) {
+ const CompressedRowBlockStructure* bs = A->block_structure();
+ const CompressedRow& row = bs->rows[row_block_index];
+ const double *row_values = A->RowBlockValues(row_block_index);
+ for (int i = 1; i < row.cells.size(); ++i) {
+ const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
+ DCHECK_GE(block1, 0);
+
+ const int block1_size = bs->cols[row.cells[i].block_id].size;
+ const typename EigenTypes<kRowBlockSize, kFBlockSize>::ConstMatrixRef
+ b1(row_values + row.cells[i].position,
+ row.block.size, block1_size);
+ {
+ int r, c, row_stride, col_stride;
+ CellInfo* cell_info = lhs->GetCell(block1, block1,
+ &r, &c,
+ &row_stride, &col_stride);
+ if (cell_info == NULL) {
+ continue;
+ }
+
+ CeresMutexLock l(&cell_info->m);
+ MatrixRef m(cell_info->values, row_stride, col_stride);
+
+ Eigen::Block<MatrixRef, kFBlockSize, kFBlockSize>
+ block(m, r, c, block1_size, block1_size);
+ block.template selfadjointView<Eigen::Upper>()
+ .rankUpdate(b1.transpose(), 1.0);
+ }
+
+ for (int j = i + 1; j < row.cells.size(); ++j) {
+ const int block2 = row.cells[j].block_id - num_eliminate_blocks_;
+ DCHECK_GE(block2, 0);
+ DCHECK_LT(block1, block2);
+ const int block2_size = bs->cols[row.cells[j].block_id].size;
+ int r, c, row_stride, col_stride;
+ CellInfo* cell_info = lhs->GetCell(block1, block2,
+ &r, &c,
+ &row_stride, &col_stride);
+ if (cell_info == NULL) {
+ continue;
+ }
+
+ const typename EigenTypes<kRowBlockSize, kFBlockSize>::ConstMatrixRef
+ b2(row_values + row.cells[j].position,
+ row.block.size,
+ block2_size);
+
+ CeresMutexLock l(&cell_info->m);
+ MatrixRef m(cell_info->values, row_stride, col_stride);
+ Eigen::Block<MatrixRef, kFBlockSize, kFBlockSize>
+ block(m, r, c, block1_size, block2_size);
+ block.noalias() += b1.transpose() * b2;
+ }
+ }
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
diff --git a/internal/ceres/schur_eliminator_test.cc b/internal/ceres/schur_eliminator_test.cc
new file mode 100644
index 0000000..56db598
--- /dev/null
+++ b/internal/ceres/schur_eliminator_test.cc
@@ -0,0 +1,229 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/schur_eliminator.h"
+
+#include "Eigen/Dense"
+#include "ceres/block_random_access_dense_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/casts.h"
+#include "ceres/detect_structure.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/test_util.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+// TODO(sameeragarwal): Reduce the size of these tests and redo the
+// parameterization to be more efficient.
+
+namespace ceres {
+namespace internal {
+
+class SchurEliminatorTest : public ::testing::Test {
+ protected:
+ void SetUpFromId(int id) {
+ scoped_ptr<LinearLeastSquaresProblem>
+ problem(CreateLinearLeastSquaresProblemFromId(id));
+ CHECK_NOTNULL(problem.get());
+ SetupHelper(problem.get());
+ }
+
+ void SetUpFromFilename(const string& filename) {
+ scoped_ptr<LinearLeastSquaresProblem>
+ problem(CreateLinearLeastSquaresProblemFromFile(filename));
+ CHECK_NOTNULL(problem.get());
+ SetupHelper(problem.get());
+ }
+
+ void SetupHelper(LinearLeastSquaresProblem* problem) {
+ A.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
+ b.reset(problem->b.release());
+ D.reset(problem->D.release());
+
+ num_eliminate_blocks = problem->num_eliminate_blocks;
+ num_eliminate_cols = 0;
+ const CompressedRowBlockStructure* bs = A->block_structure();
+
+ for (int i = 0; i < num_eliminate_blocks; ++i) {
+ num_eliminate_cols += bs->cols[i].size;
+ }
+ }
+
+ // Compute the golden values for the reduced linear system and the
+ // solution to the linear least squares problem using dense linear
+ // algebra.
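+ //
+ // In block form, with H = [P, Q; Q', R] and g = [g1; g2] partitioned at
+ // num_eliminate_cols, the reference reduced system is
+ //
+ //   lhs = R - Q' inv(P) Q,  rhs = g2 - Q' inv(P) g1.
+ //
+ // P is block diagonal because the e_blocks form an independent set, so
+ // it is inverted block by block below.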
+ void ComputeReferenceSolution(const Vector& D) {
+ Matrix J;
+ A->ToDenseMatrix(&J);
+ VectorRef f(b.get(), J.rows());
+
+ Matrix H = (D.cwiseProduct(D)).asDiagonal();
+ H.noalias() += J.transpose() * J;
+
+ const Vector g = J.transpose() * f;
+ const int schur_size = J.cols() - num_eliminate_cols;
+
+ lhs_expected.resize(schur_size, schur_size);
+ lhs_expected.setZero();
+
+ rhs_expected.resize(schur_size);
+ rhs_expected.setZero();
+
+ sol_expected.resize(J.cols());
+ sol_expected.setZero();
+
+ Matrix P = H.block(0, 0, num_eliminate_cols, num_eliminate_cols);
+ Matrix Q = H.block(0,
+ num_eliminate_cols,
+ num_eliminate_cols,
+ schur_size);
+ Matrix R = H.block(num_eliminate_cols,
+ num_eliminate_cols,
+ schur_size,
+ schur_size);
+ int row = 0;
+ const CompressedRowBlockStructure* bs = A->block_structure();
+ for (int i = 0; i < num_eliminate_blocks; ++i) {
+ const int block_size = bs->cols[i].size;
+ P.block(row, row, block_size, block_size) =
+ P
+ .block(row, row, block_size, block_size)
+ .ldlt()
+ .solve(Matrix::Identity(block_size, block_size));
+ row += block_size;
+ }
+
+ lhs_expected
+ .triangularView<Eigen::Upper>() = R - Q.transpose() * P * Q;
+ rhs_expected =
+ g.tail(schur_size) - Q.transpose() * P * g.head(num_eliminate_cols);
+ sol_expected = H.ldlt().solve(g);
+ }
+
+ void EliminateSolveAndCompare(const VectorRef& diagonal,
+ bool use_static_structure,
+ const double relative_tolerance) {
+ const CompressedRowBlockStructure* bs = A->block_structure();
+ const int num_col_blocks = bs->cols.size();
+ vector<int> blocks(num_col_blocks - num_eliminate_blocks, 0);
+ for (int i = num_eliminate_blocks; i < num_col_blocks; ++i) {
+ blocks[i - num_eliminate_blocks] = bs->cols[i].size;
+ }
+
+ BlockRandomAccessDenseMatrix lhs(blocks);
+
+ const int num_cols = A->num_cols();
+ const int schur_size = lhs.num_rows();
+
+ Vector rhs(schur_size);
+
+ LinearSolver::Options options;
+ options.elimination_groups.push_back(num_eliminate_blocks);
+ if (use_static_structure) {
+ DetectStructure(*bs,
+ num_eliminate_blocks,
+ &options.row_block_size,
+ &options.e_block_size,
+ &options.f_block_size);
+ }
+
+ scoped_ptr<SchurEliminatorBase> eliminator;
+ eliminator.reset(SchurEliminatorBase::Create(options));
+ eliminator->Init(num_eliminate_blocks, A->block_structure());
+ eliminator->Eliminate(A.get(), b.get(), diagonal.data(), &lhs, rhs.data());
+
+ MatrixRef lhs_ref(lhs.mutable_values(), lhs.num_rows(), lhs.num_cols());
+ Vector reduced_sol =
+ lhs_ref
+ .selfadjointView<Eigen::Upper>()
+ .ldlt()
+ .solve(rhs);
+
+ // Solution to the linear least squares problem.
+ Vector sol(num_cols);
+ sol.setZero();
+ sol.tail(schur_size) = reduced_sol;
+ eliminator->BackSubstitute(A.get(),
+ b.get(),
+ diagonal.data(),
+ reduced_sol.data(),
+ sol.data());
+
+ Matrix delta = (lhs_ref - lhs_expected).selfadjointView<Eigen::Upper>();
+ double diff = delta.norm();
+ EXPECT_NEAR(diff / lhs_expected.norm(), 0.0, relative_tolerance);
+ EXPECT_NEAR((rhs - rhs_expected).norm() / rhs_expected.norm(), 0.0,
+ relative_tolerance);
+ EXPECT_NEAR((sol - sol_expected).norm() / sol_expected.norm(), 0.0,
+ relative_tolerance);
+ }
+
+ scoped_ptr<BlockSparseMatrix> A;
+ scoped_array<double> b;
+ scoped_array<double> D;
+ int num_eliminate_blocks;
+ int num_eliminate_cols;
+
+ Matrix lhs_expected;
+ Vector rhs_expected;
+ Vector sol_expected;
+};
+
+TEST_F(SchurEliminatorTest, ScalarProblem) {
+ SetUpFromId(2);
+ Vector zero(A->num_cols());
+ zero.setZero();
+
+ ComputeReferenceSolution(VectorRef(zero.data(), A->num_cols()));
+ EliminateSolveAndCompare(VectorRef(zero.data(), A->num_cols()), true, 1e-14);
+ EliminateSolveAndCompare(VectorRef(zero.data(), A->num_cols()), false, 1e-14);
+
+ ComputeReferenceSolution(VectorRef(D.get(), A->num_cols()));
+ EliminateSolveAndCompare(VectorRef(D.get(), A->num_cols()), true, 1e-14);
+ EliminateSolveAndCompare(VectorRef(D.get(), A->num_cols()), false, 1e-14);
+}
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+TEST_F(SchurEliminatorTest, BlockProblem) {
+ const string input_file = TestFileAbsolutePath("problem-6-1384-000.lsqp");
+
+ SetUpFromFilename(input_file);
+ ComputeReferenceSolution(VectorRef(D.get(), A->num_cols()));
+ EliminateSolveAndCompare(VectorRef(D.get(), A->num_cols()), true, 1e-10);
+ EliminateSolveAndCompare(VectorRef(D.get(), A->num_cols()), false, 1e-10);
+}
+#endif // CERES_NO_PROTOCOL_BUFFERS
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/scratch_evaluate_preparer.cc b/internal/ceres/scratch_evaluate_preparer.cc
new file mode 100644
index 0000000..6f0ceef
--- /dev/null
+++ b/internal/ceres/scratch_evaluate_preparer.cc
@@ -0,0 +1,78 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/scratch_evaluate_preparer.h"
+
+#include "ceres/parameter_block.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+
+namespace ceres {
+namespace internal {
+
+ScratchEvaluatePreparer* ScratchEvaluatePreparer::Create(
+ const Program &program,
+ int num_threads) {
+ ScratchEvaluatePreparer* preparers = new ScratchEvaluatePreparer[num_threads];
+ int max_derivatives_per_residual_block =
+ program.MaxDerivativesPerResidualBlock();
+ for (int i = 0; i < num_threads; i++) {
+ preparers[i].Init(max_derivatives_per_residual_block);
+ }
+ return preparers;
+}
+
+void ScratchEvaluatePreparer::Init(int max_derivatives_per_residual_block) {
+ jacobian_scratch_.reset(
+ new double[max_derivatives_per_residual_block]);
+}
+
+// Point the jacobian blocks into the scratch area of this evaluate preparer.
+void ScratchEvaluatePreparer::Prepare(const ResidualBlock* residual_block,
+ int /* residual_block_index */,
+ SparseMatrix* /* jacobian */,
+ double** jacobians) {
+ double* jacobian_block_cursor = jacobian_scratch_.get();
+ int num_residuals = residual_block->NumResiduals();
+ int num_parameter_blocks = residual_block->NumParameterBlocks();
+ for (int j = 0; j < num_parameter_blocks; ++j) {
+ const ParameterBlock* parameter_block =
+ residual_block->parameter_blocks()[j];
+ if (parameter_block->IsConstant()) {
+ jacobians[j] = NULL;
+ } else {
+ jacobians[j] = jacobian_block_cursor;
+ jacobian_block_cursor += num_residuals * parameter_block->LocalSize();
+ }
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/scratch_evaluate_preparer.h b/internal/ceres/scratch_evaluate_preparer.h
new file mode 100644
index 0000000..6b12708
--- /dev/null
+++ b/internal/ceres/scratch_evaluate_preparer.h
@@ -0,0 +1,69 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// A scratch evaluate preparer provides temporary storage for the jacobians that
+// are created when running user-provided cost functions. The evaluator takes
+// care to avoid evaluating the jacobian for fixed parameters.
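+//
+// A minimal usage sketch (illustrative only; the variable names below are
+// placeholders, and in practice the evaluator machinery drives this):
+//
+//   scoped_array<ScratchEvaluatePreparer> preparers(
+//       ScratchEvaluatePreparer::Create(program, num_threads));
+//   preparers[thread_id].Prepare(residual_block, i, NULL, jacobians);
+//   // jacobians[j] now points into per-thread scratch space, or is NULL
+//   // if the j-th parameter block is constant.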
+
+#ifndef CERES_INTERNAL_SCRATCH_EVALUATE_PREPARER_H_
+#define CERES_INTERNAL_SCRATCH_EVALUATE_PREPARER_H_
+
+#include "ceres/internal/scoped_ptr.h"
+
+namespace ceres {
+namespace internal {
+
+class Program;
+class ResidualBlock;
+class SparseMatrix;
+
+class ScratchEvaluatePreparer {
+ public:
+ // Create num_threads ScratchEvaluatePreparers.
+ static ScratchEvaluatePreparer* Create(const Program &program,
+ int num_threads);
+
+ // EvaluatePreparer interface
+ void Init(int max_derivatives_per_residual_block);
+ void Prepare(const ResidualBlock* residual_block,
+ int residual_block_index,
+ SparseMatrix* jacobian,
+ double** jacobians);
+
+ private:
+ // Scratch space for the jacobians; each jacobian is packed one after another.
+ // There is enough scratch to hold all the jacobians for the largest residual.
+ scoped_array<double> jacobian_scratch_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_SCRATCH_EVALUATE_PREPARER_H_
diff --git a/internal/ceres/solver.cc b/internal/ceres/solver.cc
new file mode 100644
index 0000000..450a710
--- /dev/null
+++ b/internal/ceres/solver.cc
@@ -0,0 +1,240 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+// sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/solver.h"
+
+#include <vector>
+#include "ceres/problem.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/solver_impl.h"
+#include "ceres/stringprintf.h"
+#include "ceres/wall_time.h"
+
+namespace ceres {
+
+Solver::Options::~Options() {
+ delete linear_solver_ordering;
+ delete inner_iteration_ordering;
+}
+
+Solver::~Solver() {}
+
+void Solver::Solve(const Solver::Options& options,
+ Problem* problem,
+ Solver::Summary* summary) {
+ double start_time_seconds = internal::WallTimeInSeconds();
+ internal::ProblemImpl* problem_impl =
+ CHECK_NOTNULL(problem)->problem_impl_.get();
+ internal::SolverImpl::Solve(options, problem_impl, summary);
+ summary->total_time_in_seconds =
+ internal::WallTimeInSeconds() - start_time_seconds;
+}
+
+void Solve(const Solver::Options& options,
+ Problem* problem,
+ Solver::Summary* summary) {
+ Solver solver;
+ solver.Solve(options, problem, summary);
+}
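+
+// A minimal usage sketch from a user's perspective (illustrative only;
+// assumes a fully constructed ceres::Problem named "problem" with
+// residual blocks already added):
+//
+//   ceres::Solver::Options options;
+//   options.max_num_iterations = 50;
+//   ceres::Solver::Summary summary;
+//   ceres::Solve(options, &problem, &summary);
+//   std::cout << summary.FullReport() << "\n";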
+
+Solver::Summary::Summary()
+ // Invalid values for most fields, to ensure that we are not
+ // accidentally reporting default values.
+ : termination_type(DID_NOT_RUN),
+ initial_cost(-1.0),
+ final_cost(-1.0),
+ fixed_cost(-1.0),
+ num_successful_steps(-1),
+ num_unsuccessful_steps(-1),
+ preprocessor_time_in_seconds(-1.0),
+ minimizer_time_in_seconds(-1.0),
+ postprocessor_time_in_seconds(-1.0),
+ total_time_in_seconds(-1.0),
+ num_parameter_blocks(-1),
+ num_parameters(-1),
+ num_residual_blocks(-1),
+ num_residuals(-1),
+ num_parameter_blocks_reduced(-1),
+ num_parameters_reduced(-1),
+ num_residual_blocks_reduced(-1),
+ num_residuals_reduced(-1),
+ num_threads_given(-1),
+ num_threads_used(-1),
+ num_linear_solver_threads_given(-1),
+ num_linear_solver_threads_used(-1),
+ linear_solver_type_given(SPARSE_NORMAL_CHOLESKY),
+ linear_solver_type_used(SPARSE_NORMAL_CHOLESKY),
+ preconditioner_type(IDENTITY),
+ trust_region_strategy_type(LEVENBERG_MARQUARDT),
+ sparse_linear_algebra_library(SUITE_SPARSE) {
+}
+
+string Solver::Summary::BriefReport() const {
+ string report = "Ceres Solver Report: ";
+ if (termination_type == DID_NOT_RUN) {
+ CHECK(!error.empty())
+ << "Solver terminated with DID_NOT_RUN but the solver did not "
+ << "return a reason. This is a Ceres error. Please report this "
+ << "to the Ceres team";
+ return report + "Termination: DID_NOT_RUN, because " + error;
+ }
+
+ internal::StringAppendF(&report, "Iterations: %d",
+ num_successful_steps + num_unsuccessful_steps);
+ internal::StringAppendF(&report, ", Initial cost: %e", initial_cost);
+
+ // If the solver failed or was aborted, then the final_cost has no
+ // meaning.
+ if (termination_type != NUMERICAL_FAILURE &&
+ termination_type != USER_ABORT) {
+ internal::StringAppendF(&report, ", Final cost: %e", final_cost);
+ }
+
+ internal::StringAppendF(&report, ", Termination: %s.",
+ SolverTerminationTypeToString(termination_type));
+ return report;
+}
+
+string Solver::Summary::FullReport() const {
+ string report =
+ "\n"
+ "Ceres Solver Report\n"
+ "-------------------\n";
+
+ if (termination_type == DID_NOT_RUN) {
+ internal::StringAppendF(&report, " Original\n");
+ internal::StringAppendF(&report, "Parameter blocks % 10d\n",
+ num_parameter_blocks);
+ internal::StringAppendF(&report, "Parameters % 10d\n",
+ num_parameters);
+ internal::StringAppendF(&report, "Residual blocks % 10d\n",
+ num_residual_blocks);
+ internal::StringAppendF(&report, "Residuals % 10d\n\n",
+ num_residuals);
+ } else {
+ internal::StringAppendF(&report, "%45s %21s\n", "Original", "Reduced");
+ internal::StringAppendF(&report, "Parameter blocks % 25d% 25d\n",
+ num_parameter_blocks, num_parameter_blocks_reduced);
+ internal::StringAppendF(&report, "Parameters % 25d% 25d\n",
+ num_parameters, num_parameters_reduced);
+ internal::StringAppendF(&report, "Residual blocks % 25d% 25d\n",
+ num_residual_blocks, num_residual_blocks_reduced);
+ internal::StringAppendF(&report, "Residuals % 25d% 25d\n\n",
+ num_residuals, num_residuals_reduced);
+ }
+
+ internal::StringAppendF(&report, "%45s %21s\n", "Given", "Used");
+ internal::StringAppendF(&report, "Linear solver %25s%25s\n",
+ LinearSolverTypeToString(linear_solver_type_given),
+ LinearSolverTypeToString(linear_solver_type_used));
+
+ if (linear_solver_type_given == CGNR ||
+ linear_solver_type_given == ITERATIVE_SCHUR) {
+ internal::StringAppendF(&report, "Preconditioner %25s%25s\n",
+ PreconditionerTypeToString(preconditioner_type),
+ PreconditionerTypeToString(preconditioner_type));
+ } else {
+ internal::StringAppendF(&report, "Preconditioner %25s%25s\n",
+ "N/A", "N/A");
+ }
+
+ // TODO(sameeragarwal): Add support for logging the ordering object.
+ internal::StringAppendF(&report, "Threads: % 25d% 25d\n",
+ num_threads_given, num_threads_used);
+ internal::StringAppendF(&report, "Linear solver threads % 23d% 25d\n",
+ num_linear_solver_threads_given,
+ num_linear_solver_threads_used);
+
+ if (linear_solver_type_used == SPARSE_NORMAL_CHOLESKY ||
+ linear_solver_type_used == SPARSE_SCHUR ||
+ (linear_solver_type_used == ITERATIVE_SCHUR &&
+ (preconditioner_type == SCHUR_JACOBI ||
+ preconditioner_type == CLUSTER_JACOBI ||
+ preconditioner_type == CLUSTER_TRIDIAGONAL))) {
+ internal::StringAppendF(&report, "\nSparse Linear Algebra Library %15s\n",
+ SparseLinearAlgebraLibraryTypeToString(
+ sparse_linear_algebra_library));
+ }
+
+ internal::StringAppendF(&report, "Trust Region Strategy %19s",
+ TrustRegionStrategyTypeToString(
+ trust_region_strategy_type));
+ if (trust_region_strategy_type == DOGLEG) {
+ if (dogleg_type == TRADITIONAL_DOGLEG) {
+ internal::StringAppendF(&report, " (TRADITIONAL)");
+ } else {
+ internal::StringAppendF(&report, " (SUBSPACE)");
+ }
+ }
+ internal::StringAppendF(&report, "\n");
+
+
+ if (termination_type == DID_NOT_RUN) {
+ CHECK(!error.empty())
+ << "Solver terminated with DID_NOT_RUN but the solver did not "
+ << "return a reason. This is a Ceres error. Please report this "
+ << "to the Ceres team";
+ internal::StringAppendF(&report, "Termination: %20s\n",
+ "DID_NOT_RUN");
+ internal::StringAppendF(&report, "Reason: %s\n", error.c_str());
+ return report;
+ }
+
+ internal::StringAppendF(&report, "\nCost:\n");
+ internal::StringAppendF(&report, "Initial % 30e\n", initial_cost);
+ if (termination_type != NUMERICAL_FAILURE && termination_type != USER_ABORT) {
+ internal::StringAppendF(&report, "Final % 30e\n", final_cost);
+ internal::StringAppendF(&report, "Change % 30e\n",
+ initial_cost - final_cost);
+ }
+
+ internal::StringAppendF(&report, "\nNumber of iterations:\n");
+ internal::StringAppendF(&report, "Successful % 20d\n",
+ num_successful_steps);
+ internal::StringAppendF(&report, "Unsuccessful % 20d\n",
+ num_unsuccessful_steps);
+ internal::StringAppendF(&report, "Total % 20d\n",
+ num_successful_steps + num_unsuccessful_steps);
+ internal::StringAppendF(&report, "\nTime (in seconds):\n");
+ internal::StringAppendF(&report, "Preprocessor % 25e\n",
+ preprocessor_time_in_seconds);
+ internal::StringAppendF(&report, "Minimizer % 25e\n",
+ minimizer_time_in_seconds);
+ internal::StringAppendF(&report, "Total % 25e\n",
+ total_time_in_seconds);
+
+ internal::StringAppendF(&report, "Termination: %25s\n",
+ SolverTerminationTypeToString(termination_type));
+ return report;
+}
+
+} // namespace ceres
diff --git a/internal/ceres/solver_impl.cc b/internal/ceres/solver_impl.cc
new file mode 100644
index 0000000..64e0f8e
--- /dev/null
+++ b/internal/ceres/solver_impl.cc
@@ -0,0 +1,1100 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include "ceres/solver_impl.h"
+
+#include <cstdio>
+#include <iostream> // NOLINT
+#include <numeric>
+#include "ceres/coordinate_descent_minimizer.h"
+#include "ceres/evaluator.h"
+#include "ceres/gradient_checking_cost_function.h"
+#include "ceres/iteration_callback.h"
+#include "ceres/levenberg_marquardt_strategy.h"
+#include "ceres/linear_solver.h"
+#include "ceres/map_util.h"
+#include "ceres/minimizer.h"
+#include "ceres/ordered_groups.h"
+#include "ceres/parameter_block.h"
+#include "ceres/parameter_block_ordering.h"
+#include "ceres/problem.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/stringprintf.h"
+#include "ceres/trust_region_minimizer.h"
+#include "ceres/wall_time.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
+// Callback for updating the user's parameter blocks. Updates are only
+// done if the step is successful.
+class StateUpdatingCallback : public IterationCallback {
+ public:
+ StateUpdatingCallback(Program* program, double* parameters)
+ : program_(program), parameters_(parameters) {}
+
+ CallbackReturnType operator()(const IterationSummary& summary) {
+ if (summary.step_is_successful) {
+ program_->StateVectorToParameterBlocks(parameters_);
+ program_->CopyParameterBlockStateToUserState();
+ }
+ return SOLVER_CONTINUE;
+ }
+
+ private:
+ Program* program_;
+ double* parameters_;
+};
+
+// Callback for logging the state of the minimizer to STDERR or STDOUT
+// depending on the user's preferences and logging level.
+class LoggingCallback : public IterationCallback {
+ public:
+ explicit LoggingCallback(bool log_to_stdout)
+ : log_to_stdout_(log_to_stdout) {}
+
+ ~LoggingCallback() {}
+
+ CallbackReturnType operator()(const IterationSummary& summary) {
+ const char* kReportRowFormat =
+ "% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e "
+ "rho:% 3.2e mu:% 3.2e li:% 3d it:% 3.2e tt:% 3.2e";
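+ // Columns: iteration, f: cost, d: cost_change, g: gradient_max_norm,
+ // h: step_norm, rho: relative_decrease, mu: trust_region_radius,
+ // li: linear_solver_iterations, it: iteration_time_in_seconds,
+ // tt: cumulative_time_in_seconds.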
+ string output = StringPrintf(kReportRowFormat,
+ summary.iteration,
+ summary.cost,
+ summary.cost_change,
+ summary.gradient_max_norm,
+ summary.step_norm,
+ summary.relative_decrease,
+ summary.trust_region_radius,
+ summary.linear_solver_iterations,
+ summary.iteration_time_in_seconds,
+ summary.cumulative_time_in_seconds);
+ if (log_to_stdout_) {
+ cout << output << endl;
+ } else {
+ VLOG(1) << output;
+ }
+ return SOLVER_CONTINUE;
+ }
+
+ private:
+ const bool log_to_stdout_;
+};
+
+// Basic callback to record the execution of the solver to a file for
+// offline analysis.
+class FileLoggingCallback : public IterationCallback {
+ public:
+ explicit FileLoggingCallback(const string& filename)
+ : fptr_(NULL) {
+ fptr_ = fopen(filename.c_str(), "w");
+ CHECK_NOTNULL(fptr_);
+ }
+
+ virtual ~FileLoggingCallback() {
+ if (fptr_ != NULL) {
+ fclose(fptr_);
+ }
+ }
+
+ virtual CallbackReturnType operator()(const IterationSummary& summary) {
+ fprintf(fptr_,
+ "%4d %e %e\n",
+ summary.iteration,
+ summary.cost,
+ summary.cumulative_time_in_seconds);
+ return SOLVER_CONTINUE;
+ }
+ private:
+ FILE* fptr_;
+};
+
+} // namespace
+
+void SolverImpl::Minimize(const Solver::Options& options,
+ Program* program,
+ CoordinateDescentMinimizer* inner_iteration_minimizer,
+ Evaluator* evaluator,
+ LinearSolver* linear_solver,
+ double* parameters,
+ Solver::Summary* summary) {
+ Minimizer::Options minimizer_options(options);
+
+ // TODO(sameeragarwal): Add support for logging the configuration
+ // and more detailed stats.
+ scoped_ptr<IterationCallback> file_logging_callback;
+ if (!options.solver_log.empty()) {
+ file_logging_callback.reset(new FileLoggingCallback(options.solver_log));
+ minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
+ file_logging_callback.get());
+ }
+
+ LoggingCallback logging_callback(options.minimizer_progress_to_stdout);
+ if (options.logging_type != SILENT) {
+ minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
+ &logging_callback);
+ }
+
+ StateUpdatingCallback updating_callback(program, parameters);
+ if (options.update_state_every_iteration) {
+ // This must get pushed to the front of the callbacks so that it is run
+ // before any of the user callbacks.
+ minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
+ &updating_callback);
+ }
+
+ minimizer_options.evaluator = evaluator;
+ scoped_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());
+ minimizer_options.jacobian = jacobian.get();
+ minimizer_options.inner_iteration_minimizer = inner_iteration_minimizer;
+
+ TrustRegionStrategy::Options trust_region_strategy_options;
+ trust_region_strategy_options.linear_solver = linear_solver;
+ trust_region_strategy_options.initial_radius =
+ options.initial_trust_region_radius;
+ trust_region_strategy_options.max_radius = options.max_trust_region_radius;
+ trust_region_strategy_options.lm_min_diagonal = options.lm_min_diagonal;
+ trust_region_strategy_options.lm_max_diagonal = options.lm_max_diagonal;
+ trust_region_strategy_options.trust_region_strategy_type =
+ options.trust_region_strategy_type;
+ trust_region_strategy_options.dogleg_type = options.dogleg_type;
+ scoped_ptr<TrustRegionStrategy> strategy(
+ TrustRegionStrategy::Create(trust_region_strategy_options));
+ minimizer_options.trust_region_strategy = strategy.get();
+
+ TrustRegionMinimizer minimizer;
+ double minimizer_start_time = WallTimeInSeconds();
+ minimizer.Minimize(minimizer_options, parameters, summary);
+ summary->minimizer_time_in_seconds =
+ WallTimeInSeconds() - minimizer_start_time;
+}
+
+void SolverImpl::Solve(const Solver::Options& original_options,
+ ProblemImpl* original_problem_impl,
+ Solver::Summary* summary) {
+ double solver_start_time = WallTimeInSeconds();
+
+ Program* original_program = original_problem_impl->mutable_program();
+ ProblemImpl* problem_impl = original_problem_impl;
+
+ // Reset the summary object to its default values.
+ *CHECK_NOTNULL(summary) = Solver::Summary();
+
+ summary->num_parameter_blocks = problem_impl->NumParameterBlocks();
+ summary->num_parameters = problem_impl->NumParameters();
+ summary->num_residual_blocks = problem_impl->NumResidualBlocks();
+ summary->num_residuals = problem_impl->NumResiduals();
+
+ // Empty programs are usually a user error.
+ if (summary->num_parameter_blocks == 0) {
+ summary->error = "Problem contains no parameter blocks.";
+ LOG(ERROR) << summary->error;
+ return;
+ }
+
+ if (summary->num_residual_blocks == 0) {
+ summary->error = "Problem contains no residual blocks.";
+ LOG(ERROR) << summary->error;
+ return;
+ }
+
+ Solver::Options options(original_options);
+ options.linear_solver_ordering = NULL;
+ options.inner_iteration_ordering = NULL;
+
+#ifndef CERES_USE_OPENMP
+ if (options.num_threads > 1) {
+ LOG(WARNING)
+ << "OpenMP support is not compiled into this binary; "
+ << "only options.num_threads=1 is supported. Switching "
+ << "to single threaded mode.";
+ options.num_threads = 1;
+ }
+ if (options.num_linear_solver_threads > 1) {
+ LOG(WARNING)
+ << "OpenMP support is not compiled into this binary; "
+ << "only options.num_linear_solver_threads=1 is supported. Switching "
+ << "to single threaded mode.";
+ options.num_linear_solver_threads = 1;
+ }
+#endif
+
+ summary->num_threads_given = original_options.num_threads;
+ summary->num_threads_used = options.num_threads;
+
+ if (options.lsqp_iterations_to_dump.size() > 0) {
+ LOG(WARNING) << "Dumping linear least squares problems to disk is"
+ " currently broken. Ignoring Solver::Options::lsqp_iterations_to_dump";
+ }
+
+ // Evaluate the initial cost, residual vector and the jacobian
+ // matrix if requested by the user. The initial cost needs to be
+ // computed on the original unpreprocessed problem, as it is used to
+ // determine the value of the "fixed" part of the objective function
+ // after the problem has undergone reduction.
+ if (!Evaluator::Evaluate(original_program,
+ options.num_threads,
+ &(summary->initial_cost),
+ options.return_initial_residuals
+ ? &summary->initial_residuals
+ : NULL,
+ options.return_initial_gradient
+ ? &summary->initial_gradient
+ : NULL,
+ options.return_initial_jacobian
+ ? &summary->initial_jacobian
+ : NULL)) {
+ summary->termination_type = NUMERICAL_FAILURE;
+ summary->error = "Unable to evaluate the initial cost.";
+ LOG(ERROR) << summary->error;
+ return;
+ }
+
+ original_program->SetParameterBlockStatePtrsToUserStatePtrs();
+
+ // If the user requests gradient checking, construct a new
+ // ProblemImpl by wrapping the CostFunctions of problem_impl inside
+ // GradientCheckingCostFunction and replacing problem_impl with
+ // gradient_checking_problem_impl.
+ scoped_ptr<ProblemImpl> gradient_checking_problem_impl;
+ if (options.check_gradients) {
+ VLOG(1) << "Checking Gradients";
+ gradient_checking_problem_impl.reset(
+ CreateGradientCheckingProblemImpl(
+ problem_impl,
+ options.numeric_derivative_relative_step_size,
+ options.gradient_check_relative_precision));
+
+ // From here on, problem_impl will point to the gradient checking
+ // version.
+ problem_impl = gradient_checking_problem_impl.get();
+ }
+
+ if (original_options.linear_solver_ordering != NULL) {
+ if (!IsOrderingValid(original_options, problem_impl, &summary->error)) {
+ LOG(ERROR) << summary->error;
+ return;
+ }
+ options.linear_solver_ordering =
+ new ParameterBlockOrdering(*original_options.linear_solver_ordering);
+ } else {
+ options.linear_solver_ordering = new ParameterBlockOrdering;
+ const ProblemImpl::ParameterMap& parameter_map =
+ problem_impl->parameter_map();
+ for (ProblemImpl::ParameterMap::const_iterator it = parameter_map.begin();
+ it != parameter_map.end();
+ ++it) {
+ options.linear_solver_ordering->AddElementToGroup(it->first, 0);
+ }
+ }
+
+ // Create the three objects needed to minimize: the transformed program, the
+ // evaluator, and the linear solver.
+ scoped_ptr<Program> reduced_program(CreateReducedProgram(&options,
+ problem_impl,
+ &summary->fixed_cost,
+ &summary->error));
+ if (reduced_program == NULL) {
+ return;
+ }
+
+ summary->num_parameter_blocks_reduced = reduced_program->NumParameterBlocks();
+ summary->num_parameters_reduced = reduced_program->NumParameters();
+ summary->num_residual_blocks_reduced = reduced_program->NumResidualBlocks();
+ summary->num_residuals_reduced = reduced_program->NumResiduals();
+
+ if (summary->num_parameter_blocks_reduced == 0) {
+ summary->preprocessor_time_in_seconds =
+ WallTimeInSeconds() - solver_start_time;
+
+ LOG(INFO) << "Terminating: FUNCTION_TOLERANCE reached. "
+ << "No non-constant parameter blocks found.";
+
+ // FUNCTION_TOLERANCE is the right convergence here, as we know
+ // that the objective function is constant and cannot be changed
+ // any further.
+ summary->termination_type = FUNCTION_TOLERANCE;
+
+ double post_process_start_time = WallTimeInSeconds();
+ // Evaluate the final cost, residual vector and the jacobian
+ // matrix if requested by the user.
+ if (!Evaluator::Evaluate(original_program,
+ options.num_threads,
+ &summary->final_cost,
+ options.return_final_residuals
+ ? &summary->final_residuals
+ : NULL,
+ options.return_final_gradient
+ ? &summary->final_gradient
+ : NULL,
+ options.return_final_jacobian
+ ? &summary->final_jacobian
+ : NULL)) {
+ summary->termination_type = NUMERICAL_FAILURE;
+ summary->error = "Unable to evaluate the final cost.";
+ LOG(ERROR) << summary->error;
+ return;
+ }
+
+ // Ensure the program state is set to the user parameters on the way out.
+ original_program->SetParameterBlockStatePtrsToUserStatePtrs();
+
+ summary->postprocessor_time_in_seconds =
+ WallTimeInSeconds() - post_process_start_time;
+ return;
+ }
+
+ scoped_ptr<LinearSolver>
+ linear_solver(CreateLinearSolver(&options, &summary->error));
+ if (linear_solver == NULL) {
+ return;
+ }
+
+ summary->linear_solver_type_given = original_options.linear_solver_type;
+ summary->linear_solver_type_used = options.linear_solver_type;
+
+ summary->preconditioner_type = options.preconditioner_type;
+
+ summary->num_linear_solver_threads_given =
+ original_options.num_linear_solver_threads;
+ summary->num_linear_solver_threads_used = options.num_linear_solver_threads;
+
+ summary->sparse_linear_algebra_library =
+ options.sparse_linear_algebra_library;
+
+ summary->trust_region_strategy_type = options.trust_region_strategy_type;
+ summary->dogleg_type = options.dogleg_type;
+
+ // Only Schur types require the lexicographic reordering.
+ if (IsSchurType(options.linear_solver_type)) {
+ const int num_eliminate_blocks =
+ options.linear_solver_ordering
+ ->group_to_elements().begin()
+ ->second.size();
+ if (!LexicographicallyOrderResidualBlocks(num_eliminate_blocks,
+ reduced_program.get(),
+ &summary->error)) {
+ return;
+ }
+ }
+
+ scoped_ptr<Evaluator> evaluator(CreateEvaluator(options,
+ problem_impl->parameter_map(),
+ reduced_program.get(),
+ &summary->error));
+ if (evaluator == NULL) {
+ return;
+ }
+
+ scoped_ptr<CoordinateDescentMinimizer> inner_iteration_minimizer;
+ if (options.use_inner_iterations) {
+ if (reduced_program->parameter_blocks().size() < 2) {
+ LOG(WARNING) << "Reduced problem only contains one parameter block. "
+ << "Disabling inner iterations.";
+ } else {
+ inner_iteration_minimizer.reset(
+ CreateInnerIterationMinimizer(original_options,
+ *reduced_program,
+ problem_impl->parameter_map(),
+ &summary->error));
+ if (inner_iteration_minimizer == NULL) {
+ LOG(ERROR) << summary->error;
+ return;
+ }
+ }
+ }
+
+ // The optimizer works on contiguous parameter vectors; allocate some.
+ Vector parameters(reduced_program->NumParameters());
+
+ // Collect the discontiguous parameters into a contiguous state vector.
+ reduced_program->ParameterBlocksToStateVector(parameters.data());
+
+ Vector original_parameters = parameters;
+
+ double minimizer_start_time = WallTimeInSeconds();
+ summary->preprocessor_time_in_seconds =
+ minimizer_start_time - solver_start_time;
+
+ // Run the optimization.
+ Minimize(options,
+ reduced_program.get(),
+ inner_iteration_minimizer.get(),
+ evaluator.get(),
+ linear_solver.get(),
+ parameters.data(),
+ summary);
+
+ // If the user aborted mid-optimization or the optimization
+ // terminated because of a numerical failure, then return without
+ // updating user state.
+ if (summary->termination_type == USER_ABORT ||
+ summary->termination_type == NUMERICAL_FAILURE) {
+ return;
+ }
+
+ double post_process_start_time = WallTimeInSeconds();
+
+ // Push the contiguous optimized parameters back to the user's parameters.
+ reduced_program->StateVectorToParameterBlocks(parameters.data());
+ reduced_program->CopyParameterBlockStateToUserState();
+
+ // Evaluate the final cost, residual vector and the jacobian
+ // matrix if requested by the user.
+ if (!Evaluator::Evaluate(original_program,
+ options.num_threads,
+ &summary->final_cost,
+ options.return_final_residuals
+ ? &summary->final_residuals
+ : NULL,
+ options.return_final_gradient
+ ? &summary->final_gradient
+ : NULL,
+ options.return_final_jacobian
+ ? &summary->final_jacobian
+ : NULL)) {
+ // This failure requires careful handling.
+ //
+ // At this point, we have modified the user's state, but the
+ // evaluation failed and we report NUMERICAL_FAILURE. Ceres
+ // guarantees that the user's state is not modified if the solver
+ // returns with NUMERICAL_FAILURE. Thus, we need to restore the
+ // user's parameters to their original values.
+
+ reduced_program->StateVectorToParameterBlocks(original_parameters.data());
+ reduced_program->CopyParameterBlockStateToUserState();
+
+ summary->termination_type = NUMERICAL_FAILURE;
+ summary->error = "Unable to evaluate the final cost.";
+ LOG(ERROR) << summary->error;
+ return;
+ }
+
+ // Ensure the program state is set to the user parameters on the way out.
+ original_program->SetParameterBlockStatePtrsToUserStatePtrs();
+
+ // Stick a fork in it, we're done.
+ summary->postprocessor_time_in_seconds =
+ WallTimeInSeconds() - post_process_start_time;
+}
+
+bool SolverImpl::IsOrderingValid(const Solver::Options& options,
+ const ProblemImpl* problem_impl,
+ string* error) {
+ if (options.linear_solver_ordering->NumElements() !=
+ problem_impl->NumParameterBlocks()) {
+ *error = "Number of parameter blocks in user supplied ordering "
+ "does not match the number of parameter blocks in the problem";
+ return false;
+ }
+
+ const Program& program = problem_impl->program();
+ const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
+ for (vector<ParameterBlock*>::const_iterator it = parameter_blocks.begin();
+ it != parameter_blocks.end();
+ ++it) {
+ if (!options.linear_solver_ordering
+ ->IsMember(const_cast<double*>((*it)->user_state()))) {
+ *error = "Problem contains a parameter block that is not in "
+ "the user specified ordering.";
+ return false;
+ }
+ }
+
+ if (IsSchurType(options.linear_solver_type) &&
+ options.linear_solver_ordering->NumGroups() > 1) {
+ const vector<ResidualBlock*>& residual_blocks = program.residual_blocks();
+ const set<double*>& e_blocks =
+ options.linear_solver_ordering->group_to_elements().begin()->second;
+ if (!IsParameterBlockSetIndependent(e_blocks, residual_blocks)) {
+ *error = "The user requested the use of a Schur type solver. "
+ "But the first elimination group in the ordering is not an "
+ "independent set.";
+ return false;
+ }
+ }
+ return true;
+}
+
+bool SolverImpl::IsParameterBlockSetIndependent(const set<double*>& parameter_block_ptrs,
+ const vector<ResidualBlock*>& residual_blocks) {
+ // Loop over each residual block and ensure that no two parameter
+ // blocks in the same residual block are part of
+ // parameter_block_ptrs as that would violate the assumption that it
+ // is an independent set in the Hessian matrix.
+ for (vector<ResidualBlock*>::const_iterator it = residual_blocks.begin();
+ it != residual_blocks.end();
+ ++it) {
+ ParameterBlock* const* parameter_blocks = (*it)->parameter_blocks();
+ const int num_parameter_blocks = (*it)->NumParameterBlocks();
+ int count = 0;
+ for (int i = 0; i < num_parameter_blocks; ++i) {
+ count += parameter_block_ptrs.count(
+ parameter_blocks[i]->mutable_user_state());
+ }
+ if (count > 1) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+// Removes fixed (constant) parameter blocks, and residual blocks all of
+// whose parameter blocks are constant, from the program, maintaining
+// order and updating the ordering to match. The cost of the removed
+// residual blocks is accumulated in fixed_cost if it is non-NULL.
+bool SolverImpl::RemoveFixedBlocksFromProgram(Program* program,
+ ParameterBlockOrdering* ordering,
+ double* fixed_cost,
+ string* error) {
+ vector<ParameterBlock*>* parameter_blocks =
+ program->mutable_parameter_blocks();
+
+ scoped_array<double> residual_block_evaluate_scratch;
+ if (fixed_cost != NULL) {
+ residual_block_evaluate_scratch.reset(
+ new double[program->MaxScratchDoublesNeededForEvaluate()]);
+ *fixed_cost = 0.0;
+ }
+
+ // Mark all the parameters as unused. Abuse the index member of the parameter
+ // blocks for the marking.
+ for (int i = 0; i < parameter_blocks->size(); ++i) {
+ (*parameter_blocks)[i]->set_index(-1);
+ }
+
+ // Filter out residual blocks that have all-constant parameters, and mark
+ // the varying parameter blocks that appear in residual blocks.
+ {
+ vector<ResidualBlock*>* residual_blocks =
+ program->mutable_residual_blocks();
+ int j = 0;
+ for (int i = 0; i < residual_blocks->size(); ++i) {
+ ResidualBlock* residual_block = (*residual_blocks)[i];
+ int num_parameter_blocks = residual_block->NumParameterBlocks();
+
+ // Determine if the residual block is fixed, and also mark varying
+ // parameters that appear in the residual block.
+ bool all_constant = true;
+ for (int k = 0; k < num_parameter_blocks; k++) {
+ ParameterBlock* parameter_block = residual_block->parameter_blocks()[k];
+ if (!parameter_block->IsConstant()) {
+ all_constant = false;
+ parameter_block->set_index(1);
+ }
+ }
+
+ if (!all_constant) {
+ (*residual_blocks)[j++] = (*residual_blocks)[i];
+ } else if (fixed_cost != NULL) {
+ // The residual is constant and will be removed, so its cost is
+ // added to the variable fixed_cost.
+ double cost = 0.0;
+ if (!residual_block->Evaluate(
+ &cost, NULL, NULL, residual_block_evaluate_scratch.get())) {
+ *error = StringPrintf("Evaluation of the residual %d failed during "
+ "removal of fixed residual blocks.", i);
+ return false;
+ }
+ *fixed_cost += cost;
+ }
+ }
+ residual_blocks->resize(j);
+ }
+
+ // Filter out unused or fixed parameter blocks, and update
+ // the ordering.
+ {
+ vector<ParameterBlock*>* parameter_blocks =
+ program->mutable_parameter_blocks();
+ int j = 0;
+ for (int i = 0; i < parameter_blocks->size(); ++i) {
+ ParameterBlock* parameter_block = (*parameter_blocks)[i];
+ if (parameter_block->index() == 1) {
+ (*parameter_blocks)[j++] = parameter_block;
+ } else {
+ ordering->Remove(parameter_block->mutable_user_state());
+ }
+ }
+ parameter_blocks->resize(j);
+ }
+
+ CHECK(((program->NumResidualBlocks() == 0) &&
+ (program->NumParameterBlocks() == 0)) ||
+ ((program->NumResidualBlocks() != 0) &&
+ (program->NumParameterBlocks() != 0)))
+ << "Congratulations, you found a bug in Ceres. Please report it.";
+ return true;
+}
+
+Program* SolverImpl::CreateReducedProgram(Solver::Options* options,
+ ProblemImpl* problem_impl,
+ double* fixed_cost,
+ string* error) {
+ CHECK_NOTNULL(options->linear_solver_ordering);
+ Program* original_program = problem_impl->mutable_program();
+ scoped_ptr<Program> transformed_program(new Program(*original_program));
+ ParameterBlockOrdering* linear_solver_ordering =
+ options->linear_solver_ordering;
+
+ const int min_group_id =
+ linear_solver_ordering->group_to_elements().begin()->first;
+ const int original_num_groups = linear_solver_ordering->NumGroups();
+
+ if (!RemoveFixedBlocksFromProgram(transformed_program.get(),
+ linear_solver_ordering,
+ fixed_cost,
+ error)) {
+ return NULL;
+ }
+
+ if (transformed_program->NumParameterBlocks() == 0) {
+ if (transformed_program->NumResidualBlocks() > 0) {
+ *error = "Zero parameter blocks but non-zero residual blocks"
+ " in the reduced program. Congratulations, you found a "
+ "Ceres bug! Please report this error to the developers.";
+ return NULL;
+ }
+
+ LOG(WARNING) << "No varying parameter blocks to optimize; "
+ << "bailing early.";
+ return transformed_program.release();
+ }
+
+ // If the user supplied a linear_solver_ordering with just one
+ // group, it is equivalent to the user supplying NULL as
+ // ordering. Ceres is completely free to choose the parameter block
+ // ordering as it sees fit. For Schur type solvers, this means that
+ // the user wishes for Ceres to identify the e_blocks, which we do
+ // by computing a maximal independent set.
+ if (original_num_groups == 1 && IsSchurType(options->linear_solver_type)) {
+ vector<ParameterBlock*> schur_ordering;
+ const int num_eliminate_blocks = ComputeSchurOrdering(*transformed_program,
+ &schur_ordering);
+ CHECK_EQ(schur_ordering.size(), transformed_program->NumParameterBlocks())
+ << "Congratulations, you found a Ceres bug! Please report this error "
+ << "to the developers.";
+
+ for (int i = 0; i < schur_ordering.size(); ++i) {
+ linear_solver_ordering->AddElementToGroup(
+ schur_ordering[i]->mutable_user_state(),
+ (i < num_eliminate_blocks) ? 0 : 1);
+ }
+ }
+
+ if (!ApplyUserOrdering(problem_impl->parameter_map(),
+ linear_solver_ordering,
+ transformed_program.get(),
+ error)) {
+ return NULL;
+ }
+
+ // If the user requested the use of a Schur type solver, and
+ // supplied a non-NULL linear_solver_ordering object with more than
+ // one elimination group, then it can happen that after all the
+ // parameter blocks which are fixed or unused have been removed from
+ // the program and the ordering, there are no more parameter blocks
+ // in the first elimination group.
+ //
+ // In such a case, the use of a Schur type solver is not possible,
+ // as they assume there is at least one e_block. Thus, we
+ // automatically switch to one of the other solvers, depending on
+ // the user's indicated preferences.
+ if (IsSchurType(options->linear_solver_type) &&
+ original_num_groups > 1 &&
+ linear_solver_ordering->GroupSize(min_group_id) == 0) {
+ string msg = "No e_blocks remaining. Switching from ";
+ if (options->linear_solver_type == SPARSE_SCHUR) {
+ options->linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+ msg += "SPARSE_SCHUR to SPARSE_NORMAL_CHOLESKY.";
+ } else if (options->linear_solver_type == DENSE_SCHUR) {
+ // TODO(sameeragarwal): This is probably not a great choice.
+ // Ideally, we should have a DENSE_NORMAL_CHOLESKY, that can
+ // take a BlockSparseMatrix as input.
+ options->linear_solver_type = DENSE_QR;
+ msg += "DENSE_SCHUR to DENSE_QR.";
+ } else if (options->linear_solver_type == ITERATIVE_SCHUR) {
+ msg += StringPrintf("ITERATIVE_SCHUR with %s preconditioner "
+ "to CGNR with JACOBI preconditioner.",
+ PreconditionerTypeToString(
+ options->preconditioner_type));
+ options->linear_solver_type = CGNR;
+ if (options->preconditioner_type != IDENTITY) {
+ // CGNR currently only supports the JACOBI preconditioner.
+ options->preconditioner_type = JACOBI;
+ }
+ }
+
+ LOG(WARNING) << msg;
+ }
+
+ // Since the transformed program is the "active" program, and it is mutated,
+ // update the parameter offsets and indices.
+ transformed_program->SetParameterOffsetsAndIndex();
+ return transformed_program.release();
+}
+
+LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options,
+ string* error) {
+ CHECK_NOTNULL(options);
+ CHECK_NOTNULL(options->linear_solver_ordering);
+ CHECK_NOTNULL(error);
+
+ if (options->trust_region_strategy_type == DOGLEG) {
+ if (options->linear_solver_type == ITERATIVE_SCHUR ||
+ options->linear_solver_type == CGNR) {
+ *error = "DOGLEG only supports exact factorization based linear "
+ "solvers. If you want to use an iterative solver please "
+ "use LEVENBERG_MARQUARDT as the trust_region_strategy_type";
+ return NULL;
+ }
+ }
+
+#ifdef CERES_NO_SUITESPARSE
+ if (options->linear_solver_type == SPARSE_NORMAL_CHOLESKY &&
+ options->sparse_linear_algebra_library == SUITE_SPARSE) {
+ *error = "Can't use SPARSE_NORMAL_CHOLESKY with SUITESPARSE because "
+ "SuiteSparse was not enabled when Ceres was built.";
+ return NULL;
+ }
+
+ if (options->preconditioner_type == SCHUR_JACOBI) {
+ *error = "SCHUR_JACOBI preconditioner not supported. Please build Ceres "
+ "with SuiteSparse support.";
+ return NULL;
+ }
+
+ if (options->preconditioner_type == CLUSTER_JACOBI) {
+ *error = "CLUSTER_JACOBI preconditioner not supported. Please build Ceres "
+ "with SuiteSparse support.";
+ return NULL;
+ }
+
+ if (options->preconditioner_type == CLUSTER_TRIDIAGONAL) {
+ *error = "CLUSTER_TRIDIAGONAL preconditioner not supported. Please build "
+ "Ceres with SuiteSparse support.";
+ return NULL;
+ }
+#endif
+
+#ifdef CERES_NO_CXSPARSE
+ if (options->linear_solver_type == SPARSE_NORMAL_CHOLESKY &&
+ options->sparse_linear_algebra_library == CX_SPARSE) {
+ *error = "Can't use SPARSE_NORMAL_CHOLESKY with CXSPARSE because "
+ "CXSparse was not enabled when Ceres was built.";
+ return NULL;
+ }
+#endif
+
+#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
+ if (options->linear_solver_type == SPARSE_SCHUR) {
+ *error = "Can't use SPARSE_SCHUR because neither SuiteSparse nor "
+ "CXSparse was enabled when Ceres was compiled.";
+ return NULL;
+ }
+#endif
+
+ if (options->linear_solver_max_num_iterations <= 0) {
+ *error = "Solver::Options::linear_solver_max_num_iterations is 0.";
+ return NULL;
+ }
+ if (options->linear_solver_min_num_iterations <= 0) {
+ *error = "Solver::Options::linear_solver_min_num_iterations is 0.";
+ return NULL;
+ }
+ if (options->linear_solver_min_num_iterations >
+ options->linear_solver_max_num_iterations) {
+ *error = "Solver::Options::linear_solver_min_num_iterations > "
+ "Solver::Options::linear_solver_max_num_iterations.";
+ return NULL;
+ }
+
+ LinearSolver::Options linear_solver_options;
+ linear_solver_options.min_num_iterations =
+ options->linear_solver_min_num_iterations;
+ linear_solver_options.max_num_iterations =
+ options->linear_solver_max_num_iterations;
+ linear_solver_options.type = options->linear_solver_type;
+ linear_solver_options.preconditioner_type = options->preconditioner_type;
+ linear_solver_options.sparse_linear_algebra_library =
+ options->sparse_linear_algebra_library;
+
+ linear_solver_options.num_threads = options->num_linear_solver_threads;
+ // The matrix used for storing the dense Schur complement has a
+ // single lock guarding the whole matrix. Running the
+ // SchurComplementSolver with multiple threads leads to maximum
+ // contention and slowdown. If the problem is large enough to
+ // benefit from a multithreaded schur eliminator, you should be
+ // using a SPARSE_SCHUR solver anyways.
+ if ((linear_solver_options.num_threads > 1) &&
+ (linear_solver_options.type == DENSE_SCHUR)) {
+ LOG(WARNING) << "Warning: Solver::Options::num_linear_solver_threads = "
+ << options->num_linear_solver_threads
+ << " with DENSE_SCHUR will result in poor performance; "
+ << "switching to single-threaded.";
+ linear_solver_options.num_threads = 1;
+ }
+ options->num_linear_solver_threads = linear_solver_options.num_threads;
+
+ linear_solver_options.use_block_amd = options->use_block_amd;
+ const map<int, set<double*> >& groups =
+ options->linear_solver_ordering->group_to_elements();
+ for (map<int, set<double*> >::const_iterator it = groups.begin();
+ it != groups.end();
+ ++it) {
+ linear_solver_options.elimination_groups.push_back(it->second.size());
+ }
+  // Schur type solvers expect at least two elimination groups. If
+ // there is only one elimination group, then CreateReducedProgram
+ // guarantees that this group only contains e_blocks. Thus we add a
+ // dummy elimination group with zero blocks in it.
+ if (IsSchurType(linear_solver_options.type) &&
+ linear_solver_options.elimination_groups.size() == 1) {
+ linear_solver_options.elimination_groups.push_back(0);
+ }
+
+ return LinearSolver::Create(linear_solver_options);
+}
+
+bool SolverImpl::ApplyUserOrdering(
+    const ProblemImpl::ParameterMap& parameter_map,
+    const ParameterBlockOrdering* ordering,
+    Program* program,
+    string* error) {
+ if (ordering->NumElements() != program->NumParameterBlocks()) {
+ *error = StringPrintf("User specified ordering does not have the same "
+                          "number of parameter blocks as the problem. The "
+                          "problem has %d blocks while the ordering has %d "
+                          "blocks.",
+ program->NumParameterBlocks(),
+ ordering->NumElements());
+ return false;
+ }
+
+ vector<ParameterBlock*>* parameter_blocks =
+ program->mutable_parameter_blocks();
+ parameter_blocks->clear();
+
+ const map<int, set<double*> >& groups =
+ ordering->group_to_elements();
+
+ for (map<int, set<double*> >::const_iterator group_it = groups.begin();
+ group_it != groups.end();
+ ++group_it) {
+ const set<double*>& group = group_it->second;
+ for (set<double*>::const_iterator parameter_block_ptr_it = group.begin();
+ parameter_block_ptr_it != group.end();
+ ++parameter_block_ptr_it) {
+ ProblemImpl::ParameterMap::const_iterator parameter_block_it =
+ parameter_map.find(*parameter_block_ptr_it);
+ if (parameter_block_it == parameter_map.end()) {
+ *error = StringPrintf("User specified ordering contains a pointer "
+ "to a double that is not a parameter block in the "
+ "problem. The invalid double is in group: %d",
+ group_it->first);
+ return false;
+ }
+ parameter_blocks->push_back(parameter_block_it->second);
+ }
+ }
+ return true;
+}
+
+// Find the minimum index of any parameter block in the given residual block.
+// Parameter blocks whose index is greater than or equal to
+// num_eliminate_blocks are treated as having index num_eliminate_blocks.
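+//
+// For example, with num_eliminate_blocks = 3, a residual block that depends
+// on parameter blocks with indices 5 and 1 yields 1, while one that depends
+// only on blocks with indices 3 and above yields 3.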
+int MinParameterBlock(const ResidualBlock* residual_block,
+ int num_eliminate_blocks) {
+ int min_parameter_block_position = num_eliminate_blocks;
+ for (int i = 0; i < residual_block->NumParameterBlocks(); ++i) {
+ ParameterBlock* parameter_block = residual_block->parameter_blocks()[i];
+ if (!parameter_block->IsConstant()) {
+ CHECK_NE(parameter_block->index(), -1)
+ << "Did you forget to call Program::SetParameterOffsetsAndIndex()? "
+ << "This is a Ceres bug; please contact the developers!";
+ min_parameter_block_position = std::min(parameter_block->index(),
+ min_parameter_block_position);
+ }
+ }
+ return min_parameter_block_position;
+}
+
+// Reorder the residuals for program, if necessary, so that the residuals
+// involving each E block occur together. This is a necessary condition for the
+// Schur eliminator, which works on these "row blocks" in the jacobian.
+bool SolverImpl::LexicographicallyOrderResidualBlocks(
+    const int num_eliminate_blocks,
+    Program* program,
+    string* error) {
+ CHECK_GE(num_eliminate_blocks, 1)
+ << "Congratulations, you found a Ceres bug! Please report this error "
+ << "to the developers.";
+
+ // Create a histogram of the number of residuals for each E block. There is an
+ // extra bucket at the end to catch all non-eliminated F blocks.
+ vector<int> residual_blocks_per_e_block(num_eliminate_blocks + 1);
+ vector<ResidualBlock*>* residual_blocks = program->mutable_residual_blocks();
+ vector<int> min_position_per_residual(residual_blocks->size());
+ for (int i = 0; i < residual_blocks->size(); ++i) {
+ ResidualBlock* residual_block = (*residual_blocks)[i];
+ int position = MinParameterBlock(residual_block, num_eliminate_blocks);
+ min_position_per_residual[i] = position;
+ DCHECK_LE(position, num_eliminate_blocks);
+ residual_blocks_per_e_block[position]++;
+ }
+
+ // Run a cumulative sum on the histogram, to obtain offsets to the start of
+ // each histogram bucket (where each bucket is for the residuals for that
+ // E-block).
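+  //
+  // For example, with num_eliminate_blocks = 2 and a histogram of
+  // {3, 1, 2} (two E block buckets plus the catch-all F bucket), the
+  // cumulative sum produces offsets of {3, 4, 6}, i.e. one past the
+  // end of each bucket.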
+ vector<int> offsets(num_eliminate_blocks + 1);
+ std::partial_sum(residual_blocks_per_e_block.begin(),
+ residual_blocks_per_e_block.end(),
+ offsets.begin());
+ CHECK_EQ(offsets.back(), residual_blocks->size())
+ << "Congratulations, you found a Ceres bug! Please report this error "
+ << "to the developers.";
+
+  CHECK(find(residual_blocks_per_e_block.begin(),
+             residual_blocks_per_e_block.end() - 1, 0) ==
+        residual_blocks_per_e_block.end() - 1)
+ << "Congratulations, you found a Ceres bug! Please report this error "
+ << "to the developers.";
+
+ // Fill in each bucket with the residual blocks for its corresponding E block.
+ // Each bucket is individually filled from the back of the bucket to the front
+ // of the bucket. The filling order among the buckets is dictated by the
+ // residual blocks. This loop uses the offsets as counters; subtracting one
+ // from each offset as a residual block is placed in the bucket. When the
+  // filling is finished, the offset pointers should have shifted down one
+ // entry (this is verified below).
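+  //
+  // Continuing the example above, the first residual destined for bucket 0
+  // decrements offsets[0] from 3 to 2 and is stored at position 2; the next
+  // one lands at position 1, and so on until offsets[0] reaches 0, the
+  // start of the bucket.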
+ vector<ResidualBlock*> reordered_residual_blocks(
+ (*residual_blocks).size(), static_cast<ResidualBlock*>(NULL));
+ for (int i = 0; i < residual_blocks->size(); ++i) {
+ int bucket = min_position_per_residual[i];
+
+ // Decrement the cursor, which should now point at the next empty position.
+ offsets[bucket]--;
+
+ // Sanity.
+ CHECK(reordered_residual_blocks[offsets[bucket]] == NULL)
+ << "Congratulations, you found a Ceres bug! Please report this error "
+ << "to the developers.";
+
+ reordered_residual_blocks[offsets[bucket]] = (*residual_blocks)[i];
+ }
+
+ // Sanity check #1: The difference in bucket offsets should match the
+ // histogram sizes.
+ for (int i = 0; i < num_eliminate_blocks; ++i) {
+ CHECK_EQ(residual_blocks_per_e_block[i], offsets[i + 1] - offsets[i])
+ << "Congratulations, you found a Ceres bug! Please report this error "
+ << "to the developers.";
+ }
+ // Sanity check #2: No NULL's left behind.
+ for (int i = 0; i < reordered_residual_blocks.size(); ++i) {
+ CHECK(reordered_residual_blocks[i] != NULL)
+ << "Congratulations, you found a Ceres bug! Please report this error "
+ << "to the developers.";
+ }
+
+ // Now that the residuals are collected by E block, swap them in place.
+ swap(*program->mutable_residual_blocks(), reordered_residual_blocks);
+ return true;
+}
+
+Evaluator* SolverImpl::CreateEvaluator(
+    const Solver::Options& options,
+    const ProblemImpl::ParameterMap& parameter_map,
+    Program* program,
+    string* error) {
+ Evaluator::Options evaluator_options;
+ evaluator_options.linear_solver_type = options.linear_solver_type;
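+  // For Schur type solvers, the number of blocks to eliminate is the size
+  // of the first (smallest id) elimination group, i.e. the E blocks; for
+  // all other solvers it is zero.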
+ evaluator_options.num_eliminate_blocks =
+ (options.linear_solver_ordering->NumGroups() > 0 &&
+ IsSchurType(options.linear_solver_type))
+ ? (options.linear_solver_ordering
+ ->group_to_elements().begin()
+ ->second.size())
+ : 0;
+ evaluator_options.num_threads = options.num_threads;
+ return Evaluator::Create(evaluator_options, program, error);
+}
+
+CoordinateDescentMinimizer* SolverImpl::CreateInnerIterationMinimizer(
+ const Solver::Options& options,
+ const Program& program,
+ const ProblemImpl::ParameterMap& parameter_map,
+ string* error) {
+ scoped_ptr<CoordinateDescentMinimizer> inner_iteration_minimizer(
+ new CoordinateDescentMinimizer);
+ scoped_ptr<ParameterBlockOrdering> inner_iteration_ordering;
+ ParameterBlockOrdering* ordering_ptr = NULL;
+
+ if (options.inner_iteration_ordering == NULL) {
+    // Find a recursive decomposition of the Hessian matrix as a set
+    // of independent sets of decreasing size and reverse it. This
+    // ordering seems to work better in practice, i.e., cameras before
+    // points.
+ inner_iteration_ordering.reset(new ParameterBlockOrdering);
+ ComputeRecursiveIndependentSetOrdering(program,
+ inner_iteration_ordering.get());
+ inner_iteration_ordering->Reverse();
+ ordering_ptr = inner_iteration_ordering.get();
+ } else {
+ const map<int, set<double*> >& group_to_elements =
+ options.inner_iteration_ordering->group_to_elements();
+
+ // Iterate over each group and verify that it is an independent
+ // set.
+ map<int, set<double*> >::const_iterator it = group_to_elements.begin();
+ for ( ;it != group_to_elements.end(); ++it) {
+ if (!IsParameterBlockSetIndependent(it->second,
+ program.residual_blocks())) {
+ *error =
+ StringPrintf("The user-provided "
+ "parameter_blocks_for_inner_iterations does not "
+ "form an independent set. Group Id: %d", it->first);
+ return NULL;
+ }
+ }
+ ordering_ptr = options.inner_iteration_ordering;
+ }
+
+ if (!inner_iteration_minimizer->Init(program,
+ parameter_map,
+ *ordering_ptr,
+ error)) {
+ return NULL;
+ }
+
+ return inner_iteration_minimizer.release();
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/solver_impl.h b/internal/ceres/solver_impl.h
new file mode 100644
index 0000000..09141ae
--- /dev/null
+++ b/internal/ceres/solver_impl.h
@@ -0,0 +1,140 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_SOLVER_IMPL_H_
+#define CERES_INTERNAL_SOLVER_IMPL_H_
+
+#include <string>
+#include <vector>
+#include "ceres/internal/port.h"
+#include "ceres/ordered_groups.h"
+#include "ceres/problem_impl.h"
+#include "ceres/solver.h"
+
+namespace ceres {
+namespace internal {
+
+class CoordinateDescentMinimizer;
+class Evaluator;
+class LinearSolver;
+class Program;
+
+class SolverImpl {
+ public:
+ // Mirrors the interface in solver.h, but exposes implementation
+ // details for testing internally.
+ static void Solve(const Solver::Options& options,
+ ProblemImpl* problem_impl,
+ Solver::Summary* summary);
+
+ // Create the transformed Program, which has all the fixed blocks
+ // and residuals eliminated, and in the case of automatic schur
+ // ordering, has the E blocks first in the resulting program, with
+ // options.num_eliminate_blocks set appropriately.
+ //
+ // If fixed_cost is not NULL, the residual blocks that are removed
+ // are evaluated and the sum of their cost is returned in fixed_cost.
+ static Program* CreateReducedProgram(Solver::Options* options,
+ ProblemImpl* problem_impl,
+ double* fixed_cost,
+ string* error);
+
+ // Create the appropriate linear solver, taking into account any
+  // config changes decided by CreateReducedProgram(). The selected
+  // linear solver may be different from what the user selected;
+  // consider the case where the number of remaining eliminated
+  // blocks is zero after removing fixed blocks.
+ static LinearSolver* CreateLinearSolver(Solver::Options* options,
+ string* error);
+
+ // Reorder the parameter blocks in program using the ordering. A
+ // return value of true indicates success and false indicates an
+ // error was encountered whose cause is logged to LOG(ERROR).
+ static bool ApplyUserOrdering(const ProblemImpl::ParameterMap& parameter_map,
+ const ParameterBlockOrdering* ordering,
+ Program* program,
+ string* error);
+
+ // Reorder the residuals for program, if necessary, so that the
+  // residuals involving e blocks (i.e., the first num_eliminate_blocks
+ // parameter blocks) occur together. This is a necessary condition
+ // for the Schur eliminator.
+ static bool LexicographicallyOrderResidualBlocks(
+ const int num_eliminate_blocks,
+ Program* program,
+ string* error);
+
+ // Create the appropriate evaluator for the transformed program.
+ static Evaluator* CreateEvaluator(
+ const Solver::Options& options,
+ const ProblemImpl::ParameterMap& parameter_map,
+ Program* program,
+ string* error);
+
+ // Run the minimization for the given evaluator and configuration.
+ static void Minimize(const Solver::Options &options,
+ Program* program,
+ CoordinateDescentMinimizer* inner_iteration_minimizer,
+ Evaluator* evaluator,
+ LinearSolver* linear_solver,
+ double* parameters,
+ Solver::Summary* summary);
+
+  // Remove the fixed or unused parameter blocks, and the residual
+  // blocks that depend only on fixed parameters, from the problem.
+  // Also updates the ordering, since removing parameter blocks
+  // changes which blocks remain to be eliminated. If fixed_cost is
+  // not NULL, the residual blocks that are removed are evaluated and
+  // the sum of their cost is returned in fixed_cost.
+ static bool RemoveFixedBlocksFromProgram(Program* program,
+ ParameterBlockOrdering* ordering,
+ double* fixed_cost,
+ string* error);
+
+ static bool IsOrderingValid(const Solver::Options& options,
+ const ProblemImpl* problem_impl,
+ string* error);
+
+ static bool IsParameterBlockSetIndependent(
+ const set<double*>& parameter_block_ptrs,
+ const vector<ResidualBlock*>& residual_blocks);
+
+ static CoordinateDescentMinimizer* CreateInnerIterationMinimizer(
+ const Solver::Options& options,
+ const Program& program,
+ const ProblemImpl::ParameterMap& parameter_map,
+ string* error);
+};
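+
+// A rough sketch of how the pieces above fit together inside Solve()
+// (illustrative only; the locals referenced below are hypothetical and
+// all error handling is omitted):
+//
+//   scoped_ptr<Program> reduced_program(
+//       SolverImpl::CreateReducedProgram(&options, problem_impl,
+//                                        &fixed_cost, &error));
+//   scoped_ptr<LinearSolver> linear_solver(
+//       SolverImpl::CreateLinearSolver(&options, &error));
+//   scoped_ptr<Evaluator> evaluator(
+//       SolverImpl::CreateEvaluator(options, problem_impl->parameter_map(),
+//                                   reduced_program.get(), &error));
+//   SolverImpl::Minimize(options, reduced_program.get(),
+//                        inner_iteration_minimizer.get(), evaluator.get(),
+//                        linear_solver.get(), parameters, summary);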
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_SOLVER_IMPL_H_
diff --git a/internal/ceres/solver_impl_test.cc b/internal/ceres/solver_impl_test.cc
new file mode 100644
index 0000000..5eb6c66
--- /dev/null
+++ b/internal/ceres/solver_impl_test.cc
@@ -0,0 +1,880 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "gtest/gtest.h"
+#include "ceres/autodiff_cost_function.h"
+#include "ceres/linear_solver.h"
+#include "ceres/ordered_groups.h"
+#include "ceres/parameter_block.h"
+#include "ceres/problem_impl.h"
+#include "ceres/program.h"
+#include "ceres/residual_block.h"
+#include "ceres/solver_impl.h"
+#include "ceres/sized_cost_function.h"
+
+namespace ceres {
+namespace internal {
+
+// A cost function that simply returns its argument.
+class UnaryIdentityCostFunction : public SizedCostFunction<1, 1> {
+ public:
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ residuals[0] = parameters[0][0];
+ if (jacobians != NULL && jacobians[0] != NULL) {
+ jacobians[0][0] = 1.0;
+ }
+ return true;
+ }
+};
+
+// Templated base class for the CostFunction signatures.
+template <int kNumResiduals, int N0, int N1, int N2>
+class MockCostFunctionBase
+    : public SizedCostFunction<kNumResiduals, N0, N1, N2> {
+ public:
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ // Do nothing. This is never called.
+ return true;
+ }
+};
+
+class UnaryCostFunction : public MockCostFunctionBase<2, 1, 0, 0> {};
+class BinaryCostFunction : public MockCostFunctionBase<2, 1, 1, 0> {};
+class TernaryCostFunction : public MockCostFunctionBase<2, 1, 1, 1> {};
+
+TEST(SolverImpl, RemoveFixedBlocksNothingConstant) {
+ ProblemImpl problem;
+ double x;
+ double y;
+ double z;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
+ problem.AddResidualBlock(new TernaryCostFunction(), NULL, &x, &y, &z);
+
+ string error;
+ {
+ ParameterBlockOrdering ordering;
+ ordering.AddElementToGroup(&x, 0);
+ ordering.AddElementToGroup(&y, 0);
+ ordering.AddElementToGroup(&z, 0);
+
+ Program program(*problem.mutable_program());
+ EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
+ &ordering,
+ NULL,
+ &error));
+ EXPECT_EQ(program.NumParameterBlocks(), 3);
+ EXPECT_EQ(program.NumResidualBlocks(), 3);
+ EXPECT_EQ(ordering.NumElements(), 3);
+ }
+}
+
+TEST(SolverImpl, RemoveFixedBlocksAllParameterBlocksConstant) {
+ ProblemImpl problem;
+ double x;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
+ problem.SetParameterBlockConstant(&x);
+
+ ParameterBlockOrdering ordering;
+ ordering.AddElementToGroup(&x, 0);
+
+ Program program(problem.program());
+ string error;
+ EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
+ &ordering,
+ NULL,
+ &error));
+ EXPECT_EQ(program.NumParameterBlocks(), 0);
+ EXPECT_EQ(program.NumResidualBlocks(), 0);
+ EXPECT_EQ(ordering.NumElements(), 0);
+}
+
+TEST(SolverImpl, RemoveFixedBlocksNoResidualBlocks) {
+ ProblemImpl problem;
+ double x;
+ double y;
+ double z;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+
+ ParameterBlockOrdering ordering;
+ ordering.AddElementToGroup(&x, 0);
+ ordering.AddElementToGroup(&y, 0);
+ ordering.AddElementToGroup(&z, 0);
+
+ Program program(problem.program());
+ string error;
+ EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
+ &ordering,
+ NULL,
+ &error));
+ EXPECT_EQ(program.NumParameterBlocks(), 0);
+ EXPECT_EQ(program.NumResidualBlocks(), 0);
+ EXPECT_EQ(ordering.NumElements(), 0);
+}
+
+TEST(SolverImpl, RemoveFixedBlocksOneParameterBlockConstant) {
+ ProblemImpl problem;
+ double x;
+ double y;
+ double z;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+
+ ParameterBlockOrdering ordering;
+ ordering.AddElementToGroup(&x, 0);
+ ordering.AddElementToGroup(&y, 0);
+ ordering.AddElementToGroup(&z, 0);
+
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
+ problem.SetParameterBlockConstant(&x);
+
+ Program program(problem.program());
+ string error;
+ EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
+ &ordering,
+ NULL,
+ &error));
+ EXPECT_EQ(program.NumParameterBlocks(), 1);
+ EXPECT_EQ(program.NumResidualBlocks(), 1);
+ EXPECT_EQ(ordering.NumElements(), 1);
+}
+
+TEST(SolverImpl, RemoveFixedBlocksNumEliminateBlocks) {
+ ProblemImpl problem;
+ double x;
+ double y;
+ double z;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
+ problem.AddResidualBlock(new TernaryCostFunction(), NULL, &x, &y, &z);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
+ problem.SetParameterBlockConstant(&x);
+
+ ParameterBlockOrdering ordering;
+ ordering.AddElementToGroup(&x, 0);
+ ordering.AddElementToGroup(&y, 0);
+ ordering.AddElementToGroup(&z, 1);
+
+ Program program(problem.program());
+ string error;
+ EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
+ &ordering,
+ NULL,
+ &error));
+ EXPECT_EQ(program.NumParameterBlocks(), 2);
+ EXPECT_EQ(program.NumResidualBlocks(), 2);
+ EXPECT_EQ(ordering.NumElements(), 2);
+ EXPECT_EQ(ordering.GroupId(&y), 0);
+ EXPECT_EQ(ordering.GroupId(&z), 1);
+}
+
+TEST(SolverImpl, RemoveFixedBlocksFixedCost) {
+ ProblemImpl problem;
+ double x = 1.23;
+ double y = 4.56;
+ double z = 7.89;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+ problem.AddResidualBlock(new UnaryIdentityCostFunction(), NULL, &x);
+ problem.AddResidualBlock(new TernaryCostFunction(), NULL, &x, &y, &z);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
+ problem.SetParameterBlockConstant(&x);
+
+ ParameterBlockOrdering ordering;
+ ordering.AddElementToGroup(&x, 0);
+ ordering.AddElementToGroup(&y, 0);
+ ordering.AddElementToGroup(&z, 1);
+
+ double fixed_cost = 0.0;
+ Program program(problem.program());
+
+ double expected_fixed_cost;
+  ResidualBlock* expected_removed_block = program.residual_blocks()[0];
+  scoped_array<double> scratch(
+      new double[expected_removed_block->NumScratchDoublesForEvaluate()]);
+  expected_removed_block->Evaluate(
+      &expected_fixed_cost, NULL, NULL, scratch.get());
+
+ string error;
+ EXPECT_TRUE(SolverImpl::RemoveFixedBlocksFromProgram(&program,
+ &ordering,
+ &fixed_cost,
+ &error));
+ EXPECT_EQ(program.NumParameterBlocks(), 2);
+ EXPECT_EQ(program.NumResidualBlocks(), 2);
+ EXPECT_EQ(ordering.NumElements(), 2);
+ EXPECT_EQ(ordering.GroupId(&y), 0);
+ EXPECT_EQ(ordering.GroupId(&z), 1);
+ EXPECT_DOUBLE_EQ(fixed_cost, expected_fixed_cost);
+}
+
+TEST(SolverImpl, ReorderResidualBlockNormalFunction) {
+ ProblemImpl problem;
+ double x;
+ double y;
+ double z;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &x);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &z);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &y);
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &y);
+
+ ParameterBlockOrdering* ordering = new ParameterBlockOrdering;
+ ordering->AddElementToGroup(&x, 0);
+ ordering->AddElementToGroup(&y, 0);
+ ordering->AddElementToGroup(&z, 1);
+
+ Solver::Options options;
+ options.linear_solver_type = DENSE_SCHUR;
+ options.linear_solver_ordering = ordering;
+
+ const vector<ResidualBlock*>& residual_blocks =
+ problem.program().residual_blocks();
+
+ vector<ResidualBlock*> expected_residual_blocks;
+
+ // This is a bit fragile, but it serves the purpose. We know the
+ // bucketing algorithm that the reordering function uses, so we
+ // expect the order for residual blocks for each e_block to be
+ // filled in reverse.
+ expected_residual_blocks.push_back(residual_blocks[4]);
+ expected_residual_blocks.push_back(residual_blocks[1]);
+ expected_residual_blocks.push_back(residual_blocks[0]);
+ expected_residual_blocks.push_back(residual_blocks[5]);
+ expected_residual_blocks.push_back(residual_blocks[2]);
+ expected_residual_blocks.push_back(residual_blocks[3]);
+
+ Program* program = problem.mutable_program();
+ program->SetParameterOffsetsAndIndex();
+
+ string error;
+ EXPECT_TRUE(SolverImpl::LexicographicallyOrderResidualBlocks(
+ 2,
+ problem.mutable_program(),
+ &error));
+ EXPECT_EQ(residual_blocks.size(), expected_residual_blocks.size());
+ for (int i = 0; i < expected_residual_blocks.size(); ++i) {
+ EXPECT_EQ(residual_blocks[i], expected_residual_blocks[i]);
+ }
+}
+
+TEST(SolverImpl, ReorderResidualBlockNormalFunctionWithFixedBlocks) {
+ ProblemImpl problem;
+ double x;
+ double y;
+ double z;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+
+ // Set one parameter block constant.
+ problem.SetParameterBlockConstant(&z);
+
+ // Mark residuals for x's row block with "x" for readability.
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x); // 0 x
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &x); // 1 x
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y); // 2
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y); // 3
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &z); // 4 x
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y); // 5
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &z); // 6 x
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &y); // 7
+
+ ParameterBlockOrdering* ordering = new ParameterBlockOrdering;
+ ordering->AddElementToGroup(&x, 0);
+ ordering->AddElementToGroup(&z, 0);
+ ordering->AddElementToGroup(&y, 1);
+
+ Solver::Options options;
+ options.linear_solver_type = DENSE_SCHUR;
+ options.linear_solver_ordering = ordering;
+
+ // Create the reduced program. This should remove the fixed block "z",
+ // marking the index to -1 at the same time. x and y also get indices.
+ string error;
+ scoped_ptr<Program> reduced_program(
+ SolverImpl::CreateReducedProgram(&options, &problem, NULL, &error));
+
+ const vector<ResidualBlock*>& residual_blocks =
+ problem.program().residual_blocks();
+
+ // This is a bit fragile, but it serves the purpose. We know the
+ // bucketing algorithm that the reordering function uses, so we
+ // expect the order for residual blocks for each e_block to be
+ // filled in reverse.
+
+ vector<ResidualBlock*> expected_residual_blocks;
+
+ // Row block for residuals involving "x". These are marked "x" in the block
+ // of code calling AddResidual() above.
+ expected_residual_blocks.push_back(residual_blocks[6]);
+ expected_residual_blocks.push_back(residual_blocks[4]);
+ expected_residual_blocks.push_back(residual_blocks[1]);
+ expected_residual_blocks.push_back(residual_blocks[0]);
+
+ // Row block for residuals involving "y".
+ expected_residual_blocks.push_back(residual_blocks[7]);
+ expected_residual_blocks.push_back(residual_blocks[5]);
+ expected_residual_blocks.push_back(residual_blocks[3]);
+ expected_residual_blocks.push_back(residual_blocks[2]);
+
+ EXPECT_TRUE(SolverImpl::LexicographicallyOrderResidualBlocks(
+ 2,
+ reduced_program.get(),
+ &error));
+
+ EXPECT_EQ(reduced_program->residual_blocks().size(),
+ expected_residual_blocks.size());
+ for (int i = 0; i < expected_residual_blocks.size(); ++i) {
+ EXPECT_EQ(reduced_program->residual_blocks()[i],
+ expected_residual_blocks[i]);
+ }
+}
+
+TEST(SolverImpl, AutomaticSchurReorderingRespectsConstantBlocks) {
+ ProblemImpl problem;
+ double x;
+ double y;
+ double z;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+
+ // Set one parameter block constant.
+ problem.SetParameterBlockConstant(&z);
+
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &x);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &x);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &z);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &z, &y);
+ problem.AddResidualBlock(new BinaryCostFunction(), NULL, &x, &z);
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &y);
+ problem.AddResidualBlock(new UnaryCostFunction(), NULL, &z);
+
+ ParameterBlockOrdering* ordering = new ParameterBlockOrdering;
+ ordering->AddElementToGroup(&x, 0);
+ ordering->AddElementToGroup(&z, 0);
+ ordering->AddElementToGroup(&y, 0);
+
+ Solver::Options options;
+ options.linear_solver_type = DENSE_SCHUR;
+ options.linear_solver_ordering = ordering;
+
+ string error;
+ scoped_ptr<Program> reduced_program(
+ SolverImpl::CreateReducedProgram(&options, &problem, NULL, &error));
+
+ const vector<ResidualBlock*>& residual_blocks =
+ reduced_program->residual_blocks();
+ const vector<ParameterBlock*>& parameter_blocks =
+ reduced_program->parameter_blocks();
+
+ const vector<ResidualBlock*>& original_residual_blocks =
+ problem.program().residual_blocks();
+
+ EXPECT_EQ(residual_blocks.size(), 8);
+ EXPECT_EQ(reduced_program->parameter_blocks().size(), 2);
+
+  // Verify that the right parameter block and the residual blocks
+  // have been removed.
+ for (int i = 0; i < 8; ++i) {
+ EXPECT_NE(residual_blocks[i], original_residual_blocks.back());
+ }
+ for (int i = 0; i < 2; ++i) {
+ EXPECT_NE(parameter_blocks[i]->mutable_user_state(), &z);
+ }
+}
+
+TEST(SolverImpl, ApplyUserOrderingOrderingTooSmall) {
+ ProblemImpl problem;
+ double x;
+ double y;
+ double z;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+
+ ParameterBlockOrdering ordering;
+ ordering.AddElementToGroup(&x, 0);
+ ordering.AddElementToGroup(&y, 1);
+
+ Program program(problem.program());
+ string error;
+ EXPECT_FALSE(SolverImpl::ApplyUserOrdering(problem.parameter_map(),
+ &ordering,
+ &program,
+ &error));
+}
+
+TEST(SolverImpl, ApplyUserOrderingNormal) {
+ ProblemImpl problem;
+ double x;
+ double y;
+ double z;
+
+ problem.AddParameterBlock(&x, 1);
+ problem.AddParameterBlock(&y, 1);
+ problem.AddParameterBlock(&z, 1);
+
+ ParameterBlockOrdering ordering;
+ ordering.AddElementToGroup(&x, 0);
+ ordering.AddElementToGroup(&y, 2);
+ ordering.AddElementToGroup(&z, 1);
+
+ Program* program = problem.mutable_program();
+ string error;
+
+ EXPECT_TRUE(SolverImpl::ApplyUserOrdering(problem.parameter_map(),
+ &ordering,
+ program,
+ &error));
+ const vector<ParameterBlock*>& parameter_blocks = program->parameter_blocks();
+
+ EXPECT_EQ(parameter_blocks.size(), 3);
+ EXPECT_EQ(parameter_blocks[0]->user_state(), &x);
+ EXPECT_EQ(parameter_blocks[1]->user_state(), &z);
+ EXPECT_EQ(parameter_blocks[2]->user_state(), &y);
+}
+
+#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
+TEST(SolverImpl, CreateLinearSolverNoSuiteSparse) {
+ Solver::Options options;
+ options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+ string error;
+ EXPECT_FALSE(SolverImpl::CreateLinearSolver(&options, &error));
+}
+#endif
+
+TEST(SolverImpl, CreateLinearSolverNegativeMaxNumIterations) {
+ Solver::Options options;
+ options.linear_solver_type = DENSE_QR;
+ options.linear_solver_max_num_iterations = -1;
+ // CreateLinearSolver assumes a non-empty ordering.
+ options.linear_solver_ordering = new ParameterBlockOrdering;
+ string error;
+ EXPECT_EQ(SolverImpl::CreateLinearSolver(&options, &error),
+ static_cast<LinearSolver*>(NULL));
+}
+
+TEST(SolverImpl, CreateLinearSolverNegativeMinNumIterations) {
+ Solver::Options options;
+ options.linear_solver_type = DENSE_QR;
+ options.linear_solver_min_num_iterations = -1;
+ // CreateLinearSolver assumes a non-empty ordering.
+ options.linear_solver_ordering = new ParameterBlockOrdering;
+ string error;
+ EXPECT_EQ(SolverImpl::CreateLinearSolver(&options, &error),
+ static_cast<LinearSolver*>(NULL));
+}
+
+TEST(SolverImpl, CreateLinearSolverMaxLessThanMinIterations) {
+ Solver::Options options;
+ options.linear_solver_type = DENSE_QR;
+ options.linear_solver_min_num_iterations = 10;
+ options.linear_solver_max_num_iterations = 5;
+ options.linear_solver_ordering = new ParameterBlockOrdering;
+ string error;
+ EXPECT_EQ(SolverImpl::CreateLinearSolver(&options, &error),
+ static_cast<LinearSolver*>(NULL));
+}
+
+TEST(SolverImpl, CreateLinearSolverDenseSchurMultipleThreads) {
+ Solver::Options options;
+ options.linear_solver_type = DENSE_SCHUR;
+ options.num_linear_solver_threads = 2;
+  // The Schur type solvers can only be created when the ordering
+  // contains at least one elimination group.
+ options.linear_solver_ordering = new ParameterBlockOrdering;
+ double x;
+ double y;
+ options.linear_solver_ordering->AddElementToGroup(&x, 0);
+ options.linear_solver_ordering->AddElementToGroup(&y, 0);
+
+ string error;
+ scoped_ptr<LinearSolver> solver(
+ SolverImpl::CreateLinearSolver(&options, &error));
+ EXPECT_TRUE(solver != NULL);
+ EXPECT_EQ(options.linear_solver_type, DENSE_SCHUR);
+ EXPECT_EQ(options.num_linear_solver_threads, 1);
+}
+
+TEST(SolverImpl, CreateIterativeLinearSolverForDogleg) {
+ Solver::Options options;
+ options.trust_region_strategy_type = DOGLEG;
+ // CreateLinearSolver assumes a non-empty ordering.
+ options.linear_solver_ordering = new ParameterBlockOrdering;
+ string error;
+ options.linear_solver_type = ITERATIVE_SCHUR;
+ EXPECT_EQ(SolverImpl::CreateLinearSolver(&options, &error),
+ static_cast<LinearSolver*>(NULL));
+
+ options.linear_solver_type = CGNR;
+ EXPECT_EQ(SolverImpl::CreateLinearSolver(&options, &error),
+ static_cast<LinearSolver*>(NULL));
+}
+
+TEST(SolverImpl, CreateLinearSolverNormalOperation) {
+ Solver::Options options;
+ scoped_ptr<LinearSolver> solver;
+ options.linear_solver_type = DENSE_QR;
+ // CreateLinearSolver assumes a non-empty ordering.
+ options.linear_solver_ordering = new ParameterBlockOrdering;
+ string error;
+ solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
+ EXPECT_EQ(options.linear_solver_type, DENSE_QR);
+ EXPECT_TRUE(solver.get() != NULL);
+
+ options.linear_solver_type = DENSE_NORMAL_CHOLESKY;
+ solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
+ EXPECT_EQ(options.linear_solver_type, DENSE_NORMAL_CHOLESKY);
+ EXPECT_TRUE(solver.get() != NULL);
+
+#ifndef CERES_NO_SUITESPARSE
+ options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+ options.sparse_linear_algebra_library = SUITE_SPARSE;
+ solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
+ EXPECT_EQ(options.linear_solver_type, SPARSE_NORMAL_CHOLESKY);
+ EXPECT_TRUE(solver.get() != NULL);
+#endif
+
+#ifndef CERES_NO_CXSPARSE
+ options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+ options.sparse_linear_algebra_library = CX_SPARSE;
+ solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
+ EXPECT_EQ(options.linear_solver_type, SPARSE_NORMAL_CHOLESKY);
+ EXPECT_TRUE(solver.get() != NULL);
+#endif
+
+ double x;
+ double y;
+ options.linear_solver_ordering->AddElementToGroup(&x, 0);
+ options.linear_solver_ordering->AddElementToGroup(&y, 0);
+
+ options.linear_solver_type = DENSE_SCHUR;
+ solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
+ EXPECT_EQ(options.linear_solver_type, DENSE_SCHUR);
+ EXPECT_TRUE(solver.get() != NULL);
+
+ options.linear_solver_type = SPARSE_SCHUR;
+ solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
+
+#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
+ EXPECT_TRUE(SolverImpl::CreateLinearSolver(&options, &error) == NULL);
+#else
+ EXPECT_TRUE(solver.get() != NULL);
+ EXPECT_EQ(options.linear_solver_type, SPARSE_SCHUR);
+#endif
+
+ options.linear_solver_type = ITERATIVE_SCHUR;
+ solver.reset(SolverImpl::CreateLinearSolver(&options, &error));
+ EXPECT_EQ(options.linear_solver_type, ITERATIVE_SCHUR);
+ EXPECT_TRUE(solver.get() != NULL);
+}
+
+struct QuadraticCostFunction {
+ template <typename T> bool operator()(const T* const x,
+ T* residual) const {
+ residual[0] = T(5.0) - *x;
+ return true;
+ }
+};
+
+struct RememberingCallback : public IterationCallback {
+ explicit RememberingCallback(double *x) : calls(0), x(x) {}
+ virtual ~RememberingCallback() {}
+ virtual CallbackReturnType operator()(const IterationSummary& summary) {
+ x_values.push_back(*x);
+ return SOLVER_CONTINUE;
+ }
+ int calls;
+ double *x;
+ vector<double> x_values;
+};
+
+TEST(SolverImpl, UpdateStateEveryIterationOption) {
+ double x = 50.0;
+ const double original_x = x;
+
+ scoped_ptr<CostFunction> cost_function(
+ new AutoDiffCostFunction<QuadraticCostFunction, 1, 1>(
+ new QuadraticCostFunction));
+
+ Problem::Options problem_options;
+ problem_options.cost_function_ownership = DO_NOT_TAKE_OWNERSHIP;
+ ProblemImpl problem(problem_options);
+ problem.AddResidualBlock(cost_function.get(), NULL, &x);
+
+ Solver::Options options;
+ options.linear_solver_type = DENSE_QR;
+
+ RememberingCallback callback(&x);
+ options.callbacks.push_back(&callback);
+
+ Solver::Summary summary;
+
+ int num_iterations;
+
+ // First try: no updating.
+ SolverImpl::Solve(options, &problem, &summary);
+ num_iterations = summary.num_successful_steps +
+ summary.num_unsuccessful_steps;
+ EXPECT_GT(num_iterations, 1);
+ for (int i = 0; i < callback.x_values.size(); ++i) {
+ EXPECT_EQ(50.0, callback.x_values[i]);
+ }
+
+ // Second try: with updating
+ x = 50.0;
+ options.update_state_every_iteration = true;
+ callback.x_values.clear();
+ SolverImpl::Solve(options, &problem, &summary);
+ num_iterations = summary.num_successful_steps +
+ summary.num_unsuccessful_steps;
+ EXPECT_GT(num_iterations, 1);
+ EXPECT_EQ(original_x, callback.x_values[0]);
+ EXPECT_NE(original_x, callback.x_values[1]);
+}
+
+// The parameters must be in separate blocks so that they can be individually
+// set constant or not.
+struct Quadratic4DCostFunction {
+ template <typename T> bool operator()(const T* const x,
+ const T* const y,
+ const T* const z,
+ const T* const w,
+ T* residual) const {
+    // A 4-dimensional axis-aligned quadratic.
+ residual[0] = T(10.0) - *x +
+ T(20.0) - *y +
+ T(30.0) - *z +
+ T(40.0) - *w;
+ return true;
+ }
+};
+
+TEST(SolverImpl, ConstantParameterBlocksDoNotChangeAndStateInvariantKept) {
+ double x = 50.0;
+ double y = 50.0;
+ double z = 50.0;
+ double w = 50.0;
+ const double original_x = 50.0;
+ const double original_y = 50.0;
+ const double original_z = 50.0;
+ const double original_w = 50.0;
+
+ scoped_ptr<CostFunction> cost_function(
+ new AutoDiffCostFunction<Quadratic4DCostFunction, 1, 1, 1, 1, 1>(
+ new Quadratic4DCostFunction));
+
+ Problem::Options problem_options;
+ problem_options.cost_function_ownership = DO_NOT_TAKE_OWNERSHIP;
+
+ ProblemImpl problem(problem_options);
+ problem.AddResidualBlock(cost_function.get(), NULL, &x, &y, &z, &w);
+ problem.SetParameterBlockConstant(&x);
+ problem.SetParameterBlockConstant(&w);
+
+ Solver::Options options;
+ options.linear_solver_type = DENSE_QR;
+
+ Solver::Summary summary;
+ SolverImpl::Solve(options, &problem, &summary);
+
+ // Verify only the non-constant parameters were mutated.
+ EXPECT_EQ(original_x, x);
+ EXPECT_NE(original_y, y);
+ EXPECT_NE(original_z, z);
+ EXPECT_EQ(original_w, w);
+
+ // Check that the parameter block state pointers are pointing back at the
+ // user state, instead of inside a random temporary vector made by Solve().
+ EXPECT_EQ(&x, problem.program().parameter_blocks()[0]->state());
+ EXPECT_EQ(&y, problem.program().parameter_blocks()[1]->state());
+ EXPECT_EQ(&z, problem.program().parameter_blocks()[2]->state());
+ EXPECT_EQ(&w, problem.program().parameter_blocks()[3]->state());
+}
+
+#define CHECK_ARRAY(name, value) \
+ if (options.return_ ## name) { \
+ EXPECT_EQ(summary.name.size(), 1); \
+ EXPECT_EQ(summary.name[0], value); \
+ } else { \
+ EXPECT_EQ(summary.name.size(), 0); \
+ }
+
+#define CHECK_JACOBIAN(name) \
+ if (options.return_ ## name) { \
+ EXPECT_EQ(summary.name.num_rows, 1); \
+ EXPECT_EQ(summary.name.num_cols, 1); \
+ EXPECT_EQ(summary.name.cols.size(), 2); \
+ EXPECT_EQ(summary.name.cols[0], 0); \
+ EXPECT_EQ(summary.name.cols[1], 1); \
+ EXPECT_EQ(summary.name.rows.size(), 1); \
+ EXPECT_EQ(summary.name.rows[0], 0); \
+    EXPECT_EQ(summary.name.values.size(), 1); \
+ EXPECT_EQ(summary.name.values[0], name); \
+ } else { \
+ EXPECT_EQ(summary.name.num_rows, 0); \
+ EXPECT_EQ(summary.name.num_cols, 0); \
+ EXPECT_EQ(summary.name.cols.size(), 0); \
+ EXPECT_EQ(summary.name.rows.size(), 0); \
+ EXPECT_EQ(summary.name.values.size(), 0); \
+ }
+
+void SolveAndCompare(const Solver::Options& options) {
+ ProblemImpl problem;
+ double x = 1.0;
+
+ const double initial_residual = 5.0 - x;
+ const double initial_jacobian = -1.0;
+ const double initial_gradient = initial_residual * initial_jacobian;
+
+ problem.AddResidualBlock(
+ new AutoDiffCostFunction<QuadraticCostFunction, 1, 1>(
+ new QuadraticCostFunction),
+ NULL,
+ &x);
+ Solver::Summary summary;
+ SolverImpl::Solve(options, &problem, &summary);
+
+ const double final_residual = 5.0 - x;
+ const double final_jacobian = -1.0;
+ const double final_gradient = final_residual * final_jacobian;
+
+ CHECK_ARRAY(initial_residuals, initial_residual);
+ CHECK_ARRAY(initial_gradient, initial_gradient);
+ CHECK_JACOBIAN(initial_jacobian);
+ CHECK_ARRAY(final_residuals, final_residual);
+ CHECK_ARRAY(final_gradient, final_gradient);
+  CHECK_JACOBIAN(final_jacobian);
+}
+
+#undef CHECK_ARRAY
+#undef CHECK_JACOBIAN
+
+TEST(SolverImpl, InitialAndFinalResidualsGradientAndJacobian) {
+ for (int i = 0; i < 64; ++i) {
+ Solver::Options options;
+ options.return_initial_residuals = (i & 1);
+ options.return_initial_gradient = (i & 2);
+ options.return_initial_jacobian = (i & 4);
+ options.return_final_residuals = (i & 8);
+ options.return_final_gradient = (i & 16);
+ options.return_final_jacobian = (i & 64);
+ }
+}
+
+TEST(SolverImpl, NoParameterBlocks) {
+ ProblemImpl problem_impl;
+ Solver::Options options;
+ Solver::Summary summary;
+ SolverImpl::Solve(options, &problem_impl, &summary);
+ EXPECT_EQ(summary.termination_type, DID_NOT_RUN);
+ EXPECT_EQ(summary.error, "Problem contains no parameter blocks.");
+}
+
+TEST(SolverImpl, NoResiduals) {
+ ProblemImpl problem_impl;
+ Solver::Options options;
+ Solver::Summary summary;
+ double x = 1;
+ problem_impl.AddParameterBlock(&x, 1);
+ SolverImpl::Solve(options, &problem_impl, &summary);
+ EXPECT_EQ(summary.termination_type, DID_NOT_RUN);
+ EXPECT_EQ(summary.error, "Problem contains no residual blocks.");
+}
+
+class FailingCostFunction : public SizedCostFunction<1, 1> {
+ public:
+ virtual bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ return false;
+ }
+};
+
+TEST(SolverImpl, InitialCostEvaluationFails) {
+ ProblemImpl problem_impl;
+ Solver::Options options;
+ Solver::Summary summary;
+ double x;
+ problem_impl.AddResidualBlock(new FailingCostFunction, NULL, &x);
+ SolverImpl::Solve(options, &problem_impl, &summary);
+ EXPECT_EQ(summary.termination_type, NUMERICAL_FAILURE);
+ EXPECT_EQ(summary.error, "Unable to evaluate the initial cost.");
+}
+
+TEST(SolverImpl, ProblemIsConstant) {
+ ProblemImpl problem_impl;
+ Solver::Options options;
+ Solver::Summary summary;
+ double x = 1;
+ problem_impl.AddResidualBlock(new UnaryIdentityCostFunction, NULL, &x);
+ problem_impl.SetParameterBlockConstant(&x);
+ SolverImpl::Solve(options, &problem_impl, &summary);
+ EXPECT_EQ(summary.termination_type, FUNCTION_TOLERANCE);
+ EXPECT_EQ(summary.initial_cost, 1.0 / 2.0);
+ EXPECT_EQ(summary.final_cost, 1.0 / 2.0);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/sparse_matrix.cc b/internal/ceres/sparse_matrix.cc
new file mode 100644
index 0000000..55336fd
--- /dev/null
+++ b/internal/ceres/sparse_matrix.cc
@@ -0,0 +1,40 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+SparseMatrix::~SparseMatrix() {
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/sparse_matrix.h b/internal/ceres/sparse_matrix.h
new file mode 100644
index 0000000..1b19f88
--- /dev/null
+++ b/internal/ceres/sparse_matrix.h
@@ -0,0 +1,114 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Interface definition for sparse matrices.
+
+#ifndef CERES_INTERNAL_SPARSE_MATRIX_H_
+#define CERES_INTERNAL_SPARSE_MATRIX_H_
+
+#include <cstdio>
+#include "ceres/linear_operator.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class SparseMatrixProto;
+
+// This class defines the interface for storing and manipulating
+// sparse matrices. The key property that differentiates different
+// sparse matrices is how they are organized in memory and how the
+// information about the sparsity structure of the matrix is
+// stored. This has significant implications for linear solvers
+// operating on these matrices.
+//
+// To deal with the different kinds of layouts, we will assume that a
+// sparse matrix will have a two part representation. A values array
+// that will be used to store the entries of the sparse matrix and
+// some sort of a layout object that tells the user the sparsity
+// structure and layout of the values array. For example in case of
+// the TripletSparseMatrix, this information is carried in the rows
+// and cols arrays and for the BlockSparseMatrix, this information is
+// carried in the CompressedRowBlockStructure object.
+//
+// This interface deliberately does not contain any information about
+// the structure of the sparse matrix as that seems to be highly
+// matrix type dependent and we are at this stage unable to come up
+// with an efficient high level interface that spans multiple sparse
+// matrix types.
+class SparseMatrix : public LinearOperator {
+ public:
+ virtual ~SparseMatrix();
+
+ // y += Ax;
+ virtual void RightMultiply(const double* x, double* y) const = 0;
+ // y += A'x;
+ virtual void LeftMultiply(const double* x, double* y) const = 0;
+
+ // In MATLAB notation sum(A.*A, 1)
+ virtual void SquaredColumnNorm(double* x) const = 0;
+ // A = A * diag(scale)
+ virtual void ScaleColumns(const double* scale) = 0;
+
+ // A = 0. A->num_nonzeros() == 0 is true after this call. The
+ // sparsity pattern is preserved.
+ virtual void SetZero() = 0;
+
+ // Resize and populate dense_matrix with a dense version of the
+ // sparse matrix.
+ virtual void ToDenseMatrix(Matrix* dense_matrix) const = 0;
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+ // Dump the sparse matrix to a proto. Destroys the contents of proto.
+ virtual void ToProto(SparseMatrixProto* proto) const = 0;
+#endif
+
+ // Write out the matrix as a sequence of (i,j,s) triplets. This
+ // format is useful for loading the matrix into MATLAB/octave as a
+ // sparse matrix.
+ virtual void ToTextFile(FILE* file) const = 0;
+
+ // Accessors for the values array that stores the entries of the
+  // sparse matrix. The exact interpretation of the values of this
+ // array depends on the particular kind of SparseMatrix being
+ // accessed.
+ virtual double* mutable_values() = 0;
+ virtual const double* values() const = 0;
+
+ virtual int num_rows() const = 0;
+ virtual int num_cols() const = 0;
+ virtual int num_nonzeros() const = 0;
+};
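+
+// As an illustration (not part of the interface above), a matrix-free
+// solver might use a SparseMatrix J to form J'J x products, where x
+// points to J.num_cols() doubles:
+//
+//   Vector Jx = Vector::Zero(J.num_rows());
+//   J.RightMultiply(x, Jx.data());             // Jx += J * x
+//   Vector JtJx = Vector::Zero(J.num_cols());
+//   J.LeftMultiply(Jx.data(), JtJx.data());    // JtJx += J' * (J * x)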
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_SPARSE_MATRIX_H_
diff --git a/internal/ceres/sparse_normal_cholesky_solver.cc b/internal/ceres/sparse_normal_cholesky_solver.cc
new file mode 100644
index 0000000..9e00b44
--- /dev/null
+++ b/internal/ceres/sparse_normal_cholesky_solver.cc
@@ -0,0 +1,253 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/sparse_normal_cholesky_solver.h"
+
+#include <algorithm>
+#include <cstring>
+#include <ctime>
+
+#ifndef CERES_NO_CXSPARSE
+#include "cs.h"
+#endif
+
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/linear_solver.h"
+#include "ceres/suitesparse.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+SparseNormalCholeskySolver::SparseNormalCholeskySolver(
+ const LinearSolver::Options& options)
+ : options_(options) {
+#ifndef CERES_NO_SUITESPARSE
+ factor_ = NULL;
+#endif
+
+#ifndef CERES_NO_CXSPARSE
+ cxsparse_factor_ = NULL;
+#endif // CERES_NO_CXSPARSE
+}
+
+SparseNormalCholeskySolver::~SparseNormalCholeskySolver() {
+#ifndef CERES_NO_SUITESPARSE
+ if (factor_ != NULL) {
+ ss_.Free(factor_);
+ factor_ = NULL;
+ }
+#endif
+
+#ifndef CERES_NO_CXSPARSE
+ if (cxsparse_factor_ != NULL) {
+ cxsparse_.Free(cxsparse_factor_);
+ cxsparse_factor_ = NULL;
+ }
+#endif // CERES_NO_CXSPARSE
+}
+
+LinearSolver::Summary SparseNormalCholeskySolver::SolveImpl(
+ CompressedRowSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double * x) {
+ switch (options_.sparse_linear_algebra_library) {
+ case SUITE_SPARSE:
+ return SolveImplUsingSuiteSparse(A, b, per_solve_options, x);
+ case CX_SPARSE:
+ return SolveImplUsingCXSparse(A, b, per_solve_options, x);
+ default:
+ LOG(FATAL) << "Unknown sparse linear algebra library : "
+ << options_.sparse_linear_algebra_library;
+ }
+
+ // Unreachable, but not all compilers can prove that the switch above
+ // never falls through, so repeat the fatal log before returning.
+ LOG(FATAL) << "Unknown sparse linear algebra library : "
+ << options_.sparse_linear_algebra_library;
+ return LinearSolver::Summary();
+}
+
+#ifndef CERES_NO_CXSPARSE
+LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
+ CompressedRowSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double * x) {
+ LinearSolver::Summary summary;
+ summary.num_iterations = 1;
+ const int num_cols = A->num_cols();
+ Vector Atb = Vector::Zero(num_cols);
+ A->LeftMultiply(b, Atb.data());
+
+ if (per_solve_options.D != NULL) {
+ // Temporarily append a diagonal block to the A matrix, but undo
+ // it before returning the matrix to the user.
+ CompressedRowSparseMatrix D(per_solve_options.D, num_cols);
+ A->AppendRows(D);
+ }
+
+ VectorRef(x, num_cols).setZero();
+
+ // Wrap the augmented Jacobian in a compressed sparse column matrix.
+ cs_di At = cxsparse_.CreateSparseMatrixTransposeView(A);
+
+ // Compute the normal equations J'J delta = J'f and solve them
+ // using a sparse Cholesky factorization. Note that, unlike with
+ // SuiteSparse, we have to explicitly compute the transpose of Jt
+ // and then form the normal equations before they can be
+ // factorized. CHOLMOD/SuiteSparse, on the other hand, can work
+ // directly off of Jt to compute the Cholesky factorization of the
+ // normal equations.
+ cs_di* A2 = cs_transpose(&At, 1);
+ cs_di* AtA = cs_multiply(&At,A2);
+
+ cxsparse_.Free(A2);
+ if (per_solve_options.D != NULL) {
+ A->DeleteRows(num_cols);
+ }
+
+ // Compute symbolic factorization if not available.
+ if (cxsparse_factor_ == NULL) {
+ cxsparse_factor_ = CHECK_NOTNULL(cxsparse_.AnalyzeCholesky(AtA));
+ }
+
+ // Solve the linear system.
+ if (cxsparse_.SolveCholesky(AtA, cxsparse_factor_, Atb.data())) {
+ VectorRef(x, Atb.rows()) = Atb;
+ summary.termination_type = TOLERANCE;
+ }
+
+ cxsparse_.Free(AtA);
+ return summary;
+}
+#else
+LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
+ CompressedRowSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double * x) {
+ LOG(FATAL) << "No CXSparse support in Ceres.";
+
+ // Unreachable but MSVC does not know this.
+ return LinearSolver::Summary();
+}
+#endif
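
For reference, the linear system both backends above solve is the regularized normal equations (A'A + D'D) x = A'b. The following dense Eigen sketch is illustrative only and is not part of this patch; the solver never forms A'A densely, the function name is made up, and D stands for the diagonal supplied via per_solve_options.D.

#include "Eigen/Dense"

// Dense reference for the sparse solves above: x = (A'A + D'D)^-1 A'b.
Eigen::VectorXd DenseNormalEquationsReference(const Eigen::MatrixXd& A,
                                              const Eigen::VectorXd& D,
                                              const Eigen::VectorXd& b) {
  Eigen::MatrixXd lhs = A.transpose() * A;         // A'A
  lhs.diagonal() += D.array().square().matrix();   // add D'D for diagonal D
  const Eigen::VectorXd rhs = A.transpose() * b;   // A'b
  return lhs.llt().solve(rhs);                     // dense Cholesky solve
}
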
+
+#ifndef CERES_NO_SUITESPARSE
+LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
+ CompressedRowSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double * x) {
+ const time_t start_time = time(NULL);
+ const int num_cols = A->num_cols();
+
+ LinearSolver::Summary summary;
+ Vector Atb = Vector::Zero(num_cols);
+ A->LeftMultiply(b, Atb.data());
+
+ if (per_solve_options.D != NULL) {
+ // Temporarily append a diagonal block to the A matrix, but undo it before
+ // returning the matrix to the user.
+ CompressedRowSparseMatrix D(per_solve_options.D, num_cols);
+ A->AppendRows(D);
+ }
+
+ VectorRef(x, num_cols).setZero();
+
+ scoped_ptr<cholmod_sparse> lhs(ss_.CreateSparseMatrixTransposeView(A));
+ CHECK_NOTNULL(lhs.get());
+
+ cholmod_dense* rhs = ss_.CreateDenseVector(Atb.data(), num_cols, num_cols);
+ const time_t init_time = time(NULL);
+
+ if (factor_ == NULL) {
+ if (options_.use_block_amd) {
+ factor_ = ss_.BlockAnalyzeCholesky(lhs.get(),
+ A->col_blocks(),
+ A->row_blocks());
+ } else {
+ factor_ = ss_.AnalyzeCholesky(lhs.get());
+ }
+
+ if (VLOG_IS_ON(2)) {
+ cholmod_print_common("Symbolic Analysis", ss_.mutable_cc());
+ }
+ }
+
+ CHECK_NOTNULL(factor_);
+
+ const time_t symbolic_time = time(NULL);
+
+ cholmod_dense* sol = ss_.SolveCholesky(lhs.get(), factor_, rhs);
+ const time_t solve_time = time(NULL);
+
+ ss_.Free(rhs);
+ rhs = NULL;
+
+ if (per_solve_options.D != NULL) {
+ A->DeleteRows(num_cols);
+ }
+
+ summary.num_iterations = 1;
+ if (sol != NULL) {
+ memcpy(x, sol->x, num_cols * sizeof(*x));
+
+ ss_.Free(sol);
+ sol = NULL;
+ summary.termination_type = TOLERANCE;
+ }
+
+ const time_t cleanup_time = time(NULL);
+ VLOG(2) << "time (sec) total: " << (cleanup_time - start_time)
+ << " init: " << (init_time - start_time)
+ << " symbolic: " << (symbolic_time - init_time)
+ << " solve: " << (solve_time - symbolic_time)
+ << " cleanup: " << (cleanup_time - solve_time);
+ return summary;
+}
+#else
+LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
+ CompressedRowSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double * x) {
+ LOG(FATAL) << "No SuiteSparse support in Ceres.";
+
+ // Unreachable but MSVC does not know this.
+ return LinearSolver::Summary();
+}
+#endif
+
+} // namespace internal
+} // namespace ceres
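
A hypothetical caller-side sketch of how the dispatch in SolveImpl is driven; the helper function below is not part of Ceres, but the options field and enum values it sets are the ones tested in the switch above.

#include "ceres/linear_solver.h"
#include "ceres/sparse_normal_cholesky_solver.h"

// Construct a solver that will dispatch to SuiteSparse or CXSparse.
ceres::internal::SparseNormalCholeskySolver* MakeSparseNormalCholeskySolver(
    bool use_suitesparse) {
  ceres::internal::LinearSolver::Options options;
  options.sparse_linear_algebra_library =
      use_suitesparse ? ceres::SUITE_SPARSE : ceres::CX_SPARSE;
  // The caller owns the returned solver.
  return new ceres::internal::SparseNormalCholeskySolver(options);
}
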
diff --git a/internal/ceres/sparse_normal_cholesky_solver.h b/internal/ceres/sparse_normal_cholesky_solver.h
new file mode 100644
index 0000000..40d9e0a
--- /dev/null
+++ b/internal/ceres/sparse_normal_cholesky_solver.h
@@ -0,0 +1,93 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// A solver for sparse linear least squares problems, based on solving
+// the normal equations via a sparse Cholesky factorization.
+
+#ifndef CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
+#define CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
+
+#include "ceres/cxsparse.h"
+#include "ceres/linear_solver.h"
+#include "ceres/internal/macros.h"
+#include "ceres/suitesparse.h"
+
+namespace ceres {
+namespace internal {
+
+class CompressedRowSparseMatrix;
+
+// Solves the normal equations (A'A + D'D) x = A'b, using a sparse Cholesky
+// factorization from either CHOLMOD/SuiteSparse or CXSparse.
+class SparseNormalCholeskySolver : public CompressedRowSparseMatrixSolver {
+ public:
+ explicit SparseNormalCholeskySolver(const LinearSolver::Options& options);
+ virtual ~SparseNormalCholeskySolver();
+
+ private:
+ virtual LinearSolver::Summary SolveImpl(
+ CompressedRowSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& options,
+ double* x);
+
+ LinearSolver::Summary SolveImplUsingSuiteSparse(
+ CompressedRowSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& options,
+ double* x);
+
+ // Crashes if CXSparse support is not compiled in.
+ LinearSolver::Summary SolveImplUsingCXSparse(
+ CompressedRowSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& options,
+ double* x);
+
+#ifndef CERES_NO_SUITESPARSE
+ SuiteSparse ss_;
+ // Cached factorization
+ cholmod_factor* factor_;
+#endif // CERES_NO_SUITESPARSE
+
+#ifndef CERES_NO_CXSPARSE
+ CXSparse cxsparse_;
+ // Cached factorization
+ cs_dis* cxsparse_factor_;
+#endif // CERES_NO_CXSPARSE
+
+ const LinearSolver::Options options_;
+ CERES_DISALLOW_COPY_AND_ASSIGN(SparseNormalCholeskySolver);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
diff --git a/internal/ceres/split.cc b/internal/ceres/split.cc
new file mode 100644
index 0000000..4fa1bd4
--- /dev/null
+++ b/internal/ceres/split.cc
@@ -0,0 +1,115 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include <string>
+#include <vector>
+#include <iterator>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+
+// If we know how much to allocate for a vector of strings, we can allocate the
+// vector<string> only once and directly to the right size. This saves between
+// 33% and 66% of the memory needed for the result, and runs faster in
+// microbenchmarks.
+//
+// The reserve is only implemented for the single character delimiter.
+//
+// The implementation for counting is cut-and-pasted from
+// SplitStringToIteratorUsing. I could have written my own counting iterator
+// and used the existing template function, but this is probably clearer and
+// more likely to be optimized to reasonable code.
+static int CalculateReserveForVector(const string& full, const char* delim) {
+ int count = 0;
+ if (delim[0] != '\0' && delim[1] == '\0') {
+ // Optimize the common case where delim is a single character.
+ char c = delim[0];
+ const char* p = full.data();
+ const char* end = p + full.size();
+ while (p != end) {
+ if (*p == c) { // This could be optimized with hasless(v,1) trick.
+ ++p;
+ } else {
+ while (++p != end && *p != c) {
+ // Skip to the next occurrence of the delimiter.
+ }
+ ++count;
+ }
+ }
+ }
+ return count;
+}
+
+template <typename StringType, typename ITR>
+static inline
+void SplitStringToIteratorUsing(const StringType& full,
+ const char* delim,
+ ITR& result) {
+ // Optimize the common case where delim is a single character.
+ if (delim[0] != '\0' && delim[1] == '\0') {
+ char c = delim[0];
+ const char* p = full.data();
+ const char* end = p + full.size();
+ while (p != end) {
+ if (*p == c) {
+ ++p;
+ } else {
+ const char* start = p;
+ while (++p != end && *p != c) {
+ // Skip to the next occurrence of the delimiter.
+ }
+ *result++ = StringType(start, p - start);
+ }
+ }
+ return;
+ }
+
+ string::size_type begin_index, end_index;
+ begin_index = full.find_first_not_of(delim);
+ while (begin_index != string::npos) {
+ end_index = full.find_first_of(delim, begin_index);
+ if (end_index == string::npos) {
+ *result++ = full.substr(begin_index);
+ return;
+ }
+ *result++ = full.substr(begin_index, (end_index - begin_index));
+ begin_index = full.find_first_not_of(delim, end_index);
+ }
+}
+
+void SplitStringUsing(const string& full,
+ const char* delim,
+ vector<string>* result) {
+ result->reserve(result->size() + CalculateReserveForVector(full, delim));
+ back_insert_iterator< vector<string> > it(*result);
+ SplitStringToIteratorUsing(full, delim, it);
+}
+
+} // namespace ceres
diff --git a/internal/ceres/split.h b/internal/ceres/split.h
new file mode 100644
index 0000000..4df48c3
--- /dev/null
+++ b/internal/ceres/split.h
@@ -0,0 +1,21 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_SPLIT_H_
+#define CERES_INTERNAL_SPLIT_H_
+
+#include <string>
+#include <vector>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+
+// Split a string using one or more character delimiters, presented as a
+// nul-terminated c string. Append the components to 'result'. If there are
+// consecutive delimiters, this function skips over all of them.
+void SplitStringUsing(const string& full, const char* delim,
+ vector<string>* res);
+
+} // namespace ceres
+
+#endif // CERES_INTERNAL_SPLIT_H_
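
A minimal usage sketch of the function declared above (illustrative only, not part of the patch); because consecutive delimiters are skipped, no empty tokens are produced.

#include <string>
#include <vector>
#include "ceres/split.h"

void SplitExample() {
  std::vector<std::string> tokens;
  // Both ',' and ';' act as delimiters; the empty field between the two
  // consecutive commas is skipped.
  ceres::SplitStringUsing("a,,b;c", ",;", &tokens);
  // tokens now holds {"a", "b", "c"}.
}
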
diff --git a/internal/ceres/stl_util.h b/internal/ceres/stl_util.h
new file mode 100644
index 0000000..a1a19e8
--- /dev/null
+++ b/internal/ceres/stl_util.h
@@ -0,0 +1,75 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_STL_UTIL_H_
+#define CERES_INTERNAL_STL_UTIL_H_
+
+namespace ceres {
+
+// STLDeleteContainerPointers()
+// For a range within a container of pointers, calls delete
+// (non-array version) on these pointers.
+// NOTE: for these three functions, we could just implement a DeleteObject
+// functor and then call for_each() on the range and functor, but this
+// requires us to pull in all of algorithm.h, which seems expensive.
+// For hash_[multi]set, it is important that this deletes behind the iterator
+// because the hash_set may call the hash function on the iterator when it is
+// advanced, which could result in the hash function trying to dereference a
+// stale pointer.
+template <class ForwardIterator>
+void STLDeleteContainerPointers(ForwardIterator begin,
+ ForwardIterator end) {
+ while (begin != end) {
+ ForwardIterator temp = begin;
+ ++begin;
+ delete *temp;
+ }
+}
+
+// STLDeleteElements() deletes all the elements in an STL container and clears
+// the container. This function is suitable for use with a vector, set,
+// hash_set, or any other STL container which defines sensible begin(), end(),
+// and clear() methods.
+//
+// If container is NULL, this function is a no-op.
+//
+// As an alternative to calling STLDeleteElements() directly, consider an
+// RAII helper that invokes it from its destructor, so that the container's
+// elements are deleted when the helper goes out of scope.
+template <class T>
+void STLDeleteElements(T *container) {
+ if (!container) return;
+ STLDeleteContainerPointers(container->begin(), container->end());
+ container->clear();
+}
+
+} // namespace ceres
+
+#endif // CERES_INTERNAL_STL_UTIL_H_
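
A short usage sketch (illustrative only; Block is a stand-in type, not a Ceres class): a container that owns raw pointers is emptied and its elements deleted in one call.

#include <vector>
#include "ceres/stl_util.h"

struct Block { int size; };  // hypothetical element type for the sketch

void DeleteOwnedBlocks(std::vector<Block*>* blocks) {
  // Deletes every pointed-to Block and clears the vector.
  ceres::STLDeleteElements(blocks);
}
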
diff --git a/internal/ceres/stringprintf.cc b/internal/ceres/stringprintf.cc
new file mode 100644
index 0000000..c0f3522
--- /dev/null
+++ b/internal/ceres/stringprintf.cc
@@ -0,0 +1,126 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sanjay Ghemawat
+
+#include <cerrno>
+#include <cstdarg> // For va_list and related operations
+#include <cstdio> // MSVC requires this for _vsnprintf
+#include <string>
+#include <vector>
+
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+#ifdef _MSC_VER
+enum { IS_COMPILER_MSVC = 1 };
+#define va_copy(d,s) ((d) = (s))
+#else
+enum { IS_COMPILER_MSVC = 0 };
+#endif
+
+void StringAppendV(string* dst, const char* format, va_list ap) {
+ // First try with a small fixed size buffer
+ char space[1024];
+
+ // It's possible for methods that use a va_list to invalidate
+ // the data in it upon use. The fix is to make a copy
+ // of the structure before using it and use that copy instead.
+ va_list backup_ap;
+ va_copy(backup_ap, ap);
+ int result = vsnprintf(space, sizeof(space), format, backup_ap);
+ va_end(backup_ap);
+
+ // Compare as int so that a negative (error) result also enters this branch.
+ if (result < static_cast<int>(sizeof(space))) {
+ if (result >= 0) {
+ // Normal case -- everything fit.
+ dst->append(space, result);
+ return;
+ }
+
+ if (IS_COMPILER_MSVC) {
+ // Error or MSVC running out of space. MSVC 8.0 and higher
+ // can be asked about space needed with the special idiom below:
+ va_copy(backup_ap, ap);
+ result = vsnprintf(NULL, 0, format, backup_ap);
+ va_end(backup_ap);
+ }
+
+ if (result < 0) {
+ // Just an error.
+ return;
+ }
+ }
+
+ // Increase the buffer size to the size requested by vsnprintf,
+ // plus one for the closing \0.
+ int length = result+1;
+ char* buf = new char[length];
+
+ // Restore the va_list before we use it again
+ va_copy(backup_ap, ap);
+ result = vsnprintf(buf, length, format, backup_ap);
+ va_end(backup_ap);
+
+ if (result >= 0 && result < length) {
+ // It fit
+ dst->append(buf, result);
+ }
+ delete[] buf;
+}
+
+
+string StringPrintf(const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ string result;
+ StringAppendV(&result, format, ap);
+ va_end(ap);
+ return result;
+}
+
+const string& SStringPrintf(string* dst, const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ dst->clear();
+ StringAppendV(dst, format, ap);
+ va_end(ap);
+ return *dst;
+}
+
+void StringAppendF(string* dst, const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ StringAppendV(dst, format, ap);
+ va_end(ap);
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/stringprintf.h b/internal/ceres/stringprintf.h
new file mode 100644
index 0000000..f2f907a
--- /dev/null
+++ b/internal/ceres/stringprintf.h
@@ -0,0 +1,89 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sanjay Ghemawat
+//
+// Printf variants that place their output in a C++ string.
+//
+// Usage:
+// string result = StringPrintf("%d %s\n", 10, "hello");
+// SStringPrintf(&result, "%d %s\n", 10, "hello");
+// StringAppendF(&result, "%d %s\n", 20, "there");
+
+#ifndef CERES_INTERNAL_STRINGPRINTF_H_
+#define CERES_INTERNAL_STRINGPRINTF_H_
+
+#include <cstdarg>
+#include <string>
+
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+#if (defined(__GNUC__) || defined(__clang__))
+// Tell the compiler to do printf format string checking if the compiler
+// supports it; see the 'format' attribute in
+// <http://gcc.gnu.org/onlinedocs/gcc-4.3.0/gcc/Function-Attributes.html>.
+//
+// N.B.: As the GCC manual states, "[s]ince non-static C++ methods
+// have an implicit 'this' argument, the arguments of such methods
+// should be counted from two, not one."
+#define CERES_PRINTF_ATTRIBUTE(string_index, first_to_check) \
+ __attribute__((__format__ (__printf__, string_index, first_to_check)))
+#define CERES_SCANF_ATTRIBUTE(string_index, first_to_check) \
+ __attribute__((__format__ (__scanf__, string_index, first_to_check)))
+#else
+#define CERES_PRINTF_ATTRIBUTE(string_index, first_to_check)
+#define CERES_SCANF_ATTRIBUTE(string_index, first_to_check)
+#endif
+
+// Return a C++ string.
+extern string StringPrintf(const char* format, ...)
+ // Tell the compiler to do printf format string checking.
+ CERES_PRINTF_ATTRIBUTE(1,2);
+
+// Store result into a supplied string and return it.
+extern const string& SStringPrintf(string* dst, const char* format, ...)
+ // Tell the compiler to do printf format string checking.
+ CERES_PRINTF_ATTRIBUTE(2,3);
+
+// Append result to a supplied string.
+extern void StringAppendF(string* dst, const char* format, ...)
+ // Tell the compiler to do printf format string checking.
+ CERES_PRINTF_ATTRIBUTE(2,3);
+
+// Lower-level routine that takes a va_list and appends to a specified string.
+// All other routines are just convenience wrappers around it.
+extern void StringAppendV(string* dst, const char* format, va_list ap);
+
+#undef CERES_PRINTF_ATTRIBUTE
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_STRINGPRINTF_H_
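
A small sketch of the declared API in use (illustrative only; FormatIteration is a made-up helper, not part of Ceres): a line is created with StringPrintf and then extended in place with StringAppendF.

#include <string>
#include "ceres/stringprintf.h"

std::string FormatIteration(int iteration, double cost) {
  std::string line = ceres::internal::StringPrintf("iter %4d", iteration);
  ceres::internal::StringAppendF(&line, "  cost: %.6e", cost);
  return line;
}
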
diff --git a/internal/ceres/suitesparse.cc b/internal/ceres/suitesparse.cc
new file mode 100644
index 0000000..cf3c48f
--- /dev/null
+++ b/internal/ceres/suitesparse.cc
@@ -0,0 +1,346 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_NO_SUITESPARSE
+#include "ceres/suitesparse.h"
+
+#include <vector>
+#include "cholmod.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/triplet_sparse_matrix.h"
+namespace ceres {
+namespace internal {
+cholmod_sparse* SuiteSparse::CreateSparseMatrix(TripletSparseMatrix* A) {
+ cholmod_triplet triplet;
+
+ triplet.nrow = A->num_rows();
+ triplet.ncol = A->num_cols();
+ triplet.nzmax = A->max_num_nonzeros();
+ triplet.nnz = A->num_nonzeros();
+ triplet.i = reinterpret_cast<void*>(A->mutable_rows());
+ triplet.j = reinterpret_cast<void*>(A->mutable_cols());
+ triplet.x = reinterpret_cast<void*>(A->mutable_values());
+ triplet.stype = 0; // Matrix is not symmetric.
+ triplet.itype = CHOLMOD_INT;
+ triplet.xtype = CHOLMOD_REAL;
+ triplet.dtype = CHOLMOD_DOUBLE;
+
+ return cholmod_triplet_to_sparse(&triplet, triplet.nnz, &cc_);
+}
+
+
+cholmod_sparse* SuiteSparse::CreateSparseMatrixTranspose(
+ TripletSparseMatrix* A) {
+ cholmod_triplet triplet;
+
+ triplet.ncol = A->num_rows(); // swap row and columns
+ triplet.nrow = A->num_cols();
+ triplet.nzmax = A->max_num_nonzeros();
+ triplet.nnz = A->num_nonzeros();
+
+ // swap rows and columns
+ triplet.j = reinterpret_cast<void*>(A->mutable_rows());
+ triplet.i = reinterpret_cast<void*>(A->mutable_cols());
+ triplet.x = reinterpret_cast<void*>(A->mutable_values());
+ triplet.stype = 0; // Matrix is not symmetric.
+ triplet.itype = CHOLMOD_INT;
+ triplet.xtype = CHOLMOD_REAL;
+ triplet.dtype = CHOLMOD_DOUBLE;
+
+ return cholmod_triplet_to_sparse(&triplet, triplet.nnz, &cc_);
+}
+
+cholmod_sparse* SuiteSparse::CreateSparseMatrixTransposeView(
+ CompressedRowSparseMatrix* A) {
+ cholmod_sparse* m = new cholmod_sparse_struct;
+ m->nrow = A->num_cols();
+ m->ncol = A->num_rows();
+ m->nzmax = A->num_nonzeros();
+
+ m->p = reinterpret_cast<void*>(A->mutable_rows());
+ m->i = reinterpret_cast<void*>(A->mutable_cols());
+ m->x = reinterpret_cast<void*>(A->mutable_values());
+
+ m->stype = 0; // Matrix is not symmetric.
+ m->itype = CHOLMOD_INT;
+ m->xtype = CHOLMOD_REAL;
+ m->dtype = CHOLMOD_DOUBLE;
+ m->sorted = 1;
+ m->packed = 1;
+
+ return m;
+}
+
+cholmod_dense* SuiteSparse::CreateDenseVector(const double* x,
+ int in_size,
+ int out_size) {
+ CHECK_LE(in_size, out_size);
+ cholmod_dense* v = cholmod_zeros(out_size, 1, CHOLMOD_REAL, &cc_);
+ if (x != NULL) {
+ memcpy(v->x, x, in_size*sizeof(*x));
+ }
+ return v;
+}
+
+cholmod_factor* SuiteSparse::AnalyzeCholesky(cholmod_sparse* A) {
+ // Cholmod can try multiple re-ordering strategies to find a fill
+ // reducing ordering. Here we just tell it to use AMD with an
+ // automatic, matrix-dependent choice of supernodal versus simplicial
+ // factorization.
+ cc_.nmethods = 1;
+ cc_.method[0].ordering = CHOLMOD_AMD;
+ cc_.supernodal = CHOLMOD_AUTO;
+ cholmod_factor* factor = cholmod_analyze(A, &cc_);
+ CHECK_EQ(cc_.status, CHOLMOD_OK)
+ << "Cholmod symbolic analysis failed " << cc_.status;
+ CHECK_NOTNULL(factor);
+ return factor;
+}
+
+cholmod_factor* SuiteSparse::BlockAnalyzeCholesky(
+ cholmod_sparse* A,
+ const vector<int>& row_blocks,
+ const vector<int>& col_blocks) {
+ vector<int> ordering;
+ if (!BlockAMDOrdering(A, row_blocks, col_blocks, &ordering)) {
+ return NULL;
+ }
+ return AnalyzeCholeskyWithUserOrdering(A, ordering);
+}
+
+cholmod_factor* SuiteSparse::AnalyzeCholeskyWithUserOrdering(cholmod_sparse* A,
+ const vector<int>& ordering) {
+ CHECK_EQ(ordering.size(), A->nrow);
+ cc_.nmethods = 1;
+ cc_.method[0].ordering = CHOLMOD_GIVEN;
+ cholmod_factor* factor =
+ cholmod_analyze_p(A, const_cast<int*>(&ordering[0]), NULL, 0, &cc_);
+ CHECK_EQ(cc_.status, CHOLMOD_OK)
+ << "Cholmod symbolic analysis failed " << cc_.status;
+ CHECK_NOTNULL(factor);
+ return factor;
+}
+
+bool SuiteSparse::BlockAMDOrdering(const cholmod_sparse* A,
+ const vector<int>& row_blocks,
+ const vector<int>& col_blocks,
+ vector<int>* ordering) {
+ const int num_row_blocks = row_blocks.size();
+ const int num_col_blocks = col_blocks.size();
+
+ // Arrays storing the compressed column structure of the matrix
+ // encoding the block sparsity of A.
+ vector<int> block_cols;
+ vector<int> block_rows;
+
+ ScalarMatrixToBlockMatrix(A,
+ row_blocks,
+ col_blocks,
+ &block_rows,
+ &block_cols);
+
+ cholmod_sparse_struct block_matrix;
+ block_matrix.nrow = num_row_blocks;
+ block_matrix.ncol = num_col_blocks;
+ block_matrix.nzmax = block_rows.size();
+ block_matrix.p = reinterpret_cast<void*>(&block_cols[0]);
+ block_matrix.i = reinterpret_cast<void*>(&block_rows[0]);
+ block_matrix.x = NULL;
+ block_matrix.stype = A->stype;
+ block_matrix.itype = CHOLMOD_INT;
+ block_matrix.xtype = CHOLMOD_PATTERN;
+ block_matrix.dtype = CHOLMOD_DOUBLE;
+ block_matrix.sorted = 1;
+ block_matrix.packed = 1;
+
+ vector<int> block_ordering(num_row_blocks);
+ if (!cholmod_amd(&block_matrix, NULL, 0, &block_ordering[0], &cc_)) {
+ return false;
+ }
+
+ BlockOrderingToScalarOrdering(row_blocks, block_ordering, ordering);
+ return true;
+}
+
+void SuiteSparse::ScalarMatrixToBlockMatrix(const cholmod_sparse* A,
+ const vector<int>& row_blocks,
+ const vector<int>& col_blocks,
+ vector<int>* block_rows,
+ vector<int>* block_cols) {
+ CHECK_NOTNULL(block_rows)->clear();
+ CHECK_NOTNULL(block_cols)->clear();
+ const int num_row_blocks = row_blocks.size();
+ const int num_col_blocks = col_blocks.size();
+
+ vector<int> row_block_starts(num_row_blocks);
+ for (int i = 0, cursor = 0; i < num_row_blocks; ++i) {
+ row_block_starts[i] = cursor;
+ cursor += row_blocks[i];
+ }
+
+ // The reinterpret_cast is needed here because CHOLMOD stores arrays
+ // as void*.
+ const int* scalar_cols = reinterpret_cast<const int*>(A->p);
+ const int* scalar_rows = reinterpret_cast<const int*>(A->i);
+
+ // This loop extracts the block sparsity of the scalar sparse matrix
+ // A. It does so by iterating over the columns, but only considering
+ // the columns corresponding to the first element of each column
+ // block. Within each column, the inner loop iterates over the rows,
+ // and detects the presence of a row block by checking for the
+ // presence of a non-zero entry corresponding to its first element.
+ block_cols->push_back(0);
+ int c = 0;
+ for (int col_block = 0; col_block < num_col_blocks; ++col_block) {
+ int column_size = 0;
+ for (int idx = scalar_cols[c]; idx < scalar_cols[c + 1]; ++idx) {
+ vector<int>::const_iterator it = lower_bound(row_block_starts.begin(),
+ row_block_starts.end(),
+ scalar_rows[idx]);
+ // Since we are using lower_bound, it will return the row id
+ // where the row block starts. For everything but the first row
+ // of the block, where these values will be the same, we can
+ // skip, as we only need the first row to detect the presence of
+ // the block.
+ //
+ // For all rows but the first row in the last row block,
+ // lower_bound will return row_block_starts.end(); those can
+ // be skipped just like the non-first rows in the other row blocks.
+ if (it == row_block_starts.end() || *it != scalar_rows[idx]) {
+ continue;
+ }
+
+ block_rows->push_back(it - row_block_starts.begin());
+ ++column_size;
+ }
+ block_cols->push_back(block_cols->back() + column_size);
+ c += col_blocks[col_block];
+ }
+}
+
+void SuiteSparse::BlockOrderingToScalarOrdering(
+ const vector<int>& blocks,
+ const vector<int>& block_ordering,
+ vector<int>* scalar_ordering) {
+ CHECK_EQ(blocks.size(), block_ordering.size());
+ const int num_blocks = blocks.size();
+
+ // block_starts = [0, block1, block1 + block2 ..]
+ vector<int> block_starts(num_blocks);
+ for (int i = 0, cursor = 0; i < num_blocks ; ++i) {
+ block_starts[i] = cursor;
+ cursor += blocks[i];
+ }
+
+ scalar_ordering->resize(block_starts.back() + blocks.back());
+ int cursor = 0;
+ for (int i = 0; i < num_blocks; ++i) {
+ const int block_id = block_ordering[i];
+ const int block_size = blocks[block_id];
+ int block_position = block_starts[block_id];
+ for (int j = 0; j < block_size; ++j) {
+ (*scalar_ordering)[cursor++] = block_position++;
+ }
+ }
+}
+
+bool SuiteSparse::Cholesky(cholmod_sparse* A, cholmod_factor* L) {
+ CHECK_NOTNULL(A);
+ CHECK_NOTNULL(L);
+
+ cc_.quick_return_if_not_posdef = 1;
+ int status = cholmod_factorize(A, L, &cc_);
+ switch (cc_.status) {
+ case CHOLMOD_NOT_INSTALLED:
+ LOG(WARNING) << "Cholmod failure: method not installed.";
+ return false;
+ case CHOLMOD_OUT_OF_MEMORY:
+ LOG(WARNING) << "Cholmod failure: out of memory.";
+ return false;
+ case CHOLMOD_TOO_LARGE:
+ LOG(WARNING) << "Cholmod failure: integer overflow occurred.";
+ return false;
+ case CHOLMOD_INVALID:
+ LOG(WARNING) << "Cholmod failure: invalid input.";
+ return false;
+ case CHOLMOD_NOT_POSDEF:
+ // TODO(sameeragarwal): These two warnings require more
+ // sophisticated handling going forward. For now we will be
+ // strict and treat them as failures.
+ LOG(WARNING) << "Cholmod warning: matrix not positive definite.";
+ return false;
+ case CHOLMOD_DSMALL:
+ LOG(WARNING) << "Cholmod warning: D for LDL' or diag(L) or "
+ << "LL' has tiny absolute value.";
+ return false;
+ case CHOLMOD_OK:
+ if (status != 0) {
+ return true;
+ }
+ LOG(WARNING) << "Cholmod failure: cholmod_factorize returned zero "
+ << "but cholmod_common::status is CHOLMOD_OK."
+ << "Please report this to ceres-solver@googlegroups.com.";
+ return false;
+ default:
+ LOG(WARNING) << "Unknown cholmod return code. "
+ << "Please report this to ceres-solver@googlegroups.com.";
+ return false;
+ }
+ return false;
+}
+
+cholmod_dense* SuiteSparse::Solve(cholmod_factor* L,
+ cholmod_dense* b) {
+ if (cc_.status != CHOLMOD_OK) {
+ LOG(WARNING) << "CHOLMOD status NOT OK";
+ return NULL;
+ }
+
+ return cholmod_solve(CHOLMOD_A, L, b, &cc_);
+}
+
+cholmod_dense* SuiteSparse::SolveCholesky(cholmod_sparse* A,
+ cholmod_factor* L,
+ cholmod_dense* b) {
+ CHECK_NOTNULL(A);
+ CHECK_NOTNULL(L);
+ CHECK_NOTNULL(b);
+
+ if (Cholesky(A, L)) {
+ return Solve(L, b);
+ }
+
+ return NULL;
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_NO_SUITESPARSE
diff --git a/internal/ceres/suitesparse.h b/internal/ceres/suitesparse.h
new file mode 100644
index 0000000..eb691c0
--- /dev/null
+++ b/internal/ceres/suitesparse.h
@@ -0,0 +1,233 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// A simple C++ interface to the SuiteSparse and CHOLMOD libraries.
+
+#ifndef CERES_INTERNAL_SUITESPARSE_H_
+#define CERES_INTERNAL_SUITESPARSE_H_
+
+#ifndef CERES_NO_SUITESPARSE
+
+#include <cstring>
+#include <string>
+#include <vector>
+
+#include <glog/logging.h>
+#include "cholmod.h"
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+class CompressedRowSparseMatrix;
+class TripletSparseMatrix;
+
+// The raw CHOLMOD and SuiteSparseQR libraries have a slightly
+// cumbersome C-like calling convention. This object abstracts it away
+// and provides the user with a simpler interface. The methods here
+// cannot be static because a cholmod_common object serves as shared
+// state for all cholmod function calls.
+class SuiteSparse {
+ public:
+ SuiteSparse() { cholmod_start(&cc_); }
+ ~SuiteSparse() { cholmod_finish(&cc_); }
+
+ // Functions for building cholmod_sparse objects from sparse
+ // matrices stored in triplet form. The matrix A is not
+ // modified. Caller owns the result.
+ cholmod_sparse* CreateSparseMatrix(TripletSparseMatrix* A);
+
+ // This function works like CreateSparseMatrix, except that the
+ // return value corresponds to A' rather than A.
+ cholmod_sparse* CreateSparseMatrixTranspose(TripletSparseMatrix* A);
+
+ // Create a cholmod_sparse wrapper around the contents of A. This is
+ // a shallow object which refers to the contents of A and does not
+ // use the SuiteSparse machinery to allocate memory. It should
+ // therefore be disposed of with delete rather than with a call to
+ // Free(), unlike the objects returned by CreateSparseMatrixTranspose.
+ cholmod_sparse* CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A);
+
+ // Given a vector x, build a cholmod_dense vector of size out_size
+ // with the first in_size entries copied from x. If x is NULL, then
+ // an all zeros vector is returned. Caller owns the result.
+ cholmod_dense* CreateDenseVector(const double* x, int in_size, int out_size);
+
+ // The matrix A is scaled using the matrix whose diagonal is the
+ // vector scale. mode describes how scaling is applied. Possible
+ // values are CHOLMOD_ROW for row scaling - diag(scale) * A,
+ // CHOLMOD_COL for column scaling - A * diag(scale) and CHOLMOD_SYM
+ // for symmetric scaling which scales both the rows and the columns
+ // - diag(scale) * A * diag(scale).
+ void Scale(cholmod_dense* scale, int mode, cholmod_sparse* A) {
+ cholmod_scale(scale, mode, A, &cc_);
+ }
+
+ // Create and return a matrix m = A * A'. Caller owns the
+ // result. The matrix A is not modified.
+ cholmod_sparse* AATranspose(cholmod_sparse* A) {
+ cholmod_sparse* m = cholmod_aat(A, NULL, A->nrow, 1, &cc_);
+ m->stype = 1; // Pay attention to the upper triangular part.
+ return m;
+ }
+
+ // y = alpha * A * x + beta * y. Only y is modified.
+ void SparseDenseMultiply(cholmod_sparse* A, double alpha, double beta,
+ cholmod_dense* x, cholmod_dense* y) {
+ double alpha_[2] = {alpha, 0};
+ double beta_[2] = {beta, 0};
+ cholmod_sdmult(A, 0, alpha_, beta_, x, y, &cc_);
+ }
+
+ // Find an ordering of A or AA' (if A is unsymmetric) that minimizes
+ // the fill-in in the Cholesky factorization of the corresponding
+ // matrix. This is done by using the AMD algorithm.
+ //
+ // Using this ordering, the symbolic Cholesky factorization of A (or
+ // AA') is computed and returned.
+ //
+ // A is not modified, only the pattern of non-zeros of A is used,
+ // the actual numerical values in A are of no consequence.
+ //
+ // Caller owns the result.
+ cholmod_factor* AnalyzeCholesky(cholmod_sparse* A);
+
+ cholmod_factor* BlockAnalyzeCholesky(cholmod_sparse* A,
+ const vector<int>& row_blocks,
+ const vector<int>& col_blocks);
+
+ // If A is symmetric, then compute the symbolic Cholesky
+ // factorization of A(ordering, ordering). If A is unsymmetric, then
+ // compute the symbolic factorization of
+ // A(ordering,:) A(ordering,:)'.
+ //
+ // A is not modified, only the pattern of non-zeros of A is used,
+ // the actual numerical values in A are of no consequence.
+ //
+ // Caller owns the result.
+ cholmod_factor* AnalyzeCholeskyWithUserOrdering(cholmod_sparse* A,
+ const vector<int>& ordering);
+
+ // Use the symbolic factorization in L, to find the numerical
+ // factorization for the matrix A or AA^T. Return true if
+ // successful, false otherwise. L contains the numeric factorization
+ // on return.
+ bool Cholesky(cholmod_sparse* A, cholmod_factor* L);
+
+ // Given a Cholesky factorization of a matrix A = LL^T, solve the
+ // linear system Ax = b, and return the result. If the Solve fails
+ // NULL is returned. Caller owns the result.
+ cholmod_dense* Solve(cholmod_factor* L, cholmod_dense* b);
+
+ // Combine the calls to Cholesky and Solve into a single call. If
+ // the cholesky factorization or the solve fails, return
+ // NULL. Caller owns the result.
+ cholmod_dense* SolveCholesky(cholmod_sparse* A,
+ cholmod_factor* L,
+ cholmod_dense* b);
+
+ // By virtue of the modeling layer in Ceres being block oriented,
+ // all the matrices used by Ceres are also block oriented. When
+ // doing sparse direct factorization of these matrices the
+ // fill-reducing ordering algorithms (in particular AMD) can either
+ // be run on the block or the scalar form of these matrices. The two
+ // SuiteSparse::AnalyzeCholesky methods allow the client to
+ // compute the symbolic factorization of a matrix either by running
+ // AMD on the matrix or by supplying a user provided ordering of the rows.
+ //
+ // But since the underlying matrices are block oriented, it is worth
+ // running AMD on just the block structure of these matrices and then
+ // lifting these block orderings to a full scalar ordering. This
+ // preserves the block structure of the permuted matrix, and exposes
+ // more of the super-nodal structure of the matrix to the numerical
+ // factorization routines.
+ //
+ // Find the block oriented AMD ordering of a matrix A, whose row and
+ // column blocks are given by row_blocks and col_blocks
+ // respectively. The matrix may or may not be symmetric. The entries
+ // of col_blocks do not need to sum to the number of columns in
+ // A; if they do not, only the first sum(col_blocks) columns are used
+ // to compute the ordering.
+ bool BlockAMDOrdering(const cholmod_sparse* A,
+ const vector<int>& row_blocks,
+ const vector<int>& col_blocks,
+ vector<int>* ordering);
+
+ // Given a set of blocks and a permutation of these blocks, compute
+ // the corresponding "scalar" ordering, which is of size sum(blocks).
+ static void BlockOrderingToScalarOrdering(const vector<int>& blocks,
+ const vector<int>& block_ordering,
+ vector<int>* scalar_ordering);
+
+ // Extract the block sparsity pattern of the scalar sparse matrix
+ // A and return it in compressed column form. The compressed column
+ // form is stored in two vectors block_rows, and block_cols, which
+ // correspond to the row and column arrays in a compressed column sparse
+ // matrix.
+ //
+ // If c_ij is the block in the matrix A corresponding to row block i
+ // and column block j, then it is expected that A contains at least
+ // one non-zero entry corresponding to the top left entry of c_ij,
+ // as that entry is used to detect the presence of a non-zero c_ij.
+ static void ScalarMatrixToBlockMatrix(const cholmod_sparse* A,
+ const vector<int>& row_blocks,
+ const vector<int>& col_blocks,
+ vector<int>* block_rows,
+ vector<int>* block_cols);
+
+ void Free(cholmod_sparse* m) { cholmod_free_sparse(&m, &cc_); }
+ void Free(cholmod_dense* m) { cholmod_free_dense(&m, &cc_); }
+ void Free(cholmod_factor* m) { cholmod_free_factor(&m, &cc_); }
+
+ void Print(cholmod_sparse* m, const string& name) {
+ cholmod_print_sparse(m, const_cast<char*>(name.c_str()), &cc_);
+ }
+
+ void Print(cholmod_dense* m, const string& name) {
+ cholmod_print_dense(m, const_cast<char*>(name.c_str()), &cc_);
+ }
+
+ void Print(cholmod_triplet* m, const string& name) {
+ cholmod_print_triplet(m, const_cast<char*>(name.c_str()), &cc_);
+ }
+
+ cholmod_common* mutable_cc() { return &cc_; }
+
+ private:
+ cholmod_common cc_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_NO_SUITESPARSE
+
+#endif // CERES_INTERNAL_SUITESPARSE_H_
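
An illustrative sketch of the ownership conventions documented above (the function below is hypothetical and not part of the patch): objects returned by the Create* methods are owned by the caller and released through Free(), while the shallow views from CreateSparseMatrixTransposeView are deleted instead.

#include "ceres/suitesparse.h"
#include "ceres/triplet_sparse_matrix.h"

namespace ceres {
namespace internal {

// Compute y = A * x using only the wrapper methods declared above.
void SparseMatrixVectorProduct(TripletSparseMatrix* A, const double* x_values) {
  SuiteSparse ss;
  cholmod_sparse* m = ss.CreateSparseMatrix(A);                      // caller owns
  cholmod_dense* x =
      ss.CreateDenseVector(x_values, A->num_cols(), A->num_cols());  // caller owns
  cholmod_dense* y =
      ss.CreateDenseVector(NULL, A->num_rows(), A->num_rows());      // all zeros
  ss.SparseDenseMultiply(m, 1.0, 0.0, x, y);                         // y = A * x
  // ... use y ...
  ss.Free(y);
  ss.Free(x);
  ss.Free(m);
}

}  // namespace internal
}  // namespace ceres
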
diff --git a/internal/ceres/suitesparse_test.cc b/internal/ceres/suitesparse_test.cc
new file mode 100644
index 0000000..72e3e68
--- /dev/null
+++ b/internal/ceres/suitesparse_test.cc
@@ -0,0 +1,194 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <algorithm>
+#include "ceres/internal/port.h"
+#include "ceres/suitesparse.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(SuiteSparse, BlockPermutationToScalarPermutation) {
+ vector<int> blocks;
+ // Block structure
+ // 0 --1- ---2--- ---3--- 4
+ // [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ blocks.push_back(1);
+ blocks.push_back(2);
+ blocks.push_back(3);
+ blocks.push_back(3);
+ blocks.push_back(1);
+
+ // Block ordering
+ // [1, 0, 2, 4, 3]
+ vector<int> block_ordering;
+ block_ordering.push_back(1);
+ block_ordering.push_back(0);
+ block_ordering.push_back(2);
+ block_ordering.push_back(4);
+ block_ordering.push_back(3);
+
+ // Expected ordering
+ // [1, 2, 0, 3, 4, 5, 9, 6, 7, 8]
+ vector<int> expected_scalar_ordering;
+ expected_scalar_ordering.push_back(1);
+ expected_scalar_ordering.push_back(2);
+ expected_scalar_ordering.push_back(0);
+ expected_scalar_ordering.push_back(3);
+ expected_scalar_ordering.push_back(4);
+ expected_scalar_ordering.push_back(5);
+ expected_scalar_ordering.push_back(9);
+ expected_scalar_ordering.push_back(6);
+ expected_scalar_ordering.push_back(7);
+ expected_scalar_ordering.push_back(8);
+
+ vector<int> scalar_ordering;
+ SuiteSparse::BlockOrderingToScalarOrdering(blocks,
+ block_ordering,
+ &scalar_ordering);
+ EXPECT_EQ(scalar_ordering.size(), expected_scalar_ordering.size());
+ for (int i = 0; i < expected_scalar_ordering.size(); ++i) {
+ EXPECT_EQ(scalar_ordering[i], expected_scalar_ordering[i]);
+ }
+}
+
+// Helper function to fill the sparsity pattern of a TripletSparseMatrix.
+int FillBlock(const vector<int>& row_blocks,
+ const vector<int>& col_blocks,
+ const int row_block_id,
+ const int col_block_id,
+ int* rows,
+ int* cols) {
+ int row_pos = 0;
+ for (int i = 0; i < row_block_id; ++i) {
+ row_pos += row_blocks[i];
+ }
+
+ int col_pos = 0;
+ for (int i = 0; i < col_block_id; ++i) {
+ col_pos += col_blocks[i];
+ }
+
+ int offset = 0;
+ for (int r = 0; r < row_blocks[row_block_id]; ++r) {
+ for (int c = 0; c < col_blocks[col_block_id]; ++c, ++offset) {
+ rows[offset] = row_pos + r;
+ cols[offset] = col_pos + c;
+ }
+ }
+ return offset;
+}
+
+TEST(SuiteSparse, ScalarMatrixToBlockMatrix) {
+ // Block sparsity (row block sizes on the left, column block sizes on top).
+ //
+ //        [1 2 3 2]
+ //   [1]   x   x
+ //   [2]     x   x
+ //   [2]   x x
+ //
+ // num_nonzeros = 1 + 3 + 4 + 4 + 2 + 4 = 18
+
+ vector<int> col_blocks;
+ col_blocks.push_back(1);
+ col_blocks.push_back(2);
+ col_blocks.push_back(3);
+ col_blocks.push_back(2);
+
+ vector<int> row_blocks;
+ row_blocks.push_back(1);
+ row_blocks.push_back(2);
+ row_blocks.push_back(2);
+
+ TripletSparseMatrix tsm(5, 8, 18);
+ int* rows = tsm.mutable_rows();
+ int* cols = tsm.mutable_cols();
+ std::fill(tsm.mutable_values(), tsm.mutable_values() + 18, 1.0);
+ int offset = 0;
+
+#define CERES_TEST_FILL_BLOCK(row_block_id, col_block_id) \
+ offset += FillBlock(row_blocks, col_blocks, \
+ row_block_id, col_block_id, \
+ rows + offset, cols + offset);
+
+ CERES_TEST_FILL_BLOCK(0, 0);
+ CERES_TEST_FILL_BLOCK(2, 0);
+ CERES_TEST_FILL_BLOCK(1, 1);
+ CERES_TEST_FILL_BLOCK(2, 1);
+ CERES_TEST_FILL_BLOCK(0, 2);
+ CERES_TEST_FILL_BLOCK(1, 3);
+#undef CERES_TEST_FILL_BLOCK
+
+ tsm.set_num_nonzeros(offset);
+
+ SuiteSparse ss;
+ scoped_ptr<cholmod_sparse> ccsm(ss.CreateSparseMatrix(&tsm));
+
+ vector<int> expected_block_rows;
+ expected_block_rows.push_back(0);
+ expected_block_rows.push_back(2);
+ expected_block_rows.push_back(1);
+ expected_block_rows.push_back(2);
+ expected_block_rows.push_back(0);
+ expected_block_rows.push_back(1);
+
+ vector<int> expected_block_cols;
+ expected_block_cols.push_back(0);
+ expected_block_cols.push_back(2);
+ expected_block_cols.push_back(4);
+ expected_block_cols.push_back(5);
+ expected_block_cols.push_back(6);
+
+ vector<int> block_rows;
+ vector<int> block_cols;
+ SuiteSparse::ScalarMatrixToBlockMatrix(ccsm.get(),
+ row_blocks,
+ col_blocks,
+ &block_rows,
+ &block_cols);
+
+ EXPECT_EQ(block_cols.size(), expected_block_cols.size());
+ EXPECT_EQ(block_rows.size(), expected_block_rows.size());
+
+ for (int i = 0; i < expected_block_cols.size(); ++i) {
+ EXPECT_EQ(block_cols[i], expected_block_cols[i]);
+ }
+
+ for (int i = 0; i < expected_block_rows.size(); ++i) {
+ EXPECT_EQ(block_rows[i], expected_block_rows[i]);
+ }
+
+ ss.Free(ccsm.release());
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/symmetric_linear_solver_test.cc b/internal/ceres/symmetric_linear_solver_test.cc
new file mode 100644
index 0000000..f33adb4
--- /dev/null
+++ b/internal/ceres/symmetric_linear_solver_test.cc
@@ -0,0 +1,139 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: fredp@google.com (Fred Pighin)
+//
+// Tests for linear solvers that solve symmetric linear systems. Some
+// of this code is inherited from Fred Pighin's code for testing the
+// old Conjugate Gradients solver.
+//
+// TODO(sameeragarwal): More comprehensive testing with larger and
+// more badly conditioned problems.
+
+#include "gtest/gtest.h"
+#include "ceres/conjugate_gradients_solver.h"
+#include "ceres/linear_solver.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(ConjugateGradientTest, Solves3x3IdentitySystem) {
+ double diagonal[] = { 1.0, 1.0, 1.0 };
+ scoped_ptr<TripletSparseMatrix>
+ A(TripletSparseMatrix::CreateSparseDiagonalMatrix(diagonal, 3));
+ Vector b(3);
+ Vector x(3);
+
+ b(0) = 1.0;
+ b(1) = 2.0;
+ b(2) = 3.0;
+
+ x(0) = 1;
+ x(1) = 1;
+ x(2) = 1;
+
+ LinearSolver::Options options;
+ options.max_num_iterations = 10;
+
+ LinearSolver::PerSolveOptions per_solve_options;
+ per_solve_options.r_tolerance = 1e-9;
+
+ ConjugateGradientsSolver solver(options);
+ LinearSolver::Summary summary =
+ solver.Solve(A.get(), b.data(), per_solve_options, x.data());
+
+ EXPECT_EQ(summary.termination_type, TOLERANCE);
+ ASSERT_EQ(summary.num_iterations, 1);
+
+ ASSERT_DOUBLE_EQ(1, x(0));
+ ASSERT_DOUBLE_EQ(2, x(1));
+ ASSERT_DOUBLE_EQ(3, x(2));
+}
+
+
+TEST(ConjugateGradientTest, Solves3x3SymmetricSystem) {
+ scoped_ptr<TripletSparseMatrix> A(new TripletSparseMatrix(3, 3, 9));
+ Vector b(3);
+ Vector x(3);
+
+ // | 2 -1 0|
+ // A = |-1 2 -1| is symmetric positive definite.
+ // | 0 -1 2|
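+  //
+  // With b = (-1, 0, 3)^T below, the exact solution is x = (0, 1, 2)^T,
+  // since A * (0, 1, 2)^T = (2*0 - 1*1, -1*0 + 2*1 - 1*2, -1*1 + 2*2)^T
+  //                       = (-1, 0, 3)^T = b.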
+ int* Ai = A->mutable_rows();
+ int* Aj = A->mutable_cols();
+ double* Ax = A->mutable_values();
+ int counter = 0;
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ Ai[counter] = i;
+ Aj[counter] = j;
+ ++counter;
+ }
+ }
+ Ax[0] = 2.;
+ Ax[1] = -1.;
+ Ax[2] = 0;
+ Ax[3] = -1.;
+ Ax[4] = 2;
+ Ax[5] = -1;
+ Ax[6] = 0;
+ Ax[7] = -1;
+ Ax[8] = 2;
+ A->set_num_nonzeros(9);
+
+ b(0) = -1;
+ b(1) = 0;
+ b(2) = 3;
+
+ x(0) = 1;
+ x(1) = 1;
+ x(2) = 1;
+
+ LinearSolver::Options options;
+ options.max_num_iterations = 10;
+
+ LinearSolver::PerSolveOptions per_solve_options;
+ per_solve_options.r_tolerance = 1e-9;
+
+ ConjugateGradientsSolver solver(options);
+ LinearSolver::Summary summary =
+ solver.Solve(A.get(), b.data(), per_solve_options, x.data());
+
+ EXPECT_EQ(summary.termination_type, TOLERANCE);
+
+ ASSERT_DOUBLE_EQ(0, x(0));
+ ASSERT_DOUBLE_EQ(1, x(1));
+ ASSERT_DOUBLE_EQ(2, x(2));
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/system_test.cc b/internal/ceres/system_test.cc
new file mode 100644
index 0000000..4548bd0
--- /dev/null
+++ b/internal/ceres/system_test.cc
@@ -0,0 +1,537 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+// sameeragarwal@google.com (Sameer Agarwal)
+//
+// System level tests for Ceres. The current suite consists of two
+// tests. The first is a small test based on Powell's singular
+// function, a scalar problem with 4 variables. The second is a
+// bundle adjustment problem with 16 cameras and 22,106 points. The
+// first problem is used to sanity check the factorization based
+// solvers. The second problem is used to test the various
+// combinations of solvers, orderings, preconditioners and
+// multithreading.
+
+#include <cmath>
+#include <cstdio>
+#include <cstdlib>
+#include <string>
+
+#include "ceres/autodiff_cost_function.h"
+#include "ceres/ordered_groups.h"
+#include "ceres/problem.h"
+#include "ceres/rotation.h"
+#include "ceres/solver.h"
+#include "ceres/stringprintf.h"
+#include "ceres/test_util.h"
+#include "ceres/types.h"
+#include "gflags/gflags.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+const bool kAutomaticOrdering = true;
+const bool kUserOrdering = false;
+
+// Struct used for configuring the solver.
+struct SolverConfig {
+ SolverConfig(LinearSolverType linear_solver_type,
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library,
+ bool use_automatic_ordering)
+ : linear_solver_type(linear_solver_type),
+ sparse_linear_algebra_library(sparse_linear_algebra_library),
+ use_automatic_ordering(use_automatic_ordering),
+ preconditioner_type(IDENTITY),
+ num_threads(1) {
+ }
+
+ SolverConfig(LinearSolverType linear_solver_type,
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library,
+ bool use_automatic_ordering,
+ PreconditionerType preconditioner_type)
+ : linear_solver_type(linear_solver_type),
+ sparse_linear_algebra_library(sparse_linear_algebra_library),
+ use_automatic_ordering(use_automatic_ordering),
+ preconditioner_type(preconditioner_type),
+ num_threads(1) {
+ }
+
+ string ToString() const {
+ return StringPrintf(
+ "(%s, %s, %s, %s, %d)",
+ LinearSolverTypeToString(linear_solver_type),
+ SparseLinearAlgebraLibraryTypeToString(sparse_linear_algebra_library),
+ use_automatic_ordering ? "AUTOMATIC" : "USER",
+ PreconditionerTypeToString(preconditioner_type),
+ num_threads);
+ }
+
+ LinearSolverType linear_solver_type;
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library;
+ bool use_automatic_ordering;
+ PreconditionerType preconditioner_type;
+ int num_threads;
+};
+
+// Templated function that given a set of solver configurations,
+// instantiates a new copy of SystemTestProblem for each configuration
+// and solves it. The solutions are expected to have residuals with
+// coordinate-wise maximum absolute difference less than or equal to
+// max_abs_difference.
+//
+// The template parameter SystemTestProblem is expected to implement
+// the following interface.
+//
+// class SystemTestProblem {
+// public:
+// SystemTestProblem();
+// Problem* mutable_problem();
+// Solver::Options* mutable_solver_options();
+// };
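+//
+// For example, the PowellsFunction class defined below implements this
+// interface, and a typical invocation (mirroring the tests below) is:
+//
+//   vector<SolverConfig> configs;
+//   configs.push_back(SolverConfig(DENSE_QR, SUITE_SPARSE, kAutomaticOrdering));
+//   configs.push_back(SolverConfig(DENSE_SCHUR, SUITE_SPARSE, kAutomaticOrdering));
+//   RunSolversAndCheckTheyMatch<PowellsFunction>(configs, 1e-8);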
+template <typename SystemTestProblem>
+void RunSolversAndCheckTheyMatch(const vector<SolverConfig>& configurations,
+ const double max_abs_difference) {
+ int num_configurations = configurations.size();
+ vector<SystemTestProblem*> problems;
+ vector<Solver::Summary> summaries(num_configurations);
+
+ for (int i = 0; i < num_configurations; ++i) {
+ SystemTestProblem* system_test_problem = new SystemTestProblem();
+
+ const SolverConfig& config = configurations[i];
+
+ Solver::Options& options = *(system_test_problem->mutable_solver_options());
+ options.linear_solver_type = config.linear_solver_type;
+ options.sparse_linear_algebra_library =
+ config.sparse_linear_algebra_library;
+ options.preconditioner_type = config.preconditioner_type;
+ options.num_threads = config.num_threads;
+ options.num_linear_solver_threads = config.num_threads;
+ options.return_final_residuals = true;
+
+ if (config.use_automatic_ordering) {
+ delete options.linear_solver_ordering;
+ options.linear_solver_ordering = NULL;
+ }
+
+ LOG(INFO) << "Running solver configuration: "
+ << config.ToString();
+
+ Solve(options,
+ system_test_problem->mutable_problem(),
+ &summaries[i]);
+
+ CHECK_NE(summaries[i].termination_type, ceres::NUMERICAL_FAILURE)
+ << "Solver configuration " << i << " failed.";
+ problems.push_back(system_test_problem);
+
+    // Compare the resulting solutions to each other. Arbitrarily take
+    // the first configuration as the golden solve. We compare
+ // solutions by comparing their residual vectors. We do not
+ // compare parameter vectors because it is much more brittle and
+ // error prone to do so, since the same problem can have nearly
+ // the same residuals at two completely different positions in
+ // parameter space.
+ if (i > 0) {
+ const vector<double>& reference_residuals = summaries[0].final_residuals;
+ const vector<double>& current_residuals = summaries[i].final_residuals;
+
+ for (int j = 0; j < reference_residuals.size(); ++j) {
+ EXPECT_NEAR(current_residuals[j],
+ reference_residuals[j],
+ max_abs_difference)
+ << "Not close enough residual:" << j
+ << " reference " << reference_residuals[j]
+ << " current " << current_residuals[j];
+ }
+ }
+ }
+
+ for (int i = 0; i < num_configurations; ++i) {
+ delete problems[i];
+ }
+}
+
+// This class implements the SystemTestProblem interface and provides
+// access to an implementation of Powell's singular function.
+//
+// F = 1/2 (f1^2 + f2^2 + f3^2 + f4^2)
+//
+// f1 = x1 + 10*x2;
+// f2 = sqrt(5) * (x3 - x4)
+// f3 = (x2 - 2*x3)^2
+// f4 = sqrt(10) * (x1 - x4)^2
+//
+// The starting values are x1 = 3, x2 = -1, x3 = 0, x4 = 1.
+// The minimum is 0 at (x1, x2, x3, x4) = 0.
+//
+// From: Testing Unconstrained Optimization Software by Jorge J. More, Burton S.
+// Garbow and Kenneth E. Hillstrom in ACM Transactions on Mathematical Software,
+// Vol 7(1), March 1981.
+class PowellsFunction {
+ public:
+ PowellsFunction() {
+ x_[0] = 3.0;
+ x_[1] = -1.0;
+ x_[2] = 0.0;
+ x_[3] = 1.0;
+
+ problem_.AddResidualBlock(
+ new AutoDiffCostFunction<F1, 1, 1, 1>(new F1), NULL, &x_[0], &x_[1]);
+ problem_.AddResidualBlock(
+ new AutoDiffCostFunction<F2, 1, 1, 1>(new F2), NULL, &x_[2], &x_[3]);
+ problem_.AddResidualBlock(
+ new AutoDiffCostFunction<F3, 1, 1, 1>(new F3), NULL, &x_[1], &x_[2]);
+ problem_.AddResidualBlock(
+ new AutoDiffCostFunction<F4, 1, 1, 1>(new F4), NULL, &x_[0], &x_[3]);
+
+ options_.max_num_iterations = 10;
+ }
+
+ Problem* mutable_problem() { return &problem_; }
+ Solver::Options* mutable_solver_options() { return &options_; }
+
+ private:
+ // Templated functions used for automatically differentiated cost
+ // functions.
+ class F1 {
+ public:
+ template <typename T> bool operator()(const T* const x1,
+ const T* const x2,
+ T* residual) const {
+ // f1 = x1 + 10 * x2;
+ *residual = *x1 + T(10.0) * *x2;
+ return true;
+ }
+ };
+
+ class F2 {
+ public:
+ template <typename T> bool operator()(const T* const x3,
+ const T* const x4,
+ T* residual) const {
+ // f2 = sqrt(5) (x3 - x4)
+ *residual = T(sqrt(5.0)) * (*x3 - *x4);
+ return true;
+ }
+ };
+
+ class F3 {
+ public:
+    template <typename T> bool operator()(const T* const x2,
+                                           const T* const x3,
+                                           T* residual) const {
+      // f3 = (x2 - 2 x3)^2
+      residual[0] = (x2[0] - T(2.0) * x3[0]) * (x2[0] - T(2.0) * x3[0]);
+ return true;
+ }
+ };
+
+ class F4 {
+ public:
+ template <typename T> bool operator()(const T* const x1,
+ const T* const x4,
+ T* residual) const {
+ // f4 = sqrt(10) (x1 - x4)^2
+ residual[0] = T(sqrt(10.0)) * (x1[0] - x4[0]) * (x1[0] - x4[0]);
+ return true;
+ }
+ };
+
+ double x_[4];
+ Problem problem_;
+ Solver::Options options_;
+};
+
+TEST(SystemTest, PowellsFunction) {
+ vector<SolverConfig> configs;
+#define CONFIGURE(linear_solver, sparse_linear_algebra_library, ordering) \
+ configs.push_back(SolverConfig(linear_solver, \
+ sparse_linear_algebra_library, \
+ ordering))
+
+ CONFIGURE(DENSE_QR, SUITE_SPARSE, kAutomaticOrdering);
+ CONFIGURE(DENSE_NORMAL_CHOLESKY, SUITE_SPARSE, kAutomaticOrdering);
+ CONFIGURE(DENSE_SCHUR, SUITE_SPARSE, kAutomaticOrdering);
+
+#ifndef CERES_NO_SUITESPARSE
+ CONFIGURE(SPARSE_NORMAL_CHOLESKY, SUITE_SPARSE, kAutomaticOrdering);
+#endif // CERES_NO_SUITESPARSE
+
+#ifndef CERES_NO_CXSPARSE
+ CONFIGURE(SPARSE_NORMAL_CHOLESKY, CX_SPARSE, kAutomaticOrdering);
+#endif // CERES_NO_CXSPARSE
+
+ CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kAutomaticOrdering);
+
+#undef CONFIGURE
+
+ const double kMaxAbsoluteDifference = 1e-8;
+ RunSolversAndCheckTheyMatch<PowellsFunction>(configs, kMaxAbsoluteDifference);
+}
+
+// This class implements the SystemTestProblem interface and provides
+// access to a bundle adjustment problem. It is based on
+// examples/bundle_adjustment_example.cc. Currently a small 16 camera
+// problem is hard coded in the constructor. Going forward we may
+// extend this to a larger number of problems.
+class BundleAdjustmentProblem {
+ public:
+ BundleAdjustmentProblem() {
+ const string input_file = TestFileAbsolutePath("problem-16-22106-pre.txt");
+ ReadData(input_file);
+ BuildProblem();
+ }
+
+ ~BundleAdjustmentProblem() {
+ delete []point_index_;
+ delete []camera_index_;
+ delete []observations_;
+ delete []parameters_;
+ }
+
+ Problem* mutable_problem() { return &problem_; }
+ Solver::Options* mutable_solver_options() { return &options_; }
+
+ int num_cameras() const { return num_cameras_; }
+ int num_points() const { return num_points_; }
+ int num_observations() const { return num_observations_; }
+ const int* point_index() const { return point_index_; }
+ const int* camera_index() const { return camera_index_; }
+ const double* observations() const { return observations_; }
+ double* mutable_cameras() { return parameters_; }
+ double* mutable_points() { return parameters_ + 9 * num_cameras_; }
+
+ private:
+ void ReadData(const string& filename) {
+ FILE * fptr = fopen(filename.c_str(), "r");
+
+ if (!fptr) {
+ LOG(FATAL) << "File Error: unable to open file " << filename;
+    }
+
+ // This will die horribly on invalid files. Them's the breaks.
+ FscanfOrDie(fptr, "%d", &num_cameras_);
+ FscanfOrDie(fptr, "%d", &num_points_);
+ FscanfOrDie(fptr, "%d", &num_observations_);
+
+ VLOG(1) << "Header: " << num_cameras_
+ << " " << num_points_
+ << " " << num_observations_;
+
+ point_index_ = new int[num_observations_];
+ camera_index_ = new int[num_observations_];
+ observations_ = new double[2 * num_observations_];
+
+ num_parameters_ = 9 * num_cameras_ + 3 * num_points_;
+ parameters_ = new double[num_parameters_];
+
+ for (int i = 0; i < num_observations_; ++i) {
+ FscanfOrDie(fptr, "%d", camera_index_ + i);
+ FscanfOrDie(fptr, "%d", point_index_ + i);
+ for (int j = 0; j < 2; ++j) {
+ FscanfOrDie(fptr, "%lf", observations_ + 2*i + j);
+ }
+ }
+
+ for (int i = 0; i < num_parameters_; ++i) {
+ FscanfOrDie(fptr, "%lf", parameters_ + i);
+ }
+ }
+
+ void BuildProblem() {
+ double* points = mutable_points();
+ double* cameras = mutable_cameras();
+
+ for (int i = 0; i < num_observations(); ++i) {
+ // Each Residual block takes a point and a camera as input and
+ // outputs a 2 dimensional residual.
+ CostFunction* cost_function =
+ new AutoDiffCostFunction<BundlerResidual, 2, 9, 3>(
+ new BundlerResidual(observations_[2*i + 0],
+ observations_[2*i + 1]));
+
+      // Each observation corresponds to a pair of a camera and a point
+ // which are identified by camera_index()[i] and
+ // point_index()[i] respectively.
+ double* camera = cameras + 9 * camera_index_[i];
+ double* point = points + 3 * point_index()[i];
+ problem_.AddResidualBlock(cost_function, NULL, camera, point);
+ }
+
+ options_.linear_solver_ordering = new ParameterBlockOrdering;
+
+ // The points come before the cameras.
+ for (int i = 0; i < num_points_; ++i) {
+ options_.linear_solver_ordering->AddElementToGroup(points + 3 * i, 0);
+ }
+
+ for (int i = 0; i < num_cameras_; ++i) {
+ options_.linear_solver_ordering->AddElementToGroup(cameras + 9 * i, 1);
+ }
+
+ options_.max_num_iterations = 25;
+ options_.function_tolerance = 1e-10;
+ options_.gradient_tolerance = 1e-10;
+ options_.parameter_tolerance = 1e-10;
+ }
+
+ template<typename T>
+ void FscanfOrDie(FILE *fptr, const char *format, T *value) {
+ int num_scanned = fscanf(fptr, format, value);
+ if (num_scanned != 1) {
+ LOG(FATAL) << "Invalid UW data file.";
+ }
+ }
+
+ // Templated pinhole camera model. The camera is parameterized
+ // using 9 parameters. 3 for rotation, 3 for translation, 1 for
+  // focal length and 2 for radial distortion. The principal point is
+  // not modeled (i.e. it is assumed to be located at the image center).
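+  //
+  // Concretely, the camera parameter block used below is laid out as:
+  //   camera[0, 1, 2] : angle-axis rotation,
+  //   camera[3, 4, 5] : translation,
+  //   camera[6]       : focal length,
+  //   camera[7, 8]    : second and fourth order radial distortion.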
+ struct BundlerResidual {
+ // (u, v): the position of the observation with respect to the image
+ // center point.
+ BundlerResidual(double u, double v): u(u), v(v) {}
+
+ template <typename T>
+ bool operator()(const T* const camera,
+ const T* const point,
+ T* residuals) const {
+ T p[3];
+ AngleAxisRotatePoint(camera, point, p);
+
+ // Add the translation vector
+ p[0] += camera[3];
+ p[1] += camera[4];
+ p[2] += camera[5];
+
+ const T& focal = camera[6];
+ const T& l1 = camera[7];
+ const T& l2 = camera[8];
+
+ // Compute the center of distortion. The sign change comes from
+ // the camera model that Noah Snavely's Bundler assumes, whereby
+ // the camera coordinate system has a negative z axis.
+ T xp = - focal * p[0] / p[2];
+ T yp = - focal * p[1] / p[2];
+
+ // Apply second and fourth order radial distortion.
+ T r2 = xp*xp + yp*yp;
+ T distortion = T(1.0) + r2 * (l1 + l2 * r2);
+
+ residuals[0] = distortion * xp - T(u);
+ residuals[1] = distortion * yp - T(v);
+
+ return true;
+ }
+
+ double u;
+ double v;
+ };
+
+
+ Problem problem_;
+ Solver::Options options_;
+
+ int num_cameras_;
+ int num_points_;
+ int num_observations_;
+ int num_parameters_;
+
+ int* point_index_;
+ int* camera_index_;
+ double* observations_;
+ // The parameter vector is laid out as follows
+ // [camera_1, ..., camera_n, point_1, ..., point_m]
+ double* parameters_;
+};
+
+TEST(SystemTest, BundleAdjustmentProblem) {
+ vector<SolverConfig> configs;
+
+#define CONFIGURE(linear_solver, sparse_linear_algebra_library, ordering, preconditioner) \
+ configs.push_back(SolverConfig(linear_solver, \
+ sparse_linear_algebra_library, \
+ ordering, \
+ preconditioner))
+
+#ifndef CERES_NO_SUITESPARSE
+ CONFIGURE(SPARSE_NORMAL_CHOLESKY, SUITE_SPARSE, kAutomaticOrdering, IDENTITY);
+ CONFIGURE(SPARSE_NORMAL_CHOLESKY, SUITE_SPARSE, kUserOrdering, IDENTITY);
+
+ CONFIGURE(SPARSE_SCHUR, SUITE_SPARSE, kAutomaticOrdering, IDENTITY);
+ CONFIGURE(SPARSE_SCHUR, SUITE_SPARSE, kUserOrdering, IDENTITY);
+#endif // CERES_NO_SUITESPARSE
+
+#ifndef CERES_NO_CXSPARSE
+ CONFIGURE(SPARSE_SCHUR, CX_SPARSE, kAutomaticOrdering, IDENTITY);
+ CONFIGURE(SPARSE_SCHUR, CX_SPARSE, kUserOrdering, IDENTITY);
+#endif // CERES_NO_CXSPARSE
+
+ CONFIGURE(DENSE_SCHUR, SUITE_SPARSE, kAutomaticOrdering, IDENTITY);
+ CONFIGURE(DENSE_SCHUR, SUITE_SPARSE, kUserOrdering, IDENTITY);
+
+ CONFIGURE(CGNR, SUITE_SPARSE, kAutomaticOrdering, JACOBI);
+ CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kUserOrdering, JACOBI);
+
+#ifndef CERES_NO_SUITESPARSE
+ CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kUserOrdering, SCHUR_JACOBI);
+ CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kUserOrdering, CLUSTER_JACOBI);
+ CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kUserOrdering, CLUSTER_TRIDIAGONAL);
+#endif // CERES_NO_SUITESPARSE
+
+ CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kAutomaticOrdering, JACOBI);
+
+#ifndef CERES_NO_SUITESPARSE
+ CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kAutomaticOrdering, SCHUR_JACOBI);
+ CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kAutomaticOrdering, CLUSTER_JACOBI);
+ CONFIGURE(ITERATIVE_SCHUR, SUITE_SPARSE, kAutomaticOrdering, CLUSTER_TRIDIAGONAL);
+#endif // CERES_NO_SUITESPARSE
+
+#undef CONFIGURE
+
+ // Single threaded evaluators and linear solvers.
+ const double kMaxAbsoluteDifference = 1e-4;
+ RunSolversAndCheckTheyMatch<BundleAdjustmentProblem>(configs,
+ kMaxAbsoluteDifference);
+
+#ifdef CERES_USE_OPENMP
+ // Multithreaded evaluators and linear solvers.
+ for (int i = 0; i < configs.size(); ++i) {
+ configs[i].num_threads = 2;
+ }
+ RunSolversAndCheckTheyMatch<BundleAdjustmentProblem>(configs,
+ kMaxAbsoluteDifference);
+#endif // CERES_USE_OPENMP
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/test_util.cc b/internal/ceres/test_util.cc
new file mode 100644
index 0000000..a3f67bd
--- /dev/null
+++ b/internal/ceres/test_util.cc
@@ -0,0 +1,127 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Utility functions useful for testing.
+
+#include <cmath>
+#include "ceres/file.h"
+#include "ceres/stringprintf.h"
+#include "gflags/gflags.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+DECLARE_string(test_srcdir);
+
+// This macro is used to inject additional path information specific
+// to the build system.
+
+#ifndef CERES_TEST_SRCDIR_SUFFIX
+#define CERES_TEST_SRCDIR_SUFFIX ""
+#endif
+
+namespace ceres {
+namespace internal {
+
+bool ExpectClose(double x, double y, double max_abs_relative_difference) {
+ double absolute_difference = fabs(x - y);
+ double relative_difference = absolute_difference / std::max(fabs(x), fabs(y));
+ if (x == 0 || y == 0) {
+ // If x or y is exactly zero, then relative difference doesn't have any
+ // meaning. Take the absolute difference instead.
+ relative_difference = absolute_difference;
+ }
+ if (relative_difference > max_abs_relative_difference) {
+ VLOG(1) << StringPrintf("x=%17g y=%17g abs=%17g rel=%17g",
+ x, y, absolute_difference, relative_difference);
+ }
+
+ EXPECT_NEAR(relative_difference, 0.0, max_abs_relative_difference);
+ return relative_difference <= max_abs_relative_difference;
+}
+
+void ExpectArraysCloseUptoScale(int n,
+ const double* p,
+ const double* q,
+ double tol) {
+ CHECK_GT(n, 0);
+ CHECK(p);
+ CHECK(q);
+
+ double p_max = 0;
+ double q_max = 0;
+ int p_i = 0;
+ int q_i = 0;
+
+ for (int i = 0; i < n; ++i) {
+ if (std::abs(p[i]) > p_max) {
+ p_max = std::abs(p[i]);
+ p_i = i;
+ }
+ if (std::abs(q[i]) > q_max) {
+ q_max = std::abs(q[i]);
+ q_i = i;
+ }
+ }
+
+ // If both arrays are all zeros, they are equal up to scale, but
+ // for testing purposes, that's more likely to be an error than
+ // a desired result.
+ CHECK_NE(p_max, 0.0);
+ CHECK_NE(q_max, 0.0);
+
+ for (int i = 0; i < n; ++i) {
+ double p_norm = p[i] / p[p_i];
+ double q_norm = q[i] / q[q_i];
+
+ EXPECT_NEAR(p_norm, q_norm, tol) << "i=" << i;
+ }
+}
+
+void ExpectArraysClose(int n,
+ const double* p,
+ const double* q,
+ double tol) {
+ CHECK_GT(n, 0);
+ CHECK(p);
+ CHECK(q);
+
+ for (int i = 0; i < n; ++i) {
+ EXPECT_NEAR(p[i], q[i], tol) << "i=" << i;
+ }
+}
+
+string TestFileAbsolutePath(const string& filename) {
+ return JoinPath(FLAGS_test_srcdir + CERES_TEST_SRCDIR_SUFFIX,
+ filename);
+}
+
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/test_util.h b/internal/ceres/test_util.h
new file mode 100644
index 0000000..4c530de
--- /dev/null
+++ b/internal/ceres/test_util.h
@@ -0,0 +1,71 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+
+#include <string>
+#include "ceres/internal/port.h"
+
+#ifndef CERES_INTERNAL_TEST_UTIL_H_
+#define CERES_INTERNAL_TEST_UTIL_H_
+
+namespace ceres {
+namespace internal {
+
+// Expects that x and y have a relative difference of no more than
+// max_abs_relative_difference. If either x or y is zero, then the relative
+// difference is interpreted as an absolute difference.
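+//
+// For example, ExpectClose(1.0, 1.0 + 1e-10, 1e-9) succeeds, while
+// ExpectClose(0.0, 1e-8, 1e-9) fails, because a zero argument makes the
+// comparison fall back to the absolute difference.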
+bool ExpectClose(double x, double y, double max_abs_relative_difference);
+
+// Expects that for all i = 0, ..., n - 1
+//
+//   |p[i] - q[i]| < tolerance
+void ExpectArraysClose(int n,
+ const double* p,
+ const double* q,
+ double tolerance);
+
+// Expects that for all i = 0, ..., n - 1
+//
+//    |p[i] / max_norm_p - q[i] / max_norm_q| < tolerance
+//
+// where max_norm_p and max_norm_q are the maximum magnitude entries
+// of the arrays p and q respectively.
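+//
+// For example, p = {1, 2, 3} and q = {10, 20, 30} compare equal up to
+// scale, since both normalize to {1/3, 2/3, 1}.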
+void ExpectArraysCloseUptoScale(int n,
+ const double* p,
+ const double* q,
+ double tolerance);
+
+// Construct a fully qualified path for the test file depending on the
+// local build/testing environment.
+string TestFileAbsolutePath(const string& filename);
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_TEST_UTIL_H_
diff --git a/internal/ceres/triplet_sparse_matrix.cc b/internal/ceres/triplet_sparse_matrix.cc
new file mode 100644
index 0000000..a09f38e
--- /dev/null
+++ b/internal/ceres/triplet_sparse_matrix.cc
@@ -0,0 +1,307 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/triplet_sparse_matrix.h"
+
+#include <algorithm>
+#include <cstddef>
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/matrix_proto.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+TripletSparseMatrix::TripletSparseMatrix()
+ : num_rows_(0),
+ num_cols_(0),
+ max_num_nonzeros_(0),
+ num_nonzeros_(0),
+ rows_(NULL),
+ cols_(NULL),
+ values_(NULL) {}
+
+TripletSparseMatrix::~TripletSparseMatrix() {}
+
+TripletSparseMatrix::TripletSparseMatrix(int num_rows,
+ int num_cols,
+ int max_num_nonzeros)
+ : num_rows_(num_rows),
+ num_cols_(num_cols),
+ max_num_nonzeros_(max_num_nonzeros),
+ num_nonzeros_(0),
+ rows_(NULL),
+ cols_(NULL),
+ values_(NULL) {
+ // All the sizes should at least be zero
+ CHECK_GE(num_rows, 0);
+ CHECK_GE(num_cols, 0);
+ CHECK_GE(max_num_nonzeros, 0);
+ AllocateMemory();
+}
+
+TripletSparseMatrix::TripletSparseMatrix(const TripletSparseMatrix& orig)
+ : SparseMatrix(),
+ num_rows_(orig.num_rows_),
+ num_cols_(orig.num_cols_),
+ max_num_nonzeros_(orig.max_num_nonzeros_),
+ num_nonzeros_(orig.num_nonzeros_),
+ rows_(NULL),
+ cols_(NULL),
+ values_(NULL) {
+ AllocateMemory();
+ CopyData(orig);
+}
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+TripletSparseMatrix::TripletSparseMatrix(const SparseMatrixProto& outer_proto) {
+ CHECK(outer_proto.has_triplet_matrix());
+
+ const TripletSparseMatrixProto& proto = outer_proto.triplet_matrix();
+ CHECK(proto.has_num_rows());
+ CHECK(proto.has_num_cols());
+ CHECK_EQ(proto.rows_size(), proto.cols_size());
+ CHECK_EQ(proto.cols_size(), proto.values_size());
+
+ // Initialize the matrix with the appropriate size and capacity.
+ max_num_nonzeros_ = 0;
+ set_num_nonzeros(0);
+ Reserve(proto.num_nonzeros());
+ Resize(proto.num_rows(), proto.num_cols());
+ set_num_nonzeros(proto.num_nonzeros());
+
+ // Copy the entries in.
+ for (int i = 0; i < proto.num_nonzeros(); ++i) {
+ rows_[i] = proto.rows(i);
+ cols_[i] = proto.cols(i);
+ values_[i] = proto.values(i);
+ }
+}
+#endif
+
+TripletSparseMatrix& TripletSparseMatrix::operator=(
+ const TripletSparseMatrix& rhs) {
+ num_rows_ = rhs.num_rows_;
+ num_cols_ = rhs.num_cols_;
+ num_nonzeros_ = rhs.num_nonzeros_;
+ max_num_nonzeros_ = rhs.max_num_nonzeros_;
+ AllocateMemory();
+ CopyData(rhs);
+ return *this;
+}
+
+bool TripletSparseMatrix::AllTripletsWithinBounds() const {
+ for (int i = 0; i < num_nonzeros_; ++i) {
+ if ((rows_[i] < 0) || (rows_[i] >= num_rows_) ||
+ (cols_[i] < 0) || (cols_[i] >= num_cols_))
+ return false;
+ }
+ return true;
+}
+
+void TripletSparseMatrix::Reserve(int new_max_num_nonzeros) {
+ CHECK_LE(num_nonzeros_, new_max_num_nonzeros)
+ << "Reallocation will cause data loss";
+
+ // Nothing to do if we have enough space already.
+ if (new_max_num_nonzeros <= max_num_nonzeros_)
+ return;
+
+ int* new_rows = new int[new_max_num_nonzeros];
+ int* new_cols = new int[new_max_num_nonzeros];
+ double* new_values = new double[new_max_num_nonzeros];
+
+ for (int i = 0; i < num_nonzeros_; ++i) {
+ new_rows[i] = rows_[i];
+ new_cols[i] = cols_[i];
+ new_values[i] = values_[i];
+ }
+
+ rows_.reset(new_rows);
+ cols_.reset(new_cols);
+ values_.reset(new_values);
+
+ max_num_nonzeros_ = new_max_num_nonzeros;
+}
+
+void TripletSparseMatrix::SetZero() {
+ fill(values_.get(), values_.get() + max_num_nonzeros_, 0.0);
+ num_nonzeros_ = 0;
+}
+
+void TripletSparseMatrix::set_num_nonzeros(int num_nonzeros) {
+ CHECK_GE(num_nonzeros, 0);
+ CHECK_LE(num_nonzeros, max_num_nonzeros_);
+ num_nonzeros_ = num_nonzeros;
+}
+
+void TripletSparseMatrix::AllocateMemory() {
+ rows_.reset(new int[max_num_nonzeros_]);
+ cols_.reset(new int[max_num_nonzeros_]);
+ values_.reset(new double[max_num_nonzeros_]);
+}
+
+void TripletSparseMatrix::CopyData(const TripletSparseMatrix& orig) {
+ for (int i = 0; i < num_nonzeros_; ++i) {
+ rows_[i] = orig.rows_[i];
+ cols_[i] = orig.cols_[i];
+ values_[i] = orig.values_[i];
+ }
+}
+
+void TripletSparseMatrix::RightMultiply(const double* x, double* y) const {
+ for (int i = 0; i < num_nonzeros_; ++i) {
+ y[rows_[i]] += values_[i]*x[cols_[i]];
+ }
+}
+
+void TripletSparseMatrix::LeftMultiply(const double* x, double* y) const {
+ for (int i = 0; i < num_nonzeros_; ++i) {
+ y[cols_[i]] += values_[i]*x[rows_[i]];
+ }
+}
+
+void TripletSparseMatrix::SquaredColumnNorm(double* x) const {
+ CHECK_NOTNULL(x);
+ VectorRef(x, num_cols_).setZero();
+ for (int i = 0; i < num_nonzeros_; ++i) {
+ x[cols_[i]] += values_[i] * values_[i];
+ }
+}
+
+void TripletSparseMatrix::ScaleColumns(const double* scale) {
+ CHECK_NOTNULL(scale);
+ for (int i = 0; i < num_nonzeros_; ++i) {
+ values_[i] = values_[i] * scale[cols_[i]];
+ }
+}
+
+void TripletSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
+ dense_matrix->resize(num_rows_, num_cols_);
+ dense_matrix->setZero();
+ Matrix& m = *dense_matrix;
+ for (int i = 0; i < num_nonzeros_; ++i) {
+ m(rows_[i], cols_[i]) += values_[i];
+ }
+}
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+void TripletSparseMatrix::ToProto(SparseMatrixProto *proto) const {
+ proto->Clear();
+
+ TripletSparseMatrixProto* tsm_proto = proto->mutable_triplet_matrix();
+ tsm_proto->set_num_rows(num_rows_);
+ tsm_proto->set_num_cols(num_cols_);
+ tsm_proto->set_num_nonzeros(num_nonzeros_);
+ for (int i = 0; i < num_nonzeros_; ++i) {
+ tsm_proto->add_rows(rows_[i]);
+ tsm_proto->add_cols(cols_[i]);
+ tsm_proto->add_values(values_[i]);
+ }
+}
+#endif
+
+void TripletSparseMatrix::AppendRows(const TripletSparseMatrix& B) {
+ CHECK_EQ(B.num_cols(), num_cols_);
+ Reserve(num_nonzeros_ + B.num_nonzeros_);
+ for (int i = 0; i < B.num_nonzeros_; ++i) {
+ rows_.get()[num_nonzeros_] = B.rows()[i] + num_rows_;
+ cols_.get()[num_nonzeros_] = B.cols()[i];
+ values_.get()[num_nonzeros_++] = B.values()[i];
+ }
+ num_rows_ = num_rows_ + B.num_rows();
+}
+
+void TripletSparseMatrix::AppendCols(const TripletSparseMatrix& B) {
+ CHECK_EQ(B.num_rows(), num_rows_);
+ Reserve(num_nonzeros_ + B.num_nonzeros_);
+ for (int i = 0; i < B.num_nonzeros_; ++i, ++num_nonzeros_) {
+ rows_.get()[num_nonzeros_] = B.rows()[i];
+ cols_.get()[num_nonzeros_] = B.cols()[i] + num_cols_;
+ values_.get()[num_nonzeros_] = B.values()[i];
+ }
+ num_cols_ = num_cols_ + B.num_cols();
+}
+
+
+void TripletSparseMatrix::Resize(int new_num_rows, int new_num_cols) {
+ if ((new_num_rows >= num_rows_) && (new_num_cols >= num_cols_)) {
+ num_rows_ = new_num_rows;
+ num_cols_ = new_num_cols;
+ return;
+ }
+
+ num_rows_ = new_num_rows;
+ num_cols_ = new_num_cols;
+
+ int* r_ptr = rows_.get();
+ int* c_ptr = cols_.get();
+ double* v_ptr = values_.get();
+
+ int dropped_terms = 0;
+ for (int i = 0; i < num_nonzeros_; ++i) {
+ if ((r_ptr[i] < num_rows_) && (c_ptr[i] < num_cols_)) {
+ if (dropped_terms) {
+ r_ptr[i-dropped_terms] = r_ptr[i];
+ c_ptr[i-dropped_terms] = c_ptr[i];
+ v_ptr[i-dropped_terms] = v_ptr[i];
+ }
+ } else {
+ ++dropped_terms;
+ }
+ }
+ num_nonzeros_ -= dropped_terms;
+}
+
+TripletSparseMatrix* TripletSparseMatrix::CreateSparseDiagonalMatrix(
+ const double* values, int num_rows) {
+ TripletSparseMatrix* m =
+ new TripletSparseMatrix(num_rows, num_rows, num_rows);
+ for (int i = 0; i < num_rows; ++i) {
+ m->mutable_rows()[i] = i;
+ m->mutable_cols()[i] = i;
+ m->mutable_values()[i] = values[i];
+ }
+ m->set_num_nonzeros(num_rows);
+ return m;
+}
+
+void TripletSparseMatrix::ToTextFile(FILE* file) const {
+ CHECK_NOTNULL(file);
+ for (int i = 0; i < num_nonzeros_; ++i) {
+ fprintf(file, "% 10d % 10d %17f\n", rows_[i], cols_[i], values_[i]);
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/triplet_sparse_matrix.h b/internal/ceres/triplet_sparse_matrix.h
new file mode 100644
index 0000000..89a645b
--- /dev/null
+++ b/internal/ceres/triplet_sparse_matrix.h
@@ -0,0 +1,137 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_TRIPLET_SPARSE_MATRIX_H_
+#define CERES_INTERNAL_TRIPLET_SPARSE_MATRIX_H_
+
+#include "ceres/sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class SparseMatrixProto;
+
+// An implementation of the SparseMatrix interface to store and
+// manipulate sparse matrices in triplet (i,j,s) form. This object is
+// inspired by the design of the cholmod_triplet struct used in the
+// SuiteSparse package and is memory layout compatible with it.
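+//
+// For example, the 2x3 matrix
+//
+//   [1 0 2]
+//   [0 3 0]
+//
+// can be stored as rows = {0, 0, 1}, cols = {0, 2, 1} and
+// values = {1, 2, 3}. Duplicate (row, col) pairs are permitted; their
+// values are summed together when the matrix is converted to dense
+// form or multiplied against a vector.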
+class TripletSparseMatrix : public SparseMatrix {
+ public:
+ TripletSparseMatrix();
+ TripletSparseMatrix(int num_rows, int num_cols, int max_num_nonzeros);
+ explicit TripletSparseMatrix(const TripletSparseMatrix& orig);
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+ explicit TripletSparseMatrix(const SparseMatrixProto& proto);
+#endif
+
+ TripletSparseMatrix& operator=(const TripletSparseMatrix& rhs);
+
+ ~TripletSparseMatrix();
+
+ // Implementation of the SparseMatrix interface.
+ virtual void SetZero();
+ virtual void RightMultiply(const double* x, double* y) const;
+ virtual void LeftMultiply(const double* x, double* y) const;
+ virtual void SquaredColumnNorm(double* x) const;
+ virtual void ScaleColumns(const double* scale);
+ virtual void ToDenseMatrix(Matrix* dense_matrix) const;
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+ virtual void ToProto(SparseMatrixProto *proto) const;
+#endif
+ virtual void ToTextFile(FILE* file) const;
+ virtual int num_rows() const { return num_rows_; }
+ virtual int num_cols() const { return num_cols_; }
+ virtual int num_nonzeros() const { return num_nonzeros_; }
+ virtual const double* values() const { return values_.get(); }
+ virtual double* mutable_values() { return values_.get(); }
+ virtual void set_num_nonzeros(int num_nonzeros);
+
+  // Increase max_num_nonzeros and correspondingly increase the size
+  // of rows_, cols_ and values_. If new_max_num_nonzeros is smaller
+  // than max_num_nonzeros_, no reallocation takes place, but
+  // num_nonzeros_ must still be less than or equal to
+  // new_max_num_nonzeros, otherwise the method CHECK-fails to
+  // prevent data loss.
+ void Reserve(int new_max_num_nonzeros);
+
+ // Append the matrix B at the bottom of this matrix. B should have
+ // the same number of columns as num_cols_.
+ void AppendRows(const TripletSparseMatrix& B);
+
+ // Append the matrix B at the right of this matrix. B should have
+  // the same number of rows as num_rows_.
+ void AppendCols(const TripletSparseMatrix& B);
+
+ // Resize the matrix. Entries which fall outside the new matrix
+ // bounds are dropped and the num_non_zeros changed accordingly.
+ void Resize(int new_num_rows, int new_num_cols);
+
+ int max_num_nonzeros() const { return max_num_nonzeros_; }
+ const int* rows() const { return rows_.get(); }
+ const int* cols() const { return cols_.get(); }
+ int* mutable_rows() { return rows_.get(); }
+ int* mutable_cols() { return cols_.get(); }
+
+  // Returns true if all the entries of the matrix lie within the row
+  // and column bounds, and false otherwise.
+ bool AllTripletsWithinBounds() const;
+
+ bool IsValid() const { return AllTripletsWithinBounds(); }
+
+ // Build a sparse diagonal matrix of size num_rows x num_rows from
+ // the array values. Entries of the values array are copied into the
+ // sparse matrix.
+ static TripletSparseMatrix* CreateSparseDiagonalMatrix(const double* values,
+ int num_rows);
+
+ private:
+ void AllocateMemory();
+ void CopyData(const TripletSparseMatrix& orig);
+
+ int num_rows_;
+ int num_cols_;
+ int max_num_nonzeros_;
+ int num_nonzeros_;
+
+ // The data is stored as three arrays. For each i, values_[i] is
+  // stored at the location (rows_[i], cols_[i]). If there are
+ // multiple entries with the same (rows_[i], cols_[i]), the values_
+ // entries corresponding to them are summed up.
+ scoped_array<int> rows_;
+ scoped_array<int> cols_;
+ scoped_array<double> values_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif  // CERES_INTERNAL_TRIPLET_SPARSE_MATRIX_H_
diff --git a/internal/ceres/triplet_sparse_matrix_test.cc b/internal/ceres/triplet_sparse_matrix_test.cc
new file mode 100644
index 0000000..d16682e
--- /dev/null
+++ b/internal/ceres/triplet_sparse_matrix_test.cc
@@ -0,0 +1,354 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/triplet_sparse_matrix.h"
+
+#include "gtest/gtest.h"
+#include "ceres/matrix_proto.h"
+#include "ceres/internal/scoped_ptr.h"
+
+namespace ceres {
+namespace internal {
+
+TEST(TripletSparseMatrix, DefaultConstructorReturnsEmptyObject) {
+ TripletSparseMatrix m;
+ EXPECT_EQ(m.num_rows(), 0);
+ EXPECT_EQ(m.num_cols(), 0);
+ EXPECT_EQ(m.num_nonzeros(), 0);
+ EXPECT_EQ(m.max_num_nonzeros(), 0);
+}
+
+TEST(TripletSparseMatrix, SimpleConstructorAndBasicOperations) {
+ // Build a matrix
+ TripletSparseMatrix m(2, 5, 4);
+ EXPECT_EQ(m.num_rows(), 2);
+ EXPECT_EQ(m.num_cols(), 5);
+ EXPECT_EQ(m.num_nonzeros(), 0);
+ EXPECT_EQ(m.max_num_nonzeros(), 4);
+
+ m.mutable_rows()[0] = 0;
+ m.mutable_cols()[0] = 1;
+ m.mutable_values()[0] = 2.5;
+
+ m.mutable_rows()[1] = 1;
+ m.mutable_cols()[1] = 4;
+ m.mutable_values()[1] = 5.2;
+ m.set_num_nonzeros(2);
+
+ EXPECT_EQ(m.num_nonzeros(), 2);
+
+ ASSERT_TRUE(m.AllTripletsWithinBounds());
+
+  // We should never be able to resize and lose data
+ EXPECT_DEATH_IF_SUPPORTED(m.Reserve(1), "Reallocation will cause data loss");
+
+ // We should be able to resize while preserving data
+ m.Reserve(50);
+ EXPECT_EQ(m.max_num_nonzeros(), 50);
+
+ m.Reserve(3);
+ EXPECT_EQ(m.max_num_nonzeros(), 50); // The space is already reserved.
+
+ EXPECT_EQ(m.rows()[0], 0);
+ EXPECT_EQ(m.rows()[1], 1);
+
+ EXPECT_EQ(m.cols()[0], 1);
+ EXPECT_EQ(m.cols()[1], 4);
+
+ EXPECT_DOUBLE_EQ(m.values()[0], 2.5);
+ EXPECT_DOUBLE_EQ(m.values()[1], 5.2);
+
+ // Bounds check should fail
+ m.mutable_rows()[0] = 10;
+ EXPECT_FALSE(m.AllTripletsWithinBounds());
+
+ m.mutable_rows()[0] = 1;
+ m.mutable_cols()[0] = 100;
+ EXPECT_FALSE(m.AllTripletsWithinBounds());
+
+ // Remove all data and then resize the data store
+ m.SetZero();
+ EXPECT_EQ(m.num_nonzeros(), 0);
+ m.Reserve(1);
+}
+
+TEST(TripletSparseMatrix, CopyConstructor) {
+ TripletSparseMatrix orig(2, 5, 4);
+ orig.mutable_rows()[0] = 0;
+ orig.mutable_cols()[0] = 1;
+ orig.mutable_values()[0] = 2.5;
+
+ orig.mutable_rows()[1] = 1;
+ orig.mutable_cols()[1] = 4;
+ orig.mutable_values()[1] = 5.2;
+ orig.set_num_nonzeros(2);
+
+ TripletSparseMatrix cpy(orig);
+
+ EXPECT_EQ(cpy.num_rows(), 2);
+ EXPECT_EQ(cpy.num_cols(), 5);
+ ASSERT_EQ(cpy.num_nonzeros(), 2);
+ EXPECT_EQ(cpy.max_num_nonzeros(), 4);
+
+ EXPECT_EQ(cpy.rows()[0], 0);
+ EXPECT_EQ(cpy.rows()[1], 1);
+
+ EXPECT_EQ(cpy.cols()[0], 1);
+ EXPECT_EQ(cpy.cols()[1], 4);
+
+ EXPECT_DOUBLE_EQ(cpy.values()[0], 2.5);
+ EXPECT_DOUBLE_EQ(cpy.values()[1], 5.2);
+}
+
+TEST(TripletSparseMatrix, AssignmentOperator) {
+ TripletSparseMatrix orig(2, 5, 4);
+ orig.mutable_rows()[0] = 0;
+ orig.mutable_cols()[0] = 1;
+ orig.mutable_values()[0] = 2.5;
+
+ orig.mutable_rows()[1] = 1;
+ orig.mutable_cols()[1] = 4;
+ orig.mutable_values()[1] = 5.2;
+ orig.set_num_nonzeros(2);
+
+ TripletSparseMatrix cpy(3, 50, 40);
+ cpy.mutable_rows()[0] = 0;
+ cpy.mutable_cols()[0] = 10;
+ cpy.mutable_values()[0] = 10.22;
+
+ cpy.mutable_rows()[1] = 2;
+ cpy.mutable_cols()[1] = 23;
+ cpy.mutable_values()[1] = 34.45;
+
+ cpy.mutable_rows()[0] = 0;
+ cpy.mutable_cols()[0] = 10;
+ cpy.mutable_values()[0] = 10.22;
+
+ cpy.mutable_rows()[1] = 0;
+ cpy.mutable_cols()[1] = 3;
+ cpy.mutable_values()[1] = 4.4;
+ cpy.set_num_nonzeros(3);
+
+ cpy = orig;
+
+ EXPECT_EQ(cpy.num_rows(), 2);
+ EXPECT_EQ(cpy.num_cols(), 5);
+ ASSERT_EQ(cpy.num_nonzeros(), 2);
+ EXPECT_EQ(cpy.max_num_nonzeros(), 4);
+
+ EXPECT_EQ(cpy.rows()[0], 0);
+ EXPECT_EQ(cpy.rows()[1], 1);
+
+ EXPECT_EQ(cpy.cols()[0], 1);
+ EXPECT_EQ(cpy.cols()[1], 4);
+
+ EXPECT_DOUBLE_EQ(cpy.values()[0], 2.5);
+ EXPECT_DOUBLE_EQ(cpy.values()[1], 5.2);
+}
+
+TEST(TripletSparseMatrix, AppendRows) {
+ // Build one matrix.
+ TripletSparseMatrix m(2, 5, 4);
+ m.mutable_rows()[0] = 0;
+ m.mutable_cols()[0] = 1;
+ m.mutable_values()[0] = 2.5;
+
+ m.mutable_rows()[1] = 1;
+ m.mutable_cols()[1] = 4;
+ m.mutable_values()[1] = 5.2;
+ m.set_num_nonzeros(2);
+
+ // Build another matrix.
+ TripletSparseMatrix a(10, 5, 4);
+ a.mutable_rows()[0] = 0;
+ a.mutable_cols()[0] = 1;
+ a.mutable_values()[0] = 3.5;
+
+ a.mutable_rows()[1] = 1;
+ a.mutable_cols()[1] = 4;
+ a.mutable_values()[1] = 6.2;
+
+ a.mutable_rows()[2] = 9;
+ a.mutable_cols()[2] = 5;
+ a.mutable_values()[2] = 1;
+ a.set_num_nonzeros(3);
+
+ // Glue the second matrix to the bottom of the first.
+ m.AppendRows(a);
+
+ EXPECT_EQ(m.num_rows(), 12);
+ EXPECT_EQ(m.num_cols(), 5);
+ ASSERT_EQ(m.num_nonzeros(), 5);
+
+ EXPECT_EQ(m.values()[0], 2.5);
+ EXPECT_EQ(m.values()[1], 5.2);
+ EXPECT_EQ(m.values()[2], 3.5);
+ EXPECT_EQ(m.values()[3], 6.2);
+ EXPECT_EQ(m.values()[4], 1);
+
+ EXPECT_EQ(m.rows()[0], 0);
+ EXPECT_EQ(m.rows()[1], 1);
+ EXPECT_EQ(m.rows()[2], 2);
+ EXPECT_EQ(m.rows()[3], 3);
+ EXPECT_EQ(m.rows()[4], 11);
+
+ EXPECT_EQ(m.cols()[0], 1);
+ EXPECT_EQ(m.cols()[1], 4);
+ EXPECT_EQ(m.cols()[2], 1);
+ EXPECT_EQ(m.cols()[3], 4);
+ EXPECT_EQ(m.cols()[4], 5);
+}
+
+TEST(TripletSparseMatrix, AppendCols) {
+ // Build one matrix.
+ TripletSparseMatrix m(2, 5, 4);
+ m.mutable_rows()[0] = 0;
+ m.mutable_cols()[0] = 1;
+ m.mutable_values()[0] = 2.5;
+
+ m.mutable_rows()[1] = 1;
+ m.mutable_cols()[1] = 4;
+ m.mutable_values()[1] = 5.2;
+ m.set_num_nonzeros(2);
+
+ // Build another matrix.
+ TripletSparseMatrix a(2, 15, 4);
+ a.mutable_rows()[0] = 0;
+ a.mutable_cols()[0] = 1;
+ a.mutable_values()[0] = 3.5;
+
+ a.mutable_rows()[1] = 1;
+ a.mutable_cols()[1] = 4;
+ a.mutable_values()[1] = 6.2;
+
+ a.mutable_rows()[2] = 0;
+ a.mutable_cols()[2] = 10;
+ a.mutable_values()[2] = 1;
+ a.set_num_nonzeros(3);
+
+ // Glue the second matrix to the left of the first.
+ m.AppendCols(a);
+
+ EXPECT_EQ(m.num_rows(), 2);
+ EXPECT_EQ(m.num_cols(), 20);
+ ASSERT_EQ(m.num_nonzeros(), 5);
+
+ EXPECT_EQ(m.values()[0], 2.5);
+ EXPECT_EQ(m.values()[1], 5.2);
+ EXPECT_EQ(m.values()[2], 3.5);
+ EXPECT_EQ(m.values()[3], 6.2);
+ EXPECT_EQ(m.values()[4], 1);
+
+ EXPECT_EQ(m.rows()[0], 0);
+ EXPECT_EQ(m.rows()[1], 1);
+ EXPECT_EQ(m.rows()[2], 0);
+ EXPECT_EQ(m.rows()[3], 1);
+ EXPECT_EQ(m.rows()[4], 0);
+
+ EXPECT_EQ(m.cols()[0], 1);
+ EXPECT_EQ(m.cols()[1], 4);
+ EXPECT_EQ(m.cols()[2], 6);
+ EXPECT_EQ(m.cols()[3], 9);
+ EXPECT_EQ(m.cols()[4], 15);
+}
+
+TEST(TripletSparseMatrix, CreateDiagonalMatrix) {
+ scoped_array<double> values(new double[10]);
+ for (int i = 0; i < 10; ++i)
+ values[i] = i;
+
+ scoped_ptr<TripletSparseMatrix> m(
+ TripletSparseMatrix::CreateSparseDiagonalMatrix(values.get(), 10));
+ EXPECT_EQ(m->num_rows(), 10);
+ EXPECT_EQ(m->num_cols(), 10);
+ ASSERT_EQ(m->num_nonzeros(), 10);
+ for (int i = 0; i < 10 ; ++i) {
+ EXPECT_EQ(m->rows()[i], i);
+ EXPECT_EQ(m->cols()[i], i);
+ EXPECT_EQ(m->values()[i], i);
+ }
+}
+
+TEST(TripletSparseMatrix, Resize) {
+ TripletSparseMatrix m(10, 20, 200);
+ int nnz = 0;
+ for (int i = 0; i < 10; ++i) {
+ for (int j = 0; j < 20; ++j) {
+ m.mutable_rows()[nnz] = i;
+ m.mutable_cols()[nnz] = j;
+ m.mutable_values()[nnz++] = i+j;
+ }
+ }
+ m.set_num_nonzeros(nnz);
+ m.Resize(5, 6);
+ EXPECT_EQ(m.num_rows(), 5);
+ EXPECT_EQ(m.num_cols(), 6);
+ ASSERT_EQ(m.num_nonzeros(), 30);
+ for (int i = 0; i < 30; ++i) {
+ EXPECT_EQ(m.values()[i], m.rows()[i] + m.cols()[i]);
+ }
+}
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+TEST(TripletSparseMatrix, Serialization) {
+ TripletSparseMatrix m(2, 5, 4);
+
+ m.mutable_rows()[0] = 0;
+ m.mutable_cols()[0] = 1;
+ m.mutable_values()[0] = 2.5;
+
+ m.mutable_rows()[1] = 1;
+ m.mutable_cols()[1] = 4;
+ m.mutable_values()[1] = 5.2;
+ m.set_num_nonzeros(2);
+
+ // Roundtrip through serialization and check for equality.
+ SparseMatrixProto proto;
+ m.ToProto(&proto);
+
+ TripletSparseMatrix n(proto);
+
+ ASSERT_EQ(n.num_rows(), 2);
+ ASSERT_EQ(n.num_cols(), 5);
+
+  // Note that max_num_nonzeros gets truncated; the serialization only
+  // stores the nonzero entries, so the deserialized matrix has no
+  // extra reserved space.
+ ASSERT_EQ(n.num_nonzeros(), 2);
+ ASSERT_EQ(n.max_num_nonzeros(), 2);
+
+ for (int i = 0; i < m.num_nonzeros(); ++i) {
+ EXPECT_EQ(m.rows()[i], n.rows()[i]);
+ EXPECT_EQ(m.cols()[i], n.cols()[i]);
+ EXPECT_EQ(m.values()[i], n.values()[i]);
+ }
+}
+#endif
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/trust_region_minimizer.cc b/internal/ceres/trust_region_minimizer.cc
new file mode 100644
index 0000000..db2641d
--- /dev/null
+++ b/internal/ceres/trust_region_minimizer.cc
@@ -0,0 +1,557 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/trust_region_minimizer.h"
+
+#include <algorithm>
+#include <cstdlib>
+#include <cmath>
+#include <cstring>
+#include <limits>
+#include <string>
+#include <vector>
+
+#include "Eigen/Core"
+#include "ceres/array_utils.h"
+#include "ceres/evaluator.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/stringprintf.h"
+#include "ceres/trust_region_strategy.h"
+#include "ceres/types.h"
+#include "ceres/wall_time.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+// Small constant for various floating point issues.
+const double kEpsilon = 1e-12;
+} // namespace
+
+// Execute the list of IterationCallbacks sequentially. If any one of
+// the callbacks does not return SOLVER_CONTINUE, then stop and return
+// its status.
+CallbackReturnType TrustRegionMinimizer::RunCallbacks(
+ const IterationSummary& iteration_summary) {
+ for (int i = 0; i < options_.callbacks.size(); ++i) {
+ const CallbackReturnType status =
+ (*options_.callbacks[i])(iteration_summary);
+ if (status != SOLVER_CONTINUE) {
+ return status;
+ }
+ }
+ return SOLVER_CONTINUE;
+}
+
+// Compute a scaling vector that is used to improve the conditioning
+// of the Jacobian.
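+//
+// Concretely, column i of the Jacobian is multiplied by
+//
+//   scale[i] = 1 / (1 + ||J_i||)
+//
+// where ||J_i|| is the Euclidean norm of column i; large columns are
+// damped towards unit scale, and the 1 in the denominator keeps the
+// scale bounded for (near) zero columns.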
+void TrustRegionMinimizer::EstimateScale(const SparseMatrix& jacobian,
+ double* scale) const {
+ jacobian.SquaredColumnNorm(scale);
+ for (int i = 0; i < jacobian.num_cols(); ++i) {
+ scale[i] = 1.0 / (1.0 + sqrt(scale[i]));
+ }
+}
+
+void TrustRegionMinimizer::Init(const Minimizer::Options& options) {
+ options_ = options;
+ sort(options_.lsqp_iterations_to_dump.begin(),
+ options_.lsqp_iterations_to_dump.end());
+}
+
+bool TrustRegionMinimizer::MaybeDumpLinearLeastSquaresProblem(
+ const int iteration,
+ const SparseMatrix* jacobian,
+ const double* residuals,
+ const double* step) const {
+ // TODO(sameeragarwal): Since the use of trust_region_radius has
+  // moved inside TrustRegionStrategy, it is not clear how we dump the
+ // regularization vector/matrix anymore.
+ //
+ // Also num_eliminate_blocks is not visible to the trust region
+ // minimizer either.
+ //
+ // Both of these indicate that this is the wrong place for this
+  // code, and going forward it needs fixing/refactoring.
+ return true;
+}
+
+void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
+ double* parameters,
+ Solver::Summary* summary) {
+ double start_time = WallTimeInSeconds();
+ double iteration_start_time = start_time;
+ Init(options);
+
+ summary->termination_type = NO_CONVERGENCE;
+ summary->num_successful_steps = 0;
+ summary->num_unsuccessful_steps = 0;
+
+ Evaluator* evaluator = CHECK_NOTNULL(options_.evaluator);
+ SparseMatrix* jacobian = CHECK_NOTNULL(options_.jacobian);
+ TrustRegionStrategy* strategy = CHECK_NOTNULL(options_.trust_region_strategy);
+
+ const int num_parameters = evaluator->NumParameters();
+ const int num_effective_parameters = evaluator->NumEffectiveParameters();
+ const int num_residuals = evaluator->NumResiduals();
+
+ VectorRef x_min(parameters, num_parameters);
+ Vector x = x_min;
+ double x_norm = x.norm();
+
+ Vector residuals(num_residuals);
+ Vector trust_region_step(num_effective_parameters);
+ Vector delta(num_effective_parameters);
+ Vector x_plus_delta(num_parameters);
+ Vector gradient(num_effective_parameters);
+ Vector model_residuals(num_residuals);
+ Vector scale(num_effective_parameters);
+
+ IterationSummary iteration_summary;
+ iteration_summary.iteration = 0;
+ iteration_summary.step_is_valid = false;
+ iteration_summary.step_is_successful = false;
+ iteration_summary.cost_change = 0.0;
+ iteration_summary.gradient_max_norm = 0.0;
+ iteration_summary.step_norm = 0.0;
+ iteration_summary.relative_decrease = 0.0;
+ iteration_summary.trust_region_radius = strategy->Radius();
+ // TODO(sameeragarwal): Rename eta to linear_solver_accuracy or
+ // something similar across the board.
+ iteration_summary.eta = options_.eta;
+ iteration_summary.linear_solver_iterations = 0;
+ iteration_summary.step_solver_time_in_seconds = 0;
+
+ // Do initial cost and Jacobian evaluation.
+ double cost = 0.0;
+ if (!evaluator->Evaluate(x.data(), &cost, residuals.data(), NULL, jacobian)) {
+ LOG(WARNING) << "Terminating: Residual and Jacobian evaluation failed.";
+ summary->termination_type = NUMERICAL_FAILURE;
+ return;
+ }
+
+ iteration_summary.cost = cost + summary->fixed_cost;
+
+ int num_consecutive_nonmonotonic_steps = 0;
+ double minimum_cost = cost;
+ double reference_cost = cost;
+ double accumulated_reference_model_cost_change = 0.0;
+ double candidate_cost = cost;
+ double accumulated_candidate_model_cost_change = 0.0;
+
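+  // The gradient of the objective 1/2 |f(x)|^2 is J'f; LeftMultiply
+  // accumulates exactly that product into the gradient vector.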
+ gradient.setZero();
+ jacobian->LeftMultiply(residuals.data(), gradient.data());
+ iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();
+
+ if (options_.jacobi_scaling) {
+ EstimateScale(*jacobian, scale.data());
+ jacobian->ScaleColumns(scale.data());
+ } else {
+ scale.setOnes();
+ }
+
+ // The initial gradient max_norm is bounded from below so that we do
+ // not divide by zero.
+ const double gradient_max_norm_0 =
+ max(iteration_summary.gradient_max_norm, kEpsilon);
+ const double absolute_gradient_tolerance =
+ options_.gradient_tolerance * gradient_max_norm_0;
+
+ if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
+ summary->termination_type = GRADIENT_TOLERANCE;
+    VLOG(1) << "Terminating: Gradient tolerance reached. "
+ << "Relative gradient max norm: "
+ << iteration_summary.gradient_max_norm / gradient_max_norm_0
+ << " <= " << options_.gradient_tolerance;
+ return;
+ }
+
+ iteration_summary.iteration_time_in_seconds =
+ WallTimeInSeconds() - iteration_start_time;
+ iteration_summary.cumulative_time_in_seconds =
+ WallTimeInSeconds() - start_time
+ + summary->preprocessor_time_in_seconds;
+ summary->iterations.push_back(iteration_summary);
+
+ // Call the various callbacks.
+ switch (RunCallbacks(iteration_summary)) {
+ case SOLVER_TERMINATE_SUCCESSFULLY:
+ summary->termination_type = USER_SUCCESS;
+ VLOG(1) << "Terminating: User callback returned USER_SUCCESS.";
+ return;
+ case SOLVER_ABORT:
+ summary->termination_type = USER_ABORT;
+ VLOG(1) << "Terminating: User callback returned USER_ABORT.";
+ return;
+ case SOLVER_CONTINUE:
+ break;
+ default:
+ LOG(FATAL) << "Unknown type of user callback status";
+ }
+
+ int num_consecutive_invalid_steps = 0;
+ while (true) {
+ iteration_start_time = WallTimeInSeconds();
+ if (iteration_summary.iteration >= options_.max_num_iterations) {
+ summary->termination_type = NO_CONVERGENCE;
+ VLOG(1) << "Terminating: Maximum number of iterations reached.";
+ break;
+ }
+
+ const double total_solver_time = iteration_start_time - start_time +
+ summary->preprocessor_time_in_seconds;
+ if (total_solver_time >= options_.max_solver_time_in_seconds) {
+ summary->termination_type = NO_CONVERGENCE;
+ VLOG(1) << "Terminating: Maximum solver time reached.";
+ break;
+ }
+
+ iteration_summary = IterationSummary();
+ iteration_summary = summary->iterations.back();
+ iteration_summary.iteration = summary->iterations.back().iteration + 1;
+ iteration_summary.step_is_valid = false;
+ iteration_summary.step_is_successful = false;
+
+ const double strategy_start_time = WallTimeInSeconds();
+ TrustRegionStrategy::PerSolveOptions per_solve_options;
+ per_solve_options.eta = options_.eta;
+ TrustRegionStrategy::Summary strategy_summary =
+ strategy->ComputeStep(per_solve_options,
+ jacobian,
+ residuals.data(),
+ trust_region_step.data());
+
+ iteration_summary.step_solver_time_in_seconds =
+ WallTimeInSeconds() - strategy_start_time;
+ iteration_summary.linear_solver_iterations =
+ strategy_summary.num_iterations;
+
+ if (!MaybeDumpLinearLeastSquaresProblem(iteration_summary.iteration,
+ jacobian,
+ residuals.data(),
+ trust_region_step.data())) {
+ LOG(FATAL) << "Tried writing linear least squares problem: "
+                 << options.lsqp_dump_directory << " but failed.";
+ }
+
+ double model_cost_change = 0.0;
+ if (strategy_summary.termination_type != FAILURE) {
+ // new_model_cost
+ // = 1/2 [f + J * step]^2
+ // = 1/2 [ f'f + 2f'J * step + step' * J' * J * step ]
+ // model_cost_change
+ // = cost - new_model_cost
+ // = f'f/2 - 1/2 [ f'f + 2f'J * step + step' * J' * J * step]
+ // = -f'J * step - step' * J' * J * step / 2
+ model_residuals.setZero();
+ jacobian->RightMultiply(trust_region_step.data(), model_residuals.data());
+ model_cost_change = -(residuals.dot(model_residuals) +
+ model_residuals.squaredNorm() / 2.0);
+
+ if (model_cost_change < 0.0) {
+ VLOG(1) << "Invalid step: current_cost: " << cost
+ << " absolute difference " << model_cost_change
+ << " relative difference " << (model_cost_change / cost);
+ } else {
+ iteration_summary.step_is_valid = true;
+ }
+ }
+
+ if (!iteration_summary.step_is_valid) {
+      // Invalid steps can happen due to a number of reasons. We allow a
+      // limited number of consecutive failures, and return with
+      // NUMERICAL_FAILURE if this limit is exceeded.
+ if (++num_consecutive_invalid_steps >=
+ options_.max_num_consecutive_invalid_steps) {
+ summary->termination_type = NUMERICAL_FAILURE;
+ summary->error = StringPrintf(
+ "Terminating. Number of successive invalid steps more "
+ "than Solver::Options::max_num_consecutive_invalid_steps: %d",
+ options_.max_num_consecutive_invalid_steps);
+
+ LOG(WARNING) << summary->error;
+ return;
+ }
+
+ // We are going to try and reduce the trust region radius and
+ // solve again. To do this, we are going to treat this iteration
+ // as an unsuccessful iteration. Since the various callbacks are
+ // still executed, we are going to fill the iteration summary
+ // with data that assumes a step of length zero and no progress.
+ iteration_summary.cost = cost + summary->fixed_cost;
+ iteration_summary.cost_change = 0.0;
+ iteration_summary.gradient_max_norm =
+ summary->iterations.back().gradient_max_norm;
+ iteration_summary.step_norm = 0.0;
+ iteration_summary.relative_decrease = 0.0;
+ iteration_summary.eta = options_.eta;
+ } else {
+ // The step is numerically valid, so now we can judge its quality.
+ num_consecutive_invalid_steps = 0;
+
+ // Undo the Jacobian column scaling.
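+      // The step was computed for the column-scaled Jacobian, so multiply
+      // by the scale vector to map it back to the original parameterization
+      // before applying Plus().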
+ delta = (trust_region_step.array() * scale.array()).matrix();
+ if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
+ summary->termination_type = NUMERICAL_FAILURE;
+ summary->error =
+ "Terminating. Failed to compute Plus(x, delta, x_plus_delta).";
+
+ LOG(WARNING) << summary->error;
+ return;
+ }
+
+ // Try this step.
+ double new_cost = numeric_limits<double>::max();
+ if (!evaluator->Evaluate(x_plus_delta.data(),
+ &new_cost,
+ NULL, NULL, NULL)) {
+ // If the evaluation of the new cost fails, treat it as a step
+ // with high cost.
+ LOG(WARNING) << "Step failed to evaluate. "
+ << "Treating it as step with infinite cost";
+ new_cost = numeric_limits<double>::max();
+ } else {
+ // Check if performing an inner iteration will make it better.
+ if (options.inner_iteration_minimizer != NULL) {
+ const double x_plus_delta_cost = new_cost;
+ Vector inner_iteration_x = x_plus_delta;
+ Solver::Summary inner_iteration_summary;
+ options.inner_iteration_minimizer->Minimize(options,
+ inner_iteration_x.data(),
+ &inner_iteration_summary);
+          if (!evaluator->Evaluate(inner_iteration_x.data(),
+ &new_cost,
+ NULL, NULL, NULL)) {
+ VLOG(2) << "Inner iteration failed.";
+ new_cost = x_plus_delta_cost;
+ } else {
+ x_plus_delta = inner_iteration_x;
+            // Boost the model_cost_change, since the inner iteration
+ // improvements are not accounted for by the trust region.
+ model_cost_change += x_plus_delta_cost - new_cost;
+ VLOG(2) << "Inner iteration succeeded; current cost: " << cost
+ << " x_plus_delta_cost: " << x_plus_delta_cost
+ << " new_cost: " << new_cost;
+ }
+ }
+ }
+
+ iteration_summary.step_norm = (x - x_plus_delta).norm();
+
+ // Convergence based on parameter_tolerance.
+ const double step_size_tolerance = options_.parameter_tolerance *
+ (x_norm + options_.parameter_tolerance);
+ if (iteration_summary.step_norm <= step_size_tolerance) {
+ VLOG(1) << "Terminating. Parameter tolerance reached. "
+ << "relative step_norm: "
+ << iteration_summary.step_norm /
+ (x_norm + options_.parameter_tolerance)
+ << " <= " << options_.parameter_tolerance;
+ summary->termination_type = PARAMETER_TOLERANCE;
+ return;
+ }
+
+ VLOG(2) << "old cost: " << cost << " new cost: " << new_cost;
+ iteration_summary.cost_change = cost - new_cost;
+ const double absolute_function_tolerance =
+ options_.function_tolerance * cost;
+ if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) {
+ VLOG(1) << "Terminating. Function tolerance reached. "
+ << "|cost_change|/cost: "
+ << fabs(iteration_summary.cost_change) / cost
+ << " <= " << options_.function_tolerance;
+ summary->termination_type = FUNCTION_TOLERANCE;
+ return;
+ }
+
+ const double relative_decrease =
+ iteration_summary.cost_change / model_cost_change;
+
+ const double historical_relative_decrease =
+ (reference_cost - new_cost) /
+ (accumulated_reference_model_cost_change + model_cost_change);
+
+ // If monotonic steps are being used, then the relative_decrease
+ // is the usual ratio of the change in objective function value
+ // divided by the change in model cost.
+ //
+ // If non-monotonic steps are allowed, then we take the maximum
+ // of the relative_decrease and the
+ // historical_relative_decrease, which measures the increase
+ // from a reference iteration. The model cost change is
+ // estimated by accumulating the model cost changes since the
+ // reference iteration. The historical relative_decrease offers
+ // a boost to a step which is not too bad compared to the
+ // reference iteration, allowing for non-monotonic steps.
+ iteration_summary.relative_decrease =
+ options.use_nonmonotonic_steps
+ ? max(relative_decrease, historical_relative_decrease)
+ : relative_decrease;
+
+ iteration_summary.step_is_successful =
+ iteration_summary.relative_decrease > options_.min_relative_decrease;
+
+ if (iteration_summary.step_is_successful) {
+ accumulated_candidate_model_cost_change += model_cost_change;
+ accumulated_reference_model_cost_change += model_cost_change;
+ if (relative_decrease <= options_.min_relative_decrease) {
+ iteration_summary.step_is_nonmonotonic = true;
+ VLOG(2) << "Non-monotonic step! "
+ << " relative_decrease: " << relative_decrease
+ << " historical_relative_decrease: "
+ << historical_relative_decrease;
+ }
+ }
+ }
+
+ if (iteration_summary.step_is_successful) {
+ ++summary->num_successful_steps;
+ strategy->StepAccepted(iteration_summary.relative_decrease);
+ x = x_plus_delta;
+ x_norm = x.norm();
+
+ // Step looks good, evaluate the residuals and Jacobian at this
+ // point.
+ if (!evaluator->Evaluate(x.data(),
+ &cost,
+ residuals.data(),
+ NULL,
+ jacobian)) {
+ summary->termination_type = NUMERICAL_FAILURE;
+ summary->error = "Terminating: Residual and Jacobian evaluation failed.";
+ LOG(WARNING) << summary->error;
+ return;
+ }
+
+ gradient.setZero();
+ jacobian->LeftMultiply(residuals.data(), gradient.data());
+ iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();
+
+ if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
+ summary->termination_type = GRADIENT_TOLERANCE;
+        VLOG(1) << "Terminating: Gradient tolerance reached. "
+ << "Relative gradient max norm: "
+ << iteration_summary.gradient_max_norm / gradient_max_norm_0
+ << " <= " << options_.gradient_tolerance;
+ return;
+ }
+
+ if (options_.jacobi_scaling) {
+ jacobian->ScaleColumns(scale.data());
+ }
+
+ // Update the best, reference and candidate iterates.
+ //
+ // Based on algorithm 10.1.2 (page 357) of "Trust Region
+ // Methods" by Conn Gould & Toint, or equations 33-40 of
+ // "Non-monotone trust-region algorithms for nonlinear
+ // optimization subject to convex constraints" by Phil Toint,
+ // Mathematical Programming, 77, 1997.
+ if (cost < minimum_cost) {
+ // A step that improves solution quality was found.
+ x_min = x;
+ minimum_cost = cost;
+ // Set the candidate iterate to the current point.
+ candidate_cost = cost;
+ num_consecutive_nonmonotonic_steps = 0;
+ accumulated_candidate_model_cost_change = 0.0;
+ } else {
+ ++num_consecutive_nonmonotonic_steps;
+ if (cost > candidate_cost) {
+          // The current iterate has a higher cost than the
+ // candidate iterate. Set the candidate to this point.
+ VLOG(2) << "Updating the candidate iterate to the current point.";
+ candidate_cost = cost;
+ accumulated_candidate_model_cost_change = 0.0;
+ }
+
+        // If we have made more than the allowed number of non-monotonic
+        // steps, reset the reference iterate to the candidate iterate so
+        // as to force the algorithm to descend.
+ //
+ // This is the case because the candidate iterate has a value
+ // greater than minimum_cost but smaller than the reference
+ // iterate.
+ if (num_consecutive_nonmonotonic_steps ==
+ options.max_consecutive_nonmonotonic_steps) {
+ VLOG(2) << "Resetting the reference point to the candidate point";
+ reference_cost = candidate_cost;
+ accumulated_reference_model_cost_change =
+ accumulated_candidate_model_cost_change;
+ }
+ }
+ } else {
+ ++summary->num_unsuccessful_steps;
+ if (iteration_summary.step_is_valid) {
+ strategy->StepRejected(iteration_summary.relative_decrease);
+ } else {
+ strategy->StepIsInvalid();
+ }
+ }
+
+ iteration_summary.cost = cost + summary->fixed_cost;
+ iteration_summary.trust_region_radius = strategy->Radius();
+ if (iteration_summary.trust_region_radius <
+ options_.min_trust_region_radius) {
+ summary->termination_type = PARAMETER_TOLERANCE;
+      VLOG(1) << "Terminating: Minimum trust region radius reached.";
+ return;
+ }
+
+ iteration_summary.iteration_time_in_seconds =
+ WallTimeInSeconds() - iteration_start_time;
+ iteration_summary.cumulative_time_in_seconds =
+ WallTimeInSeconds() - start_time
+ + summary->preprocessor_time_in_seconds;
+ summary->iterations.push_back(iteration_summary);
+
+ switch (RunCallbacks(iteration_summary)) {
+ case SOLVER_TERMINATE_SUCCESSFULLY:
+ summary->termination_type = USER_SUCCESS;
+ VLOG(1) << "Terminating: User callback returned USER_SUCCESS.";
+ return;
+ case SOLVER_ABORT:
+ summary->termination_type = USER_ABORT;
+ VLOG(1) << "Terminating: User callback returned USER_ABORT.";
+ return;
+ case SOLVER_CONTINUE:
+ break;
+ default:
+ LOG(FATAL) << "Unknown type of user callback status";
+ }
+ }
+}
+
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/trust_region_minimizer.h b/internal/ceres/trust_region_minimizer.h
new file mode 100644
index 0000000..a4f5ba3
--- /dev/null
+++ b/internal/ceres/trust_region_minimizer.h
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_TRUST_REGION_MINIMIZER_H_
+#define CERES_INTERNAL_TRUST_REGION_MINIMIZER_H_
+
+#include "ceres/minimizer.h"
+#include "ceres/solver.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+// Generic trust region minimization algorithm. The heavy lifting is
+// done by a TrustRegionStrategy object passed in as one of the
+// arguments to the Minimize method.
+//
+// For example usage, see SolverImpl::Minimize.
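+//
+// A rough sketch of the wiring a caller is expected to do (see
+// trust_region_minimizer_test.cc for a complete, working example):
+//
+//   Minimizer::Options options(solver_options);
+//   options.evaluator = &evaluator;
+//   options.jacobian = jacobian.get();  // From evaluator.CreateJacobian().
+//   options.trust_region_strategy = strategy.get();
+//
+//   TrustRegionMinimizer minimizer;
+//   Solver::Summary summary;
+//   minimizer.Minimize(options, parameters, &summary);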
+class TrustRegionMinimizer : public Minimizer {
+ public:
+ ~TrustRegionMinimizer() {}
+ virtual void Minimize(const Minimizer::Options& options,
+ double* parameters,
+ Solver::Summary* summary);
+
+ private:
+ void Init(const Minimizer::Options& options);
+ void EstimateScale(const SparseMatrix& jacobian, double* scale) const;
+ CallbackReturnType RunCallbacks(const IterationSummary& iteration_summary);
+  bool MaybeDumpLinearLeastSquaresProblem(const int iteration,
+ const SparseMatrix* jacobian,
+ const double* residuals,
+ const double* step) const;
+
+ Minimizer::Options options_;
+};
+
+} // namespace internal
+} // namespace ceres
+#endif // CERES_INTERNAL_TRUST_REGION_MINIMIZER_H_
diff --git a/internal/ceres/trust_region_minimizer_test.cc b/internal/ceres/trust_region_minimizer_test.cc
new file mode 100644
index 0000000..a82dea7
--- /dev/null
+++ b/internal/ceres/trust_region_minimizer_test.cc
@@ -0,0 +1,376 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+// sameeragarwal@google.com (Sameer Agarwal)
+//
+// This tests the TrustRegionMinimizer loop using a direct Evaluator
+// implementation, rather than having a test that goes through all the
+// Program and Problem machinery.
+
+#include <cmath>
+#include "ceres/cost_function.h"
+#include "ceres/dense_qr_solver.h"
+#include "ceres/dense_sparse_matrix.h"
+#include "ceres/evaluator.h"
+#include "ceres/internal/port.h"
+#include "ceres/linear_solver.h"
+#include "ceres/minimizer.h"
+#include "ceres/problem.h"
+#include "ceres/trust_region_minimizer.h"
+#include "ceres/trust_region_strategy.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+// Templated Evaluator for Powell's function. The template parameters
+// indicate which of the four variables/columns of the jacobian are
+// active. This is equivalent to constructing a problem and using the
+// SubsetLocalParameterization. This allows us to test the support for
+// the Evaluator::Plus operation, in addition to checking the basic
+// behavior of the trust region algorithm.
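+//
+// Powell's singular function is
+//
+//   F(x) = 1/2 (f1^2 + f2^2 + f3^2 + f4^2), where
+//
+//   f1 = x1 + 10 x2,       f2 = sqrt(5) (x3 - x4),
+//   f3 = (x2 - 2 x3)^2,    f4 = sqrt(10) (x1 - x4)^2,
+//
+// and its minimum F = 0 is attained at x1 = x2 = x3 = x4 = 0.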
+template <bool col1, bool col2, bool col3, bool col4>
+class PowellEvaluator2 : public Evaluator {
+ public:
+ PowellEvaluator2()
+ : num_active_cols_(
+ (col1 ? 1 : 0) +
+ (col2 ? 1 : 0) +
+ (col3 ? 1 : 0) +
+ (col4 ? 1 : 0)) {
+ VLOG(1) << "Columns: "
+ << col1 << " "
+ << col2 << " "
+ << col3 << " "
+ << col4;
+ }
+
+ virtual ~PowellEvaluator2() {}
+
+ // Implementation of Evaluator interface.
+ virtual SparseMatrix* CreateJacobian() const {
+ CHECK(col1 || col2 || col3 || col4);
+ DenseSparseMatrix* dense_jacobian =
+ new DenseSparseMatrix(NumResiduals(), NumEffectiveParameters());
+ dense_jacobian->SetZero();
+ return dense_jacobian;
+ }
+
+ virtual bool Evaluate(const double* state,
+ double* cost,
+ double* residuals,
+ double* /* gradient */,
+ SparseMatrix* jacobian) {
+ double x1 = state[0];
+ double x2 = state[1];
+ double x3 = state[2];
+ double x4 = state[3];
+
+ VLOG(1) << "State: "
+ << "x1=" << x1 << ", "
+ << "x2=" << x2 << ", "
+ << "x3=" << x3 << ", "
+ << "x4=" << x4 << ".";
+
+ double f1 = x1 + 10.0 * x2;
+ double f2 = sqrt(5.0) * (x3 - x4);
+ double f3 = pow(x2 - 2.0 * x3, 2.0);
+ double f4 = sqrt(10.0) * pow(x1 - x4, 2.0);
+
+ VLOG(1) << "Function: "
+ << "f1=" << f1 << ", "
+ << "f2=" << f2 << ", "
+ << "f3=" << f3 << ", "
+ << "f4=" << f4 << ".";
+
+ *cost = (f1*f1 + f2*f2 + f3*f3 + f4*f4) / 2.0;
+
+ VLOG(1) << "Cost: " << *cost;
+
+ if (residuals != NULL) {
+ residuals[0] = f1;
+ residuals[1] = f2;
+ residuals[2] = f3;
+ residuals[3] = f4;
+ }
+
+ if (jacobian != NULL) {
+      DenseSparseMatrix* dense_jacobian =
+          down_cast<DenseSparseMatrix*>(jacobian);
+ dense_jacobian->SetZero();
+
+ AlignedMatrixRef jacobian_matrix = dense_jacobian->mutable_matrix();
+ CHECK_EQ(jacobian_matrix.cols(), num_active_cols_);
+
+ int column_index = 0;
+ if (col1) {
+ jacobian_matrix.col(column_index++) <<
+ 1.0,
+ 0.0,
+ 0.0,
+ sqrt(10.0) * 2.0 * (x1 - x4) * (1.0 - x4);
+ }
+ if (col2) {
+ jacobian_matrix.col(column_index++) <<
+ 10.0,
+ 0.0,
+ 2.0*(x2 - 2.0*x3)*(1.0 - 2.0*x3),
+ 0.0;
+ }
+
+ if (col3) {
+ jacobian_matrix.col(column_index++) <<
+ 0.0,
+ sqrt(5.0),
+ 2.0*(x2 - 2.0*x3)*(x2 - 2.0),
+ 0.0;
+ }
+
+ if (col4) {
+ jacobian_matrix.col(column_index++) <<
+ 0.0,
+ -sqrt(5.0),
+ 0.0,
+ sqrt(10.0) * 2.0 * (x1 - x4) * (x1 - 1.0);
+ }
+ VLOG(1) << "\n" << jacobian_matrix;
+ }
+ return true;
+ }
+
+ virtual bool Plus(const double* state,
+ const double* delta,
+ double* state_plus_delta) const {
+ int delta_index = 0;
+ state_plus_delta[0] = (col1 ? state[0] + delta[delta_index++] : state[0]);
+ state_plus_delta[1] = (col2 ? state[1] + delta[delta_index++] : state[1]);
+ state_plus_delta[2] = (col3 ? state[2] + delta[delta_index++] : state[2]);
+ state_plus_delta[3] = (col4 ? state[3] + delta[delta_index++] : state[3]);
+ return true;
+ }
+
+ virtual int NumEffectiveParameters() const { return num_active_cols_; }
+ virtual int NumParameters() const { return 4; }
+ virtual int NumResiduals() const { return 4; }
+
+ private:
+ const int num_active_cols_;
+};
+
+// Templated function to hold a subset of the columns fixed and check
+// if the solver converges to the optimal values or not.
+template<bool col1, bool col2, bool col3, bool col4>
+void IsTrustRegionSolveSuccessful(TrustRegionStrategyType strategy_type) {
+ Solver::Options solver_options;
+ LinearSolver::Options linear_solver_options;
+ DenseQRSolver linear_solver(linear_solver_options);
+
+ double parameters[4] = { 3, -1, 0, 1.0 };
+
+ // If the column is inactive, then set its value to the optimal
+ // value.
+ parameters[0] = (col1 ? parameters[0] : 0.0);
+ parameters[1] = (col2 ? parameters[1] : 0.0);
+ parameters[2] = (col3 ? parameters[2] : 0.0);
+ parameters[3] = (col4 ? parameters[3] : 0.0);
+
+ PowellEvaluator2<col1, col2, col3, col4> powell_evaluator;
+ scoped_ptr<SparseMatrix> jacobian(powell_evaluator.CreateJacobian());
+
+ Minimizer::Options minimizer_options(solver_options);
+ minimizer_options.gradient_tolerance = 1e-26;
+ minimizer_options.function_tolerance = 1e-26;
+ minimizer_options.parameter_tolerance = 1e-26;
+ minimizer_options.evaluator = &powell_evaluator;
+ minimizer_options.jacobian = jacobian.get();
+
+ TrustRegionStrategy::Options trust_region_strategy_options;
+ trust_region_strategy_options.trust_region_strategy_type = strategy_type;
+ trust_region_strategy_options.linear_solver = &linear_solver;
+ trust_region_strategy_options.initial_radius = 1e4;
+ trust_region_strategy_options.max_radius = 1e20;
+ trust_region_strategy_options.lm_min_diagonal = 1e-6;
+ trust_region_strategy_options.lm_max_diagonal = 1e32;
+ scoped_ptr<TrustRegionStrategy> strategy(
+ TrustRegionStrategy::Create(trust_region_strategy_options));
+ minimizer_options.trust_region_strategy = strategy.get();
+
+ TrustRegionMinimizer minimizer;
+ Solver::Summary summary;
+ minimizer.Minimize(minimizer_options, parameters, &summary);
+
+ // The minimum is at x1 = x2 = x3 = x4 = 0.
+ EXPECT_NEAR(0.0, parameters[0], 0.001);
+ EXPECT_NEAR(0.0, parameters[1], 0.001);
+ EXPECT_NEAR(0.0, parameters[2], 0.001);
+ EXPECT_NEAR(0.0, parameters[3], 0.001);
+}
+
+TEST(TrustRegionMinimizer, PowellsSingularFunctionUsingLevenbergMarquardt) {
+  // This case is excluded because it converges to a local minimum and
+  // does not find the global optimum. This does not affect the
+  // correctness of the test, since we are testing all the other 14
+  // combinations of column activations.
+  //
+  // IsTrustRegionSolveSuccessful<true, true, false, true>(kStrategy);
+
+ const TrustRegionStrategyType kStrategy = LEVENBERG_MARQUARDT;
+ IsTrustRegionSolveSuccessful<true, true, true, true >(kStrategy);
+ IsTrustRegionSolveSuccessful<true, true, true, false>(kStrategy);
+ IsTrustRegionSolveSuccessful<true, false, true, true >(kStrategy);
+ IsTrustRegionSolveSuccessful<false, true, true, true >(kStrategy);
+ IsTrustRegionSolveSuccessful<true, true, false, false>(kStrategy);
+ IsTrustRegionSolveSuccessful<true, false, true, false>(kStrategy);
+ IsTrustRegionSolveSuccessful<false, true, true, false>(kStrategy);
+ IsTrustRegionSolveSuccessful<true, false, false, true >(kStrategy);
+ IsTrustRegionSolveSuccessful<false, true, false, true >(kStrategy);
+ IsTrustRegionSolveSuccessful<false, false, true, true >(kStrategy);
+ IsTrustRegionSolveSuccessful<true, false, false, false>(kStrategy);
+ IsTrustRegionSolveSuccessful<false, true, false, false>(kStrategy);
+ IsTrustRegionSolveSuccessful<false, false, true, false>(kStrategy);
+ IsTrustRegionSolveSuccessful<false, false, false, true >(kStrategy);
+}
+
+TEST(TrustRegionMinimizer, PowellsSingularFunctionUsingDogleg) {
+ // The following two cases are excluded because they encounter a local minimum.
+ //
+ // IsTrustRegionSolveSuccessful<true, true, false, true >(kStrategy);
+ // IsTrustRegionSolveSuccessful<true, true, true, true >(kStrategy);
+
+ const TrustRegionStrategyType kStrategy = DOGLEG;
+ IsTrustRegionSolveSuccessful<true, true, true, false>(kStrategy);
+ IsTrustRegionSolveSuccessful<true, false, true, true >(kStrategy);
+ IsTrustRegionSolveSuccessful<false, true, true, true >(kStrategy);
+ IsTrustRegionSolveSuccessful<true, true, false, false>(kStrategy);
+ IsTrustRegionSolveSuccessful<true, false, true, false>(kStrategy);
+ IsTrustRegionSolveSuccessful<false, true, true, false>(kStrategy);
+ IsTrustRegionSolveSuccessful<true, false, false, true >(kStrategy);
+ IsTrustRegionSolveSuccessful<false, true, false, true >(kStrategy);
+ IsTrustRegionSolveSuccessful<false, false, true, true >(kStrategy);
+ IsTrustRegionSolveSuccessful<true, false, false, false>(kStrategy);
+ IsTrustRegionSolveSuccessful<false, true, false, false>(kStrategy);
+ IsTrustRegionSolveSuccessful<false, false, true, false>(kStrategy);
+ IsTrustRegionSolveSuccessful<false, false, false, true >(kStrategy);
+}
+
+
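+// A cost function whose single residual is target_length_ minus the
+// perimeter of the closed polygon formed by the 2D vertex parameter
+// blocks. It is used by the JacobiScalingTest below.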
+class CurveCostFunction : public CostFunction {
+ public:
+ CurveCostFunction(int num_vertices, double target_length)
+ : num_vertices_(num_vertices), target_length_(target_length) {
+ set_num_residuals(1);
+ for (int i = 0; i < num_vertices_; ++i) {
+ mutable_parameter_block_sizes()->push_back(2);
+ }
+ }
+
+ bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const {
+ residuals[0] = target_length_;
+
+ for (int i = 0; i < num_vertices_; ++i) {
+ int prev = (num_vertices_ + i - 1) % num_vertices_;
+ double length = 0.0;
+ for (int dim = 0; dim < 2; dim++) {
+ const double diff = parameters[prev][dim] - parameters[i][dim];
+ length += diff * diff;
+ }
+ residuals[0] -= sqrt(length);
+ }
+
+ if (jacobians == NULL) {
+ return true;
+ }
+
+ for (int i = 0; i < num_vertices_; ++i) {
+ if (jacobians[i] != NULL) {
+ int prev = (num_vertices_ + i - 1) % num_vertices_;
+ int next = (i + 1) % num_vertices_;
+
+ double u[2], v[2];
+ double norm_u = 0., norm_v = 0.;
+ for (int dim = 0; dim < 2; dim++) {
+ u[dim] = parameters[i][dim] - parameters[prev][dim];
+ norm_u += u[dim] * u[dim];
+ v[dim] = parameters[next][dim] - parameters[i][dim];
+ norm_v += v[dim] * v[dim];
+ }
+
+ norm_u = sqrt(norm_u);
+ norm_v = sqrt(norm_v);
+
+ for (int dim = 0; dim < 2; dim++) {
+ jacobians[i][dim] = 0.;
+
+ if (norm_u > std::numeric_limits< double >::min()) {
+ jacobians[i][dim] -= u[dim] / norm_u;
+ }
+
+ if (norm_v > std::numeric_limits< double >::min()) {
+ jacobians[i][dim] += v[dim] / norm_v;
+ }
+ }
+ }
+ }
+
+ return true;
+ }
+
+ private:
+ int num_vertices_;
+ double target_length_;
+};
+
+TEST(TrustRegionMinimizer, JacobiScalingTest) {
+ int N = 6;
+ std::vector< double* > y(N);
+ const double pi = 3.1415926535897932384626433;
+ for (int i = 0; i < N; i++) {
+    double theta = i * 2. * pi / static_cast< double >(N);
+ y[i] = new double[2];
+ y[i][0] = cos(theta);
+ y[i][1] = sin(theta);
+ }
+
+ Problem problem;
+ problem.AddResidualBlock(new CurveCostFunction(N, 10.), NULL, y);
+ Solver::Options options;
+ options.linear_solver_type = ceres::DENSE_QR;
+ Solver::Summary summary;
+ Solve(options, &problem, &summary);
+ EXPECT_LE(summary.final_cost, 1e-10);
+
+ for (int i = 0; i < N; i++) {
+    delete[] y[i];
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/trust_region_strategy.cc b/internal/ceres/trust_region_strategy.cc
new file mode 100644
index 0000000..89bc19d
--- /dev/null
+++ b/internal/ceres/trust_region_strategy.cc
@@ -0,0 +1,27 @@
+#include "ceres/trust_region_strategy.h"
+#include "ceres/dogleg_strategy.h"
+#include "ceres/levenberg_marquardt_strategy.h"
+
+namespace ceres {
+namespace internal {
+
+TrustRegionStrategy::~TrustRegionStrategy() {}
+
+TrustRegionStrategy* TrustRegionStrategy::Create(const Options& options) {
+ switch (options.trust_region_strategy_type) {
+ case LEVENBERG_MARQUARDT:
+ return new LevenbergMarquardtStrategy(options);
+ case DOGLEG:
+ return new DoglegStrategy(options);
+ default:
+ LOG(FATAL) << "Unknown trust region strategy: "
+ << options.trust_region_strategy_type;
+ }
+
+ LOG(FATAL) << "Unknown trust region strategy: "
+ << options.trust_region_strategy_type;
+ return NULL;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/trust_region_strategy.h b/internal/ceres/trust_region_strategy.h
new file mode 100644
index 0000000..391da97
--- /dev/null
+++ b/internal/ceres/trust_region_strategy.h
@@ -0,0 +1,148 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_TRUST_REGION_STRATEGY_H_
+#define CERES_INTERNAL_TRUST_REGION_STRATEGY_H_
+
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class LinearSolver;
+class SparseMatrix;
+
+// Interface for classes implementing various trust region strategies
+// for nonlinear least squares problems.
+//
+// The object is expected to maintain and update a trust region
+// radius, which it then uses to solve for the trust region step using
+// the jacobian matrix and residual vector.
+//
+// Here the term trust region radius is used loosely, as the strategy
+// is free to treat it as guidance and violate it as needed. For example,
+// the LevenbergMarquardtStrategy uses the inverse of the trust region
+// radius to scale the damping term, which controls the step size, but
+// does not set a hard limit on its size.
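+//
+// A typical call sequence, mirroring how TrustRegionMinimizer::Minimize
+// uses it, is:
+//
+//   scoped_ptr<TrustRegionStrategy> strategy(
+//       TrustRegionStrategy::Create(options));
+//   TrustRegionStrategy::Summary summary =
+//       strategy->ComputeStep(per_solve_options, jacobian, residuals, step);
+//   // ... evaluate the step and compute relative_decrease ...
+//   strategy->StepAccepted(relative_decrease);
+//   // (or StepRejected(relative_decrease), or StepIsInvalid()).
+//   double radius = strategy->Radius();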
+class TrustRegionStrategy {
+ public:
+ struct Options {
+ Options()
+ : trust_region_strategy_type(LEVENBERG_MARQUARDT),
+ initial_radius(1e4),
+ max_radius(1e32),
+ lm_min_diagonal(1e-6),
+ lm_max_diagonal(1e32),
+ dogleg_type(TRADITIONAL_DOGLEG) {
+ }
+
+ TrustRegionStrategyType trust_region_strategy_type;
+ // Linear solver used for actually solving the trust region step.
+ LinearSolver* linear_solver;
+ double initial_radius;
+ double max_radius;
+
+ // Minimum and maximum values of the diagonal damping matrix used
+ // by LevenbergMarquardtStrategy. The DoglegStrategy also uses
+ // these bounds to construct a regularizing diagonal to ensure
+ // that the Gauss-Newton step computation is of full rank.
+ double lm_min_diagonal;
+ double lm_max_diagonal;
+
+ // Further specify which dogleg method to use
+ DoglegType dogleg_type;
+ };
+
+ // Per solve options.
+ struct PerSolveOptions {
+ // Forcing sequence for inexact solves.
+ double eta;
+ };
+
+ struct Summary {
+ Summary()
+ : residual_norm(0.0),
+ num_iterations(-1),
+ termination_type(FAILURE) {
+ }
+
+ // If the trust region problem is,
+ //
+ // 1/2 x'Ax + b'x + c,
+ //
+ // then
+ //
+    //  residual_norm = |Ax - b|
+ double residual_norm;
+
+ // Number of iterations used by the linear solver. If a linear
+    // solver was not called (e.g., DoglegStrategy after an
+ // unsuccessful step), then this would be zero.
+ int num_iterations;
+
+ // Status of the linear solver used to solve the Newton system.
+ LinearSolverTerminationType termination_type;
+ };
+
+ virtual ~TrustRegionStrategy();
+
+ // Use the current radius to solve for the trust region step.
+ virtual Summary ComputeStep(const PerSolveOptions& per_solve_options,
+ SparseMatrix* jacobian,
+ const double* residuals,
+ double* step) = 0;
+
+ // Inform the strategy that the current step has been accepted, and
+ // that the ratio of the decrease in the non-linear objective to the
+ // decrease in the trust region model is step_quality.
+ virtual void StepAccepted(double step_quality) = 0;
+
+ // Inform the strategy that the current step has been rejected, and
+ // that the ratio of the decrease in the non-linear objective to the
+ // decrease in the trust region model is step_quality.
+ virtual void StepRejected(double step_quality) = 0;
+
+ // Inform the strategy that the current step has been rejected
+ // because it was found to be numerically invalid.
+ // StepRejected/StepAccepted will not be called for this step, and
+ // the strategy is free to do what it wants with this information.
+ virtual void StepIsInvalid() = 0;
+
+ // Current trust region radius.
+ virtual double Radius() const = 0;
+
+ // Factory.
+ static TrustRegionStrategy* Create(const Options& options);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_TRUST_REGION_STRATEGY_H_
diff --git a/internal/ceres/types.cc b/internal/ceres/types.cc
new file mode 100644
index 0000000..fc33a4c
--- /dev/null
+++ b/internal/ceres/types.cc
@@ -0,0 +1,209 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <algorithm>
+#include <cctype>
+#include <string>
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+
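+// CASESTR(x) expands to a case label that returns the stringified enum
+// value; STRENUM(x) compares the (upper-cased) input string against the
+// stringified enum name and, on a match, stores the enum value in *type
+// and returns true.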
+#define CASESTR(x) case x: return #x
+
+#define STRENUM(x) if (value == #x) { *type = x; return true;}
+
+void UpperCase(string* input) {
+ std::transform(input->begin(), input->end(), input->begin(), ::toupper);
+}
+
+const char* LinearSolverTypeToString(LinearSolverType solver_type) {
+ switch (solver_type) {
+ CASESTR(DENSE_NORMAL_CHOLESKY);
+ CASESTR(DENSE_QR);
+ CASESTR(SPARSE_NORMAL_CHOLESKY);
+ CASESTR(DENSE_SCHUR);
+ CASESTR(SPARSE_SCHUR);
+ CASESTR(ITERATIVE_SCHUR);
+ CASESTR(CGNR);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+bool StringToLinearSolverType(string value, LinearSolverType* type) {
+ UpperCase(&value);
+ STRENUM(DENSE_NORMAL_CHOLESKY);
+ STRENUM(DENSE_QR);
+ STRENUM(SPARSE_NORMAL_CHOLESKY);
+ STRENUM(DENSE_SCHUR);
+ STRENUM(SPARSE_SCHUR);
+ STRENUM(ITERATIVE_SCHUR);
+ STRENUM(CGNR);
+ return false;
+}
+
+const char* PreconditionerTypeToString(
+ PreconditionerType preconditioner_type) {
+ switch (preconditioner_type) {
+ CASESTR(IDENTITY);
+ CASESTR(JACOBI);
+ CASESTR(SCHUR_JACOBI);
+ CASESTR(CLUSTER_JACOBI);
+ CASESTR(CLUSTER_TRIDIAGONAL);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+bool StringToPreconditionerType(string value, PreconditionerType* type) {
+ UpperCase(&value);
+ STRENUM(IDENTITY);
+ STRENUM(JACOBI);
+ STRENUM(SCHUR_JACOBI);
+ STRENUM(CLUSTER_JACOBI);
+ STRENUM(CLUSTER_TRIDIAGONAL);
+ return false;
+}
+
+const char* SparseLinearAlgebraLibraryTypeToString(
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type) {
+ switch (sparse_linear_algebra_library_type) {
+ CASESTR(SUITE_SPARSE);
+ CASESTR(CX_SPARSE);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+
+bool StringToSparseLinearAlgebraLibraryType(
+ string value,
+ SparseLinearAlgebraLibraryType* type) {
+ UpperCase(&value);
+ STRENUM(SUITE_SPARSE);
+ STRENUM(CX_SPARSE);
+ return false;
+}
+
+const char* TrustRegionStrategyTypeToString(
+ TrustRegionStrategyType trust_region_strategy_type) {
+ switch (trust_region_strategy_type) {
+ CASESTR(LEVENBERG_MARQUARDT);
+ CASESTR(DOGLEG);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+bool StringToTrustRegionStrategyType(string value,
+ TrustRegionStrategyType* type) {
+ UpperCase(&value);
+ STRENUM(LEVENBERG_MARQUARDT);
+ STRENUM(DOGLEG);
+ return false;
+}
+
+const char* DoglegTypeToString(DoglegType dogleg_type) {
+ switch (dogleg_type) {
+ CASESTR(TRADITIONAL_DOGLEG);
+ CASESTR(SUBSPACE_DOGLEG);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+bool StringToDoglegType(string value, DoglegType* type) {
+ UpperCase(&value);
+ STRENUM(TRADITIONAL_DOGLEG);
+ STRENUM(SUBSPACE_DOGLEG);
+ return false;
+}
+
+const char* SolverTerminationTypeToString(
+ SolverTerminationType termination_type) {
+ switch (termination_type) {
+ CASESTR(NO_CONVERGENCE);
+ CASESTR(FUNCTION_TOLERANCE);
+ CASESTR(GRADIENT_TOLERANCE);
+ CASESTR(PARAMETER_TOLERANCE);
+ CASESTR(NUMERICAL_FAILURE);
+ CASESTR(USER_ABORT);
+ CASESTR(USER_SUCCESS);
+ CASESTR(DID_NOT_RUN);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+const char* LinearSolverTerminationTypeToString(
+ LinearSolverTerminationType termination_type) {
+ switch (termination_type) {
+ CASESTR(TOLERANCE);
+ CASESTR(MAX_ITERATIONS);
+ CASESTR(STAGNATION);
+ CASESTR(FAILURE);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+#undef CASESTR
+#undef STRENUM
+
+bool IsSchurType(LinearSolverType type) {
+ return ((type == SPARSE_SCHUR) ||
+ (type == DENSE_SCHUR) ||
+ (type == ITERATIVE_SCHUR));
+}
+
+bool IsSparseLinearAlgebraLibraryTypeAvailable(
+ SparseLinearAlgebraLibraryType type) {
+ if (type == SUITE_SPARSE) {
+#ifdef CERES_NO_SUITESPARSE
+ return false;
+#else
+ return true;
+#endif
+ }
+
+ if (type == CX_SPARSE) {
+#ifdef CERES_NO_CXSPARSE
+ return false;
+#else
+ return true;
+#endif
+ }
+
+ LOG(WARNING) << "Unknown sparse linear algebra library " << type;
+ return false;
+}
+
+} // namespace ceres
diff --git a/internal/ceres/unsymmetric_linear_solver_test.cc b/internal/ceres/unsymmetric_linear_solver_test.cc
new file mode 100644
index 0000000..0b0d593
--- /dev/null
+++ b/internal/ceres/unsymmetric_linear_solver_test.cc
@@ -0,0 +1,139 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/casts.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/linear_solver.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+
+namespace ceres {
+namespace internal {
+
+class UnsymmetricLinearSolverTest : public ::testing::Test {
+ protected:
+ virtual void SetUp() {
+ scoped_ptr<LinearLeastSquaresProblem> problem(
+ CreateLinearLeastSquaresProblemFromId(0));
+
+ CHECK_NOTNULL(problem.get());
+ A_.reset(down_cast<TripletSparseMatrix*>(problem->A.release()));
+ b_.reset(problem->b.release());
+ D_.reset(problem->D.release());
+ sol_unregularized_.reset(problem->x.release());
+ sol_regularized_.reset(problem->x_D.release());
+ }
+
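+  // Solves the problem created in SetUp() both without and with the
+  // regularization diagonal D_, and compares the solutions against the
+  // reference solutions stored with the problem.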
+ void TestSolver(
+ LinearSolverType linear_solver_type,
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library) {
+ LinearSolver::Options options;
+ options.type = linear_solver_type;
+ options.sparse_linear_algebra_library = sparse_linear_algebra_library;
+ options.use_block_amd = false;
+ scoped_ptr<LinearSolver> solver(LinearSolver::Create(options));
+
+ LinearSolver::PerSolveOptions per_solve_options;
+ LinearSolver::Summary unregularized_solve_summary;
+ LinearSolver::Summary regularized_solve_summary;
+ Vector x_unregularized(A_->num_cols());
+ Vector x_regularized(A_->num_cols());
+
+ scoped_ptr<SparseMatrix> transformed_A;
+
+ if (linear_solver_type == DENSE_QR ||
+ linear_solver_type == DENSE_NORMAL_CHOLESKY) {
+ transformed_A.reset(new DenseSparseMatrix(*A_));
+ } else if (linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
+ transformed_A.reset(new CompressedRowSparseMatrix(*A_));
+ } else {
+      LOG(FATAL) << "Unknown linear solver: " << linear_solver_type;
+ }
+ // Unregularized
+ unregularized_solve_summary =
+ solver->Solve(transformed_A.get(),
+ b_.get(),
+ per_solve_options,
+ x_unregularized.data());
+
+ // Regularized solution
+ per_solve_options.D = D_.get();
+ regularized_solve_summary =
+ solver->Solve(transformed_A.get(),
+ b_.get(),
+ per_solve_options,
+ x_regularized.data());
+
+ EXPECT_EQ(unregularized_solve_summary.termination_type, TOLERANCE);
+
+ for (int i = 0; i < A_->num_cols(); ++i) {
+ EXPECT_NEAR(sol_unregularized_[i], x_unregularized[i], 1e-8);
+ }
+
+ EXPECT_EQ(regularized_solve_summary.termination_type, TOLERANCE);
+ for (int i = 0; i < A_->num_cols(); ++i) {
+ EXPECT_NEAR(sol_regularized_[i], x_regularized[i], 1e-8);
+ }
+ }
+
+ scoped_ptr<TripletSparseMatrix> A_;
+ scoped_array<double> b_;
+ scoped_array<double> D_;
+ scoped_array<double> sol_unregularized_;
+ scoped_array<double> sol_regularized_;
+};
+
+TEST_F(UnsymmetricLinearSolverTest, DenseQR) {
+ TestSolver(DENSE_QR, SUITE_SPARSE);
+}
+
+TEST_F(UnsymmetricLinearSolverTest, DenseNormalCholesky) {
+ TestSolver(DENSE_NORMAL_CHOLESKY, SUITE_SPARSE);
+}
+
+#ifndef CERES_NO_SUITESPARSE
+TEST_F(UnsymmetricLinearSolverTest, SparseNormalCholeskyUsingSuiteSparse) {
+ TestSolver(SPARSE_NORMAL_CHOLESKY, SUITE_SPARSE);
+}
+#endif
+
+#ifndef CERES_NO_CXSPARSE
+TEST_F(UnsymmetricLinearSolverTest, SparseNormalCholeskyUsingCXSparse) {
+ TestSolver(SPARSE_NORMAL_CHOLESKY, CX_SPARSE);
+}
+#endif
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/visibility.cc b/internal/ceres/visibility.cc
new file mode 100644
index 0000000..9d80654
--- /dev/null
+++ b/internal/ceres/visibility.cc
@@ -0,0 +1,149 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: kushalav@google.com (Avanish Kushal)
+
+#include <cmath>
+#include <ctime>
+#include <algorithm>
+#include <set>
+#include <vector>
+#include <utility>
+#include "ceres/block_structure.h"
+#include "ceres/collections_port.h"
+#include "ceres/graph.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+void ComputeVisibility(const CompressedRowBlockStructure& block_structure,
+ const int num_eliminate_blocks,
+ vector< set<int> >* visibility) {
+ CHECK_NOTNULL(visibility);
+
+ // Clear the visibility vector and resize it to hold a
+ // vector for each camera.
+ visibility->resize(0);
+ visibility->resize(block_structure.cols.size() - num_eliminate_blocks);
+
+ for (int i = 0; i < block_structure.rows.size(); ++i) {
+ const vector<Cell>& cells = block_structure.rows[i].cells;
+ int block_id = cells[0].block_id;
+ // If the first block is not an e_block, then skip this row block.
+ if (block_id >= num_eliminate_blocks) {
+ continue;
+ }
+
+ for (int j = 1; j < cells.size(); ++j) {
+ int camera_block_id = cells[j].block_id - num_eliminate_blocks;
+ DCHECK_GE(camera_block_id, 0);
+ DCHECK_LT(camera_block_id, visibility->size());
+ (*visibility)[camera_block_id].insert(block_id);
+ }
+ }
+}
+
+Graph<int>* CreateSchurComplementGraph(const vector<set<int> >& visibility) {
+ const time_t start_time = time(NULL);
+ // Compute the number of e_blocks/point blocks. Since the visibility
+  // set for each f_block/camera contains the set of e_blocks/points
+ // visible to it, we find the maximum across all visibility sets.
+ int num_points = 0;
+ for (int i = 0; i < visibility.size(); i++) {
+ if (visibility[i].size() > 0) {
+ num_points = max(num_points, (*visibility[i].rbegin()) + 1);
+ }
+ }
+
+ // Invert the visibility. The input is a camera->point mapping,
+ // which tells us which points are visible in which
+ // cameras. However, to compute the sparsity structure of the Schur
+  // Complement efficiently, it is better to have the point->camera
+ // mapping.
+ vector<set<int> > inverse_visibility(num_points);
+ for (int i = 0; i < visibility.size(); i++) {
+ const set<int>& visibility_set = visibility[i];
+ for (set<int>::const_iterator it = visibility_set.begin();
+ it != visibility_set.end();
+ ++it) {
+ inverse_visibility[*it].insert(i);
+ }
+ }
+
+ // Map from camera pairs to number of points visible to both cameras
+ // in the pair.
+ HashMap<pair<int, int>, int > camera_pairs;
+
+ // Count the number of points visible to each camera/f_block pair.
+ for (vector<set<int> >::const_iterator it = inverse_visibility.begin();
+ it != inverse_visibility.end();
+ ++it) {
+ const set<int>& inverse_visibility_set = *it;
+ for (set<int>::const_iterator camera1 = inverse_visibility_set.begin();
+ camera1 != inverse_visibility_set.end();
+ ++camera1) {
+ set<int>::const_iterator camera2 = camera1;
+ for (++camera2; camera2 != inverse_visibility_set.end(); ++camera2) {
+ ++(camera_pairs[make_pair(*camera1, *camera2)]);
+ }
+ }
+ }
+
+ Graph<int>* graph = new Graph<int>();
+
+ // Add vertices and initialize the pairs for self edges so that self
+ // edges are guaranteed. This is needed for the Canonical views
+ // algorithm to work correctly.
+ static const double kSelfEdgeWeight = 1.0;
+ for (int i = 0; i < visibility.size(); ++i) {
+ graph->AddVertex(i);
+ graph->AddEdge(i, i, kSelfEdgeWeight);
+ }
+
+ // Add an edge for each camera pair.
+ for (HashMap<pair<int, int>, int>::const_iterator it = camera_pairs.begin();
+ it != camera_pairs.end();
+ ++it) {
+ const int camera1 = it->first.first;
+ const int camera2 = it->first.second;
+ CHECK_NE(camera1, camera2);
+
+ const int count = it->second;
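+    // The edge weight is the number of points visible to both cameras,
+    // normalized by the geometric mean of the sizes of their visibility
+    // sets.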
+ // Static cast necessary for Windows.
+ const double weight = static_cast<double>(count) /
+ (sqrt(static_cast<double>(visibility[camera1].size() * visibility[camera2].size())));
+ graph->AddEdge(camera1, camera2, weight);
+ }
+
+ VLOG(2) << "Schur complement graph time: " << (time(NULL) - start_time);
+ return graph;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/visibility.h b/internal/ceres/visibility.h
new file mode 100644
index 0000000..f29e3c6
--- /dev/null
+++ b/internal/ceres/visibility.h
@@ -0,0 +1,77 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: kushalav@google.com (Avanish Kushal)
+// sameeragarwal@google.com (Sameer Agarwal)
+//
+// Functions to manipulate visibility information from the block
+// structure of sparse matrices.
+
+#ifndef CERES_INTERNAL_VISIBILITY_H_
+#define CERES_INTERNAL_VISIBILITY_H_
+
+#include <set>
+#include <vector>
+#include "ceres/graph.h"
+
+namespace ceres {
+namespace internal {
+
+struct CompressedRowBlockStructure;
+
+// Given a compressed row block structure, computes the set of
+// e_blocks "visible" to each f_block. If an e_block co-occurs with an
+// f_block in a residual block, it is visible to the f_block. The
+// first num_eliminate_blocks column blocks are e_blocks and the rest
+// are f_blocks.
+//
+// In a structure from motion problem, e_blocks correspond to 3D
+// points and f_blocks correspond to cameras.
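+//
+// For illustration only (hypothetical block ids, not taken from any
+// particular problem): with num_eliminate_blocks = 2, a residual
+// block whose cells reference the parameter blocks {0, 3} results in
+//
+//   (*visibility)[3 - 2].insert(0);
+//
+// i.e. e_block 0 is recorded as visible to f_block 1.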
+void ComputeVisibility(const CompressedRowBlockStructure& block_structure,
+ int num_eliminate_blocks,
+ vector<set<int> >* visibility);
+
+// Given f_block visibility as computed by the ComputeVisibility
+// function above, construct and return a graph whose vertices are
+// f_blocks and an edge connects two vertices if they have at least one
+// e_block in common. The weight of this edge is the normalized dot
+// product between the visibility vectors of the two
+// vertices/f_blocks.
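+//
+// A sketch of the edge weight used by the implementation: if V_i and
+// V_j are the visibility sets of f_blocks i and j, then
+//
+//   weight(i, j) = |V_i intersection V_j| / sqrt(|V_i| * |V_j|)
+//
+// and every vertex additionally carries a self edge of weight 1.0.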
+//
+// This graph reflects the sparsity structure of the reduced camera
+// matrix/Schur complement matrix obtained by eliminating the e_blocks
+// from the normal equations.
+//
+// Caller acquires ownership of the returned Graph pointer
+// (heap-allocated).
+Graph<int>* CreateSchurComplementGraph(const vector<set<int> >& visibility);
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_VISIBILITY_H_
diff --git a/internal/ceres/visibility_based_preconditioner.cc b/internal/ceres/visibility_based_preconditioner.cc
new file mode 100644
index 0000000..ae26d91
--- /dev/null
+++ b/internal/ceres/visibility_based_preconditioner.cc
@@ -0,0 +1,613 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/visibility_based_preconditioner.h"
+
+#include <algorithm>
+#include <functional>
+#include <iterator>
+#include <set>
+#include <utility>
+#include <vector>
+#include "Eigen/Dense"
+#include "ceres/block_random_access_sparse_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/canonical_views_clustering.h"
+#include "ceres/collections_port.h"
+#include "ceres/detect_structure.h"
+#include "ceres/graph.h"
+#include "ceres/graph_algorithms.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_solver.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/visibility.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+// TODO(sameeragarwal): Currently these are magic weights for the
+// preconditioner construction. Move these higher up into the Options
+// struct and provide some guidelines for choosing them.
+//
+// This will require some more work on the clustering algorithm and
+// possibly some more refactoring of the code.
+static const double kSizePenaltyWeight = 3.0;
+static const double kSimilarityPenaltyWeight = 0.0;
+
+#ifndef CERES_NO_SUITESPARSE
+VisibilityBasedPreconditioner::VisibilityBasedPreconditioner(
+ const CompressedRowBlockStructure& bs,
+ const LinearSolver::Options& options)
+ : options_(options),
+ num_blocks_(0),
+ num_clusters_(0),
+ factor_(NULL) {
+ CHECK_GT(options_.elimination_groups.size(), 1);
+ CHECK_GT(options_.elimination_groups[0], 0);
+ CHECK(options_.preconditioner_type == SCHUR_JACOBI ||
+ options_.preconditioner_type == CLUSTER_JACOBI ||
+ options_.preconditioner_type == CLUSTER_TRIDIAGONAL)
+ << "Unknown preconditioner type: " << options_.preconditioner_type;
+ num_blocks_ = bs.cols.size() - options_.elimination_groups[0];
+ CHECK_GT(num_blocks_, 0)
+ << "Jacobian should have atleast 1 f_block for "
+ << "visibility based preconditioning.";
+
+ // Vector of camera block sizes
+ block_size_.resize(num_blocks_);
+ for (int i = 0; i < num_blocks_; ++i) {
+ block_size_[i] = bs.cols[i + options_.elimination_groups[0]].size;
+ }
+
+ const time_t start_time = time(NULL);
+ switch (options_.preconditioner_type) {
+ case SCHUR_JACOBI:
+ ComputeSchurJacobiSparsity(bs);
+ break;
+ case CLUSTER_JACOBI:
+ ComputeClusterJacobiSparsity(bs);
+ break;
+ case CLUSTER_TRIDIAGONAL:
+ ComputeClusterTridiagonalSparsity(bs);
+ break;
+ default:
+ LOG(FATAL) << "Unknown preconditioner type";
+ }
+ const time_t structure_time = time(NULL);
+ InitStorage(bs);
+ const time_t storage_time = time(NULL);
+ InitEliminator(bs);
+ const time_t eliminator_time = time(NULL);
+
+ // Allocate temporary storage for a vector used during
+ // RightMultiply.
+ tmp_rhs_ = CHECK_NOTNULL(ss_.CreateDenseVector(NULL,
+ m_->num_rows(),
+ m_->num_rows()));
+ const time_t init_time = time(NULL);
+ VLOG(2) << "init time: "
+ << init_time - start_time
+ << " structure time: " << structure_time - start_time
+ << " storage time:" << storage_time - structure_time
+ << " eliminator time: " << eliminator_time - storage_time;
+}
+
+VisibilityBasedPreconditioner::~VisibilityBasedPreconditioner() {
+ if (factor_ != NULL) {
+ ss_.Free(factor_);
+ factor_ = NULL;
+ }
+ if (tmp_rhs_ != NULL) {
+ ss_.Free(tmp_rhs_);
+ tmp_rhs_ = NULL;
+ }
+}
+
+// Determine the sparsity structure of the SCHUR_JACOBI
+// preconditioner. SCHUR_JACOBI is an extreme case of a visibility
+// based preconditioner where each camera block corresponds to a
+// cluster and there is no interaction between clusters.
+void VisibilityBasedPreconditioner::ComputeSchurJacobiSparsity(
+ const CompressedRowBlockStructure& bs) {
+ num_clusters_ = num_blocks_;
+ cluster_membership_.resize(num_blocks_);
+ cluster_pairs_.clear();
+
+ // Each camera block is a member of its own cluster and the only
+ // cluster pairs are the self edges (i,i).
+ for (int i = 0; i < num_clusters_; ++i) {
+ cluster_membership_[i] = i;
+ cluster_pairs_.insert(make_pair(i, i));
+ }
+}
+
+// Determine the sparsity structure of the CLUSTER_JACOBI
+// preconditioner. It clusters cameras using their scene
+// visibility. The clusters form the diagonal blocks of the
+// preconditioner matrix.
+void VisibilityBasedPreconditioner::ComputeClusterJacobiSparsity(
+ const CompressedRowBlockStructure& bs) {
+ vector<set<int> > visibility;
+ ComputeVisibility(bs, options_.elimination_groups[0], &visibility);
+ CHECK_EQ(num_blocks_, visibility.size());
+ ClusterCameras(visibility);
+ cluster_pairs_.clear();
+ for (int i = 0; i < num_clusters_; ++i) {
+ cluster_pairs_.insert(make_pair(i, i));
+ }
+}
+
+// Determine the sparsity structure of the CLUSTER_TRIDIAGONAL
+// preconditioner. It clusters cameras using the scene
+// visibility and then finds the strongly interacting pairs of
+// clusters by constructing another graph with the clusters as
+// vertices and approximating it with a degree-2 maximum spanning
+// forest. The set of edges in this forest are the cluster pairs.
+void VisibilityBasedPreconditioner::ComputeClusterTridiagonalSparsity(
+ const CompressedRowBlockStructure& bs) {
+ vector<set<int> > visibility;
+ ComputeVisibility(bs, options_.elimination_groups[0], &visibility);
+ CHECK_EQ(num_blocks_, visibility.size());
+ ClusterCameras(visibility);
+
+ // Construct a weighted graph on the set of clusters, where the
+ // edges are the number of 3D points/e_blocks visible in both the
+ // clusters at the ends of the edge. Return an approximate degree-2
+ // maximum spanning forest of this graph.
+ vector<set<int> > cluster_visibility;
+ ComputeClusterVisibility(visibility, &cluster_visibility);
+ scoped_ptr<Graph<int> > cluster_graph(
+ CHECK_NOTNULL(CreateClusterGraph(cluster_visibility)));
+ scoped_ptr<Graph<int> > forest(
+ CHECK_NOTNULL(Degree2MaximumSpanningForest(*cluster_graph)));
+ ForestToClusterPairs(*forest, &cluster_pairs_);
+}
+
+// Allocate storage for the preconditioner matrix.
+void VisibilityBasedPreconditioner::InitStorage(
+ const CompressedRowBlockStructure& bs) {
+ ComputeBlockPairsInPreconditioner(bs);
+ m_.reset(new BlockRandomAccessSparseMatrix(block_size_, block_pairs_));
+}
+
+// Call the canonical views algorithm and cluster the cameras based on
+// their visibility sets. The visibility set of a camera is the set of
+// e_blocks/3D points in the scene that are seen by it.
+//
+// The cluster_membership_ vector is updated to indicate cluster
+// memberships for each camera block.
+void VisibilityBasedPreconditioner::ClusterCameras(
+ const vector<set<int> >& visibility) {
+ scoped_ptr<Graph<int> > schur_complement_graph(
+ CHECK_NOTNULL(CreateSchurComplementGraph(visibility)));
+
+ CanonicalViewsClusteringOptions options;
+ options.size_penalty_weight = kSizePenaltyWeight;
+ options.similarity_penalty_weight = kSimilarityPenaltyWeight;
+
+ vector<int> centers;
+ HashMap<int, int> membership;
+ ComputeCanonicalViewsClustering(*schur_complement_graph,
+ options,
+ &centers,
+ &membership);
+ num_clusters_ = centers.size();
+ CHECK_GT(num_clusters_, 0);
+ VLOG(2) << "num_clusters: " << num_clusters_;
+ FlattenMembershipMap(membership, &cluster_membership_);
+}
+
+// Compute the block sparsity structure of the Schur complement
+// matrix. For each pair of cameras contributing a non-zero cell to
+// the schur complement, determine if that cell is present in the
+// preconditioner or not.
+//
+// A pair of cameras contribute a cell to the preconditioner if they
+// are part of the same cluster or if the two clusters that they
+// belong to have an edge connecting them in the degree-2 maximum
+// spanning forest.
+//
+// For example, consider a camera pair (i,j) where i belongs to
+// cluster1 and j belongs to cluster2 (assume that cluster1 < cluster2).
+//
+// The cell corresponding to (i,j) is present in the preconditioner
+// if cluster1 == cluster2 or the pair (cluster1, cluster2) were
+// connected by an edge in the degree-2 maximum spanning forest.
+//
+// Since we have already expanded the forest into a set of camera
+// pairs/edges, including self edges, the check can be reduced to
+// checking membership of (cluster1, cluster2) in cluster_pairs_.
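+//
+// For illustration (hypothetical ids): if camera 3 lives in cluster 1
+// and camera 7 lives in cluster 2, then the cell (3, 7) is stored in
+// the preconditioner iff
+//
+//   cluster_pairs_.count(make_pair(1, 2)) > 0
+//
+// which is exactly the test performed by IsBlockPairInPreconditioner.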
+void VisibilityBasedPreconditioner::ComputeBlockPairsInPreconditioner(
+ const CompressedRowBlockStructure& bs) {
+ block_pairs_.clear();
+ for (int i = 0; i < num_blocks_; ++i) {
+ block_pairs_.insert(make_pair(i, i));
+ }
+
+ int r = 0;
+ const int num_row_blocks = bs.rows.size();
+ const int num_eliminate_blocks = options_.elimination_groups[0];
+
+ // Iterate over each row of the matrix. The block structure of the
+ // matrix is assumed to be sorted in order of the e_blocks/point
+ // blocks. Thus all row blocks containing an e_block/point occur
+ // contiguously. Further, if present, an e_block is always the first
+ // parameter block in each row block. These structural assumptions
+ // are common to all Schur complement based solvers in Ceres.
+ //
+ // For each e_block/point block we identify the set of cameras
+ // seeing it. The cross product of this set with itself is the set
+ // of non-zero cells contributed by this e_block.
+ //
+ // The time complexity of this is O(nm^2), where n is the number of
+ // 3D points and m is the maximum number of cameras seeing any
+ // point, which for most scenes is a fairly small number.
+ while (r < num_row_blocks) {
+ int e_block_id = bs.rows[r].cells.front().block_id;
+ if (e_block_id >= num_eliminate_blocks) {
+ // Skip the rows whose first block is an f_block.
+ break;
+ }
+
+ set<int> f_blocks;
+ for (; r < num_row_blocks; ++r) {
+ const CompressedRow& row = bs.rows[r];
+ if (row.cells.front().block_id != e_block_id) {
+ break;
+ }
+
+ // Iterate over the blocks in the row, ignoring the first block
+ // since it is the one to be eliminated and adding the rest to
+ // the list of f_blocks associated with this e_block.
+ for (int c = 1; c < row.cells.size(); ++c) {
+ const Cell& cell = row.cells[c];
+ const int f_block_id = cell.block_id - num_eliminate_blocks;
+ CHECK_GE(f_block_id, 0);
+ f_blocks.insert(f_block_id);
+ }
+ }
+
+ for (set<int>::const_iterator block1 = f_blocks.begin();
+ block1 != f_blocks.end();
+ ++block1) {
+ set<int>::const_iterator block2 = block1;
+ ++block2;
+ for (; block2 != f_blocks.end(); ++block2) {
+ if (IsBlockPairInPreconditioner(*block1, *block2)) {
+ block_pairs_.insert(make_pair(*block1, *block2));
+ }
+ }
+ }
+ }
+
+ // Process the remaining rows, which do not contain any e_blocks.
+ for (; r < num_row_blocks; ++r) {
+ const CompressedRow& row = bs.rows[r];
+ CHECK_GE(row.cells.front().block_id, num_eliminate_blocks);
+ for (int i = 0; i < row.cells.size(); ++i) {
+ const int block1 = row.cells[i].block_id - num_eliminate_blocks;
+ for (int j = 0; j < row.cells.size(); ++j) {
+ const int block2 = row.cells[j].block_id - num_eliminate_blocks;
+ if (block1 <= block2) {
+ if (IsBlockPairInPreconditioner(block1, block2)) {
+ block_pairs_.insert(make_pair(block1, block2));
+ }
+ }
+ }
+ }
+ }
+
+ VLOG(1) << "Block pair stats: " << block_pairs_.size();
+}
+
+// Initialize the SchurEliminator.
+void VisibilityBasedPreconditioner::InitEliminator(
+ const CompressedRowBlockStructure& bs) {
+ LinearSolver::Options eliminator_options;
+
+ eliminator_options.elimination_groups = options_.elimination_groups;
+ eliminator_options.num_threads = options_.num_threads;
+
+ DetectStructure(bs, options_.elimination_groups[0],
+ &eliminator_options.row_block_size,
+ &eliminator_options.e_block_size,
+ &eliminator_options.f_block_size);
+
+ eliminator_.reset(SchurEliminatorBase::Create(eliminator_options));
+ eliminator_->Init(options_.elimination_groups[0], &bs);
+}
+
+// Update the values of the preconditioner matrix and factorize it.
+bool VisibilityBasedPreconditioner::Update(const BlockSparseMatrixBase& A,
+ const double* D) {
+ const time_t start_time = time(NULL);
+ const int num_rows = m_->num_rows();
+ CHECK_GT(num_rows, 0);
+
+ // We need a dummy rhs vector and a dummy b vector since the Schur
+ // eliminator combines the computation of the reduced camera matrix
+ // with the computation of the right hand side of that linear
+ // system.
+ //
+ // TODO(sameeragarwal): Perhaps it's worth refactoring the
+ // SchurEliminator::Eliminate function to allow NULL for the rhs. As
+ // of now it does not seem to be worth the effort.
+ Vector rhs = Vector::Zero(m_->num_rows());
+ Vector b = Vector::Zero(A.num_rows());
+
+ // Compute a subset of the entries of the Schur complement.
+ eliminator_->Eliminate(&A, b.data(), D, m_.get(), rhs.data());
+
+ // Try factorizing the matrix. For SCHUR_JACOBI and CLUSTER_JACOBI,
+ // this should always succeed modulo some numerical/conditioning
+ // problems. For CLUSTER_TRIDIAGONAL, in general the preconditioner
+ // matrix as constructed is not positive definite. However, we will
+ // go ahead and try factorizing it. If it works, great, otherwise we
+ // scale all the cells in the preconditioner corresponding to the
+ // edges in the degree-2 forest and that guarantees positive
+ // definiteness. The proof of this fact can be found in Lemma 1 in
+ // "Visibility Based Preconditioning for Bundle Adjustment".
+ //
+ // Doing the factorization like this saves us matrix mass when
+ // scaling is not needed, which is quite often in our experience.
+ bool status = Factorize();
+
+ // The scaling only affects the tri-diagonal case, since
+ // ScaleOffDiagonalCells only pays attention to the cells that
+ // belong to the edges of the degree-2 forest. In the SCHUR_JACOBI
+ // and the CLUSTER_JACOBI cases, the preconditioner is guaranteed to
+ // be positive semidefinite.
+ if (!status && options_.preconditioner_type == CLUSTER_TRIDIAGONAL) {
+ VLOG(1) << "Unscaled factorization failed. Retrying with off-diagonal "
+ << "scaling";
+ ScaleOffDiagonalCells();
+ status = Factorize();
+ }
+
+ VLOG(2) << "Compute time: " << time(NULL) - start_time;
+ return status;
+}
+
+// Consider the preconditioner matrix as a meta-block matrix, whose
+// blocks correspond to the clusters. Then cluster pairs corresponding
+// to edges in the degree-2 forest are off diagonal entries of this
+// matrix. Scaling these off-diagonal entries by 1/2 forces this
+// matrix to be positive definite.
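+//
+// As a two cluster sketch: if the preconditioner, viewed at the
+// cluster level, is
+//
+//   M = [S11 S12]
+//       [S21 S22]
+//
+// and (1, 2) is an edge in the degree-2 forest, then the cells making
+// up S12 (and, by symmetry, S21) are multiplied by 0.5 before the
+// matrix is factorized again.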
+void VisibilityBasedPreconditioner::ScaleOffDiagonalCells() {
+ for (set< pair<int, int> >::const_iterator it = block_pairs_.begin();
+ it != block_pairs_.end();
+ ++it) {
+ const int block1 = it->first;
+ const int block2 = it->second;
+ if (!IsBlockPairOffDiagonal(block1, block2)) {
+ continue;
+ }
+
+ int r, c, row_stride, col_stride;
+ CellInfo* cell_info = m_->GetCell(block1, block2,
+ &r, &c,
+ &row_stride, &col_stride);
+ CHECK(cell_info != NULL)
+ << "Cell missing for block pair (" << block1 << "," << block2 << ")"
+ << " cluster pair (" << cluster_membership_[block1]
+ << " " << cluster_membership_[block2] << ")";
+
+ // Ah the magic of tri-diagonal matrices and diagonal
+ // dominance. See Lemma 1 in "Visibility Based Preconditioning
+ // For Bundle Adjustment".
+ MatrixRef m(cell_info->values, row_stride, col_stride);
+ m.block(r, c, block_size_[block1], block_size_[block2]) *= 0.5;
+ }
+}
+
+// Compute the sparse Cholesky factorization of the preconditioner
+// matrix.
+bool VisibilityBasedPreconditioner::Factorize() {
+ // Extract the TripletSparseMatrix that is used for actually storing
+ // S and convert it into a cholmod_sparse object.
+ cholmod_sparse* lhs = ss_.CreateSparseMatrix(
+ down_cast<BlockRandomAccessSparseMatrix*>(
+ m_.get())->mutable_matrix());
+
+ // The matrix is symmetric, and the upper triangular part of the
+ // matrix contains the values.
+ lhs->stype = 1;
+
+ // Symbolic factorization is computed if we don't already have one handy.
+ if (factor_ == NULL) {
+ if (options_.use_block_amd) {
+ factor_ = ss_.BlockAnalyzeCholesky(lhs, block_size_, block_size_);
+ } else {
+ factor_ = ss_.AnalyzeCholesky(lhs);
+ }
+
+ if (VLOG_IS_ON(2)) {
+ cholmod_print_common("Symbolic Analysis", ss_.mutable_cc());
+ }
+ }
+
+ CHECK_NOTNULL(factor_);
+
+ bool status = ss_.Cholesky(lhs, factor_);
+ ss_.Free(lhs);
+ return status;
+}
+
+void VisibilityBasedPreconditioner::RightMultiply(const double* x,
+ double* y) const {
+ CHECK_NOTNULL(x);
+ CHECK_NOTNULL(y);
+ SuiteSparse* ss = const_cast<SuiteSparse*>(&ss_);
+
+ const int num_rows = m_->num_rows();
+ memcpy(CHECK_NOTNULL(tmp_rhs_)->x, x, m_->num_rows() * sizeof(*x));
+ cholmod_dense* solution = CHECK_NOTNULL(ss->Solve(factor_, tmp_rhs_));
+ memcpy(y, solution->x, sizeof(*y) * num_rows);
+ ss->Free(solution);
+}
+
+int VisibilityBasedPreconditioner::num_rows() const {
+ return m_->num_rows();
+}
+
+// Classify camera/f_block pairs as in and out of the preconditioner,
+// based on whether the cluster pair that they belong to is in the
+// preconditioner or not.
+bool VisibilityBasedPreconditioner::IsBlockPairInPreconditioner(
+ const int block1,
+ const int block2) const {
+ int cluster1 = cluster_membership_[block1];
+ int cluster2 = cluster_membership_[block2];
+ if (cluster1 > cluster2) {
+ std::swap(cluster1, cluster2);
+ }
+ return (cluster_pairs_.count(make_pair(cluster1, cluster2)) > 0);
+}
+
+bool VisibilityBasedPreconditioner::IsBlockPairOffDiagonal(
+ const int block1,
+ const int block2) const {
+ return (cluster_membership_[block1] != cluster_membership_[block2]);
+}
+
+// Convert a graph into a list of edges that includes self edges for
+// each vertex.
+void VisibilityBasedPreconditioner::ForestToClusterPairs(
+ const Graph<int>& forest,
+ HashSet<pair<int, int> >* cluster_pairs) const {
+ CHECK_NOTNULL(cluster_pairs)->clear();
+ const HashSet<int>& vertices = forest.vertices();
+ CHECK_EQ(vertices.size(), num_clusters_);
+
+ // Add all the cluster pairs corresponding to the edges in the
+ // forest.
+ for (HashSet<int>::const_iterator it1 = vertices.begin();
+ it1 != vertices.end();
+ ++it1) {
+ const int cluster1 = *it1;
+ cluster_pairs->insert(make_pair(cluster1, cluster1));
+ const HashSet<int>& neighbors = forest.Neighbors(cluster1);
+ for (HashSet<int>::const_iterator it2 = neighbors.begin();
+ it2 != neighbors.end();
+ ++it2) {
+ const int cluster2 = *it2;
+ if (cluster1 < cluster2) {
+ cluster_pairs->insert(make_pair(cluster1, cluster2));
+ }
+ }
+ }
+}
+
+// The visibility set of a cluster is the union of the visibility sets
+// of all its cameras. In other words, the set of points visible to
+// any camera in the cluster.
+void VisibilityBasedPreconditioner::ComputeClusterVisibility(
+ const vector<set<int> >& visibility,
+ vector<set<int> >* cluster_visibility) const {
+ CHECK_NOTNULL(cluster_visibility)->resize(0);
+ cluster_visibility->resize(num_clusters_);
+ for (int i = 0; i < num_blocks_; ++i) {
+ const int cluster_id = cluster_membership_[i];
+ (*cluster_visibility)[cluster_id].insert(visibility[i].begin(),
+ visibility[i].end());
+ }
+}
+
+// Construct a graph whose vertices are the clusters, and the edge
+// weights are the number of 3D points visible to cameras in both the
+// vertices.
+Graph<int>* VisibilityBasedPreconditioner::CreateClusterGraph(
+ const vector<set<int> >& cluster_visibility) const {
+ Graph<int>* cluster_graph = new Graph<int>;
+
+ for (int i = 0; i < num_clusters_; ++i) {
+ cluster_graph->AddVertex(i);
+ }
+
+ for (int i = 0; i < num_clusters_; ++i) {
+ const set<int>& cluster_i = cluster_visibility[i];
+ for (int j = i+1; j < num_clusters_; ++j) {
+ vector<int> intersection;
+ const set<int>& cluster_j = cluster_visibility[j];
+ set_intersection(cluster_i.begin(), cluster_i.end(),
+ cluster_j.begin(), cluster_j.end(),
+ back_inserter(intersection));
+
+ if (intersection.size() > 0) {
+ // Clusters interact strongly when they share a large number
+ // of 3D points. The degree-2 maximum spanning forest
+ // algorithm iterates over the edges in decreasing order of
+ // their weight, which is the number of points shared by the
+ // two clusters that the edge connects.
+ cluster_graph->AddEdge(i, j, intersection.size());
+ }
+ }
+ }
+ return cluster_graph;
+}
+
+// Canonical views clustering returns a HashMap from vertices to
+// cluster ids. Convert this into a flat array for quick lookup. It is
+// possible that some of the vertices may not be associated with any
+// cluster. In that case, randomly assign them to one of the clusters.
+void VisibilityBasedPreconditioner::FlattenMembershipMap(
+ const HashMap<int, int>& membership_map,
+ vector<int>* membership_vector) const {
+ CHECK_NOTNULL(membership_vector)->resize(0);
+ membership_vector->resize(num_blocks_, -1);
+ // Iterate over the cluster membership map and update the
+ // cluster_membership_ vector assigning arbitrary cluster ids to
+ // the few cameras that have not been clustered.
+ for (HashMap<int, int>::const_iterator it = membership_map.begin();
+ it != membership_map.end();
+ ++it) {
+ const int camera_id = it->first;
+ int cluster_id = it->second;
+
+ // If the view was not clustered, randomly assign it to one of the
+ // clusters. This preserves the mathematical correctness of the
+ // preconditioner. If there are too many views which are not
+ // clustered, it may lead to some quality degradation though.
+ //
+ // TODO(sameeragarwal): Check if a large number of views have not
+ // been clustered and deal with it?
+ if (cluster_id == -1) {
+ cluster_id = camera_id % num_clusters_;
+ }
+
+ membership_vector->at(camera_id) = cluster_id;
+ }
+}
+
+#endif // CERES_NO_SUITESPARSE
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/visibility_based_preconditioner.h b/internal/ceres/visibility_based_preconditioner.h
new file mode 100644
index 0000000..3246fb8
--- /dev/null
+++ b/internal/ceres/visibility_based_preconditioner.h
@@ -0,0 +1,273 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Preconditioners for linear systems that arise in Structure from
+// Motion problems. VisibilityBasedPreconditioner implements three
+// preconditioners:
+//
+// SCHUR_JACOBI
+// CLUSTER_JACOBI
+// CLUSTER_TRIDIAGONAL
+//
+// Detailed descriptions of these preconditioners beyond what is
+// documented here can be found in
+//
+// Bundle Adjustment in the Large
+// S. Agarwal, N. Snavely, S. Seitz & R. Szeliski, ECCV 2010
+// http://www.cs.washington.edu/homes/sagarwal/bal.pdf
+//
+// Visibility Based Preconditioning for Bundle Adjustment
+// A. Kushal & S. Agarwal, submitted to CVPR 2012
+// http://www.cs.washington.edu/homes/sagarwal/vbp.pdf
+//
+// The three preconditioners share enough code that it's most efficient
+// to implement them as part of the same code base.
+
+#ifndef CERES_INTERNAL_VISIBILITY_BASED_PRECONDITIONER_H_
+#define CERES_INTERNAL_VISIBILITY_BASED_PRECONDITIONER_H_
+
+#include <set>
+#include <vector>
+#include <utility>
+#include "ceres/collections_port.h"
+#include "ceres/graph.h"
+#include "ceres/linear_solver.h"
+#include "ceres/linear_operator.h"
+#include "ceres/suitesparse.h"
+#include "ceres/internal/macros.h"
+#include "ceres/internal/scoped_ptr.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockRandomAccessSparseMatrix;
+class BlockSparseMatrixBase;
+struct CompressedRowBlockStructure;
+class SchurEliminatorBase;
+
+// This class implements three preconditioners for Structure from
+// Motion/Bundle Adjustment problems. The name
+// VisibilityBasedPreconditioner comes from the fact that the sparsity
+// structure of the preconditioner matrix is determined by analyzing
+// the visibility structure of the scene, i.e. which cameras see which
+// points.
+//
+// Strictly speaking, SCHUR_JACOBI is not a visibility based
+// preconditioner but it is an extreme case of CLUSTER_JACOBI, where
+// every cluster contains exactly one camera block. Treating it as a
+// special case of CLUSTER_JACOBI makes it easy to implement as part
+// of the same code base with no significant loss of performance.
+//
+// In the following, we will only discuss CLUSTER_JACOBI and
+// CLUSTER_TRIDIAGONAL.
+//
+// The key idea of visibility based preconditioning is to identify
+// cameras that we expect to have strong interactions, and then to use the
+// entries in the Schur complement matrix corresponding to these
+// camera pairs as an approximation to the full Schur complement.
+//
+// CLUSTER_JACOBI identifies these camera pairs by clustering cameras,
+// and considering all non-zero camera pairs within each cluster. The
+// clustering in the current implementation is done using the
+// Canonical Views algorithm of Simon et al. (see
+// canonical_views_clustering.h). For the purposes of clustering, the
+// similarity or the degree of interaction between a pair of cameras
+// is measured by counting the number of points visible in both the
+// cameras. Thus the name VisibilityBasedPreconditioner. Further, if we
+// were to permute the parameter blocks such that all the cameras in
+// the same cluster occur contiguously, the preconditioner matrix will
+// be a block diagonal matrix with blocks corresponding to the
+// clusters. Thus in analogy with the Jacobi preconditioner we refer
+// to this as the CLUSTER_JACOBI preconditioner.
+//
+// CLUSTER_TRIDIAGONAL adds more mass to the CLUSTER_JACOBI
+// preconditioner by considering the interaction between clusters and
+// identifying strong interactions between cluster pairs. This is done
+// by constructing a weighted graph on the clusters, with the weight
+// on the edges connecting two clusters proportional to the number of
+// 3D points visible to cameras in both the clusters. A degree-2
+// maximum spanning forest is identified in this graph and the camera
+// pairs contained in the edges of this forest are added to the
+// preconditioner. The detailed reasoning for this construction is
+// explained in the paper mentioned above.
+//
+// Degree-2 spanning trees and forests have the property that they
+// correspond to tri-diagonal matrices. Thus there exists a permutation
+// of the camera blocks under which the CLUSTER_TRIDIAGONAL
+// preconditioner matrix is a block tridiagonal matrix, and thus the
+// name for the preconditioner.
+//
+// Thread Safety: This class is NOT thread safe.
+//
+// Example usage:
+//
+// LinearSolver::Options options;
+// options.preconditioner_type = CLUSTER_JACOBI;
+// options.num_eliminate_blocks = num_points;
+// VisibilityBasedPreconditioner preconditioner(
+// *A.block_structure(), options);
+// preconditioner.Update(A, NULL);
+// preconditioner.RightMultiply(x, y);
+//
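+// In the example above, x and y point to arrays of size num_rows();
+// after a successful Update(), RightMultiply(x, y) computes
+// y = M^{-1} x, where M is the preconditioner matrix, by solving
+// against the sparse Cholesky factorization computed inside Update().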
+
+#ifndef CERES_NO_SUITESPARSE
+class VisibilityBasedPreconditioner : public LinearOperator {
+ public:
+ // Initialize the symbolic structure of the preconditioner. bs is
+ // the block structure of the linear system to be solved. It is used
+ // to determine the sparsity structure of the preconditioner matrix.
+ //
+ // It has the same structural requirement as other Schur complement
+ // based solvers. Please see schur_eliminator.h for more details.
+ //
+ // LinearSolver::Options::num_eliminate_blocks should be set to the
+ // number of e_blocks in the block structure.
+ //
+ // TODO(sameeragarwal): The use of LinearSolver::Options should
+ // ultimately be replaced with Preconditioner::Options and some sort
+ // of preconditioner factory along the lines of
+ // LinearSolver::CreateLinearSolver. I will wait to do this till I
+ // create a general purpose block Jacobi preconditioner for general
+ // sparse problems along with a CGLS solver.
+ VisibilityBasedPreconditioner(const CompressedRowBlockStructure& bs,
+ const LinearSolver::Options& options);
+ virtual ~VisibilityBasedPreconditioner();
+
+ // Update the numerical value of the preconditioner for the linear
+ // system:
+ //
+ // | A | x = |b|
+ // |diag(D)| |0|
+ //
+ // for some vector b. It is important that the matrix A have the
+ // same block structure as the one used to construct this object.
+ //
+ // D can be NULL, in which case it's interpreted as a diagonal matrix
+ // of size zero.
+ bool Update(const BlockSparseMatrixBase& A, const double* D);
+
+
+ // LinearOperator interface. Since the operator is symmetric,
+ // LeftMultiply and num_cols are just calls to RightMultiply and
+ // num_rows respectively. Update() must be called before
+ // RightMultiply can be called.
+ virtual void RightMultiply(const double* x, double* y) const;
+ virtual void LeftMultiply(const double* x, double* y) const {
+ RightMultiply(x, y);
+ }
+ virtual int num_rows() const;
+ virtual int num_cols() const { return num_rows(); }
+
+ friend class VisibilityBasedPreconditionerTest;
+ private:
+ void ComputeSchurJacobiSparsity(const CompressedRowBlockStructure& bs);
+ void ComputeClusterJacobiSparsity(const CompressedRowBlockStructure& bs);
+ void ComputeClusterTridiagonalSparsity(const CompressedRowBlockStructure& bs);
+ void InitStorage(const CompressedRowBlockStructure& bs);
+ void InitEliminator(const CompressedRowBlockStructure& bs);
+ bool Factorize();
+ void ScaleOffDiagonalCells();
+
+ void ClusterCameras(const vector< set<int> >& visibility);
+ void FlattenMembershipMap(const HashMap<int, int>& membership_map,
+ vector<int>* membership_vector) const;
+ void ComputeClusterVisibility(const vector<set<int> >& visibility,
+ vector<set<int> >* cluster_visibility) const;
+ Graph<int>* CreateClusterGraph(const vector<set<int> >& visibility) const;
+ void ForestToClusterPairs(const Graph<int>& forest,
+ HashSet<pair<int, int> >* cluster_pairs) const;
+ void ComputeBlockPairsInPreconditioner(const CompressedRowBlockStructure& bs);
+ bool IsBlockPairInPreconditioner(int block1, int block2) const;
+ bool IsBlockPairOffDiagonal(int block1, int block2) const;
+
+ LinearSolver::Options options_;
+
+ // Number of parameter blocks in the schur complement.
+ int num_blocks_;
+ int num_clusters_;
+
+ // Sizes of the blocks in the schur complement.
+ vector<int> block_size_;
+
+ // Mapping from cameras to clusters.
+ vector<int> cluster_membership_;
+
+ // Non-zero camera pairs from the schur complement matrix that are
+ // present in the preconditioner, sorted by row (first element of
+ // each pair), then column (second).
+ set<pair<int, int> > block_pairs_;
+
+ // Set of cluster pairs (including self pairs (i,i)) in the
+ // preconditioner.
+ HashSet<pair<int, int> > cluster_pairs_;
+ scoped_ptr<SchurEliminatorBase> eliminator_;
+
+ // Preconditioner matrix.
+ scoped_ptr<BlockRandomAccessSparseMatrix> m_;
+
+ // RightMultiply is a const method for LinearOperators. It is
+ // implemented using CHOLMOD's sparse triangular matrix solve
+ // function. This however requires non-const access to the
+ // SuiteSparse context object, even though it does not result in any
+ // of the state of the preconditioner being modified.
+ SuiteSparse ss_;
+
+ // Symbolic and numeric factorization of the preconditioner.
+ cholmod_factor* factor_;
+
+ // Temporary vector used by RightMultiply.
+ cholmod_dense* tmp_rhs_;
+ CERES_DISALLOW_COPY_AND_ASSIGN(VisibilityBasedPreconditioner);
+};
+#else // SuiteSparse
+// If SuiteSparse is not compiled in, the preconditioner is not
+// available.
+class VisibilityBasedPreconditioner : public LinearOperator {
+ public:
+ VisibilityBasedPreconditioner(const CompressedRowBlockStructure& bs,
+ const LinearSolver::Options& options) {
+ LOG(FATAL) << "Visibility based preconditioning is not available. Please "
+ "build Ceres with SuiteSparse.";
+ }
+ virtual ~VisibilityBasedPreconditioner() {}
+ virtual void RightMultiply(const double* x, double* y) const {}
+ virtual void LeftMultiply(const double* x, double* y) const {}
+ virtual int num_rows() const { return -1; }
+ virtual int num_cols() const { return -1; }
+ bool Update(const BlockSparseMatrixBase& A, const double* D) {
+ return false;
+ }
+};
+#endif // CERES_NO_SUITESPARSE
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_VISIBILITY_BASED_PRECONDITIONER_H_
diff --git a/internal/ceres/visibility_based_preconditioner_test.cc b/internal/ceres/visibility_based_preconditioner_test.cc
new file mode 100644
index 0000000..8c5378d
--- /dev/null
+++ b/internal/ceres/visibility_based_preconditioner_test.cc
@@ -0,0 +1,362 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_NO_SUITESPARSE
+
+#include "ceres/visibility_based_preconditioner.h"
+
+#include "Eigen/Dense"
+#include "ceres/block_random_access_dense_matrix.h"
+#include "ceres/block_random_access_sparse_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/casts.h"
+#include "ceres/collections_port.h"
+#include "ceres/file.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/stringprintf.h"
+#include "ceres/types.h"
+#include "ceres/test_util.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+using testing::AssertionResult;
+using testing::AssertionSuccess;
+using testing::AssertionFailure;
+
+static const double kTolerance = 1e-12;
+
+class VisibilityBasedPreconditionerTest : public ::testing::Test {
+ public:
+ static const int kCameraSize = 9;
+
+ protected:
+ void SetUp() {
+ string input_file = TestFileAbsolutePath("problem-6-1384-000.lsqp");
+
+ scoped_ptr<LinearLeastSquaresProblem> problem(
+ CHECK_NOTNULL(CreateLinearLeastSquaresProblemFromFile(input_file)));
+ A_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));
+ b_.reset(problem->b.release());
+ D_.reset(problem->D.release());
+
+ const CompressedRowBlockStructure* bs =
+ CHECK_NOTNULL(A_->block_structure());
+ const int num_col_blocks = bs->cols.size();
+
+ num_cols_ = A_->num_cols();
+ num_rows_ = A_->num_rows();
+ num_eliminate_blocks_ = problem->num_eliminate_blocks;
+ num_camera_blocks_ = num_col_blocks - num_eliminate_blocks_;
+ options_.elimination_groups.push_back(num_eliminate_blocks_);
+ options_.elimination_groups.push_back(
+ A_->block_structure()->cols.size() - num_eliminate_blocks_);
+
+ vector<int> blocks(num_col_blocks - num_eliminate_blocks_, 0);
+ for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
+ blocks[i - num_eliminate_blocks_] = bs->cols[i].size;
+ }
+
+ // The input matrix is a real Jacobian and fairly poorly
+ // conditioned. Setting D to a large constant makes the normal
+ // equations better conditioned, which in turn makes the tests
+ // below better behaved.
+ VectorRef(D_.get(), num_cols_).setConstant(10.0);
+
+ schur_complement_.reset(new BlockRandomAccessDenseMatrix(blocks));
+ Vector rhs(schur_complement_->num_rows());
+
+ scoped_ptr<SchurEliminatorBase> eliminator;
+ eliminator.reset(SchurEliminatorBase::Create(options_));
+ eliminator->Init(num_eliminate_blocks_, bs);
+ eliminator->Eliminate(A_.get(), b_.get(), D_.get(),
+ schur_complement_.get(), rhs.data());
+ }
+
+
+ AssertionResult IsSparsityStructureValid() {
+ preconditioner_->InitStorage(*A_->block_structure());
+ const HashSet<pair<int, int> >& cluster_pairs = get_cluster_pairs();
+ const vector<int>& cluster_membership = get_cluster_membership();
+
+ for (int i = 0; i < num_camera_blocks_; ++i) {
+ for (int j = i; j < num_camera_blocks_; ++j) {
+ if (cluster_pairs.count(make_pair(cluster_membership[i],
+ cluster_membership[j]))) {
+ if (!IsBlockPairInPreconditioner(i, j)) {
+ return AssertionFailure()
+ << "block pair (" << i << "," << j << "missing";
+ }
+ } else {
+ if (IsBlockPairInPreconditioner(i, j)) {
+ return AssertionFailure()
+ << "block pair (" << i << "," << j << "should not be present";
+ }
+ }
+ }
+ }
+ return AssertionSuccess();
+ }
+
+ AssertionResult PreconditionerValuesMatch() {
+ preconditioner_->Update(*A_, D_.get());
+ const HashSet<pair<int, int> >& cluster_pairs = get_cluster_pairs();
+ const BlockRandomAccessSparseMatrix* m = get_m();
+ Matrix preconditioner_matrix;
+ m->matrix()->ToDenseMatrix(&preconditioner_matrix);
+ ConstMatrixRef full_schur_complement(schur_complement_->values(),
+ m->num_rows(),
+ m->num_rows());
+ const int num_clusters = get_num_clusters();
+ const int kDiagonalBlockSize =
+ kCameraSize * num_camera_blocks_ / num_clusters;
+
+ for (int i = 0; i < num_clusters; ++i) {
+ for (int j = i; j < num_clusters; ++j) {
+ double diff = 0.0;
+ if (cluster_pairs.count(make_pair(i, j))) {
+ diff =
+ (preconditioner_matrix.block(kDiagonalBlockSize * i,
+ kDiagonalBlockSize * j,
+ kDiagonalBlockSize,
+ kDiagonalBlockSize) -
+ full_schur_complement.block(kDiagonalBlockSize * i,
+ kDiagonalBlockSize * j,
+ kDiagonalBlockSize,
+ kDiagonalBlockSize)).norm();
+ } else {
+ diff = preconditioner_matrix.block(kDiagonalBlockSize * i,
+ kDiagonalBlockSize * j,
+ kDiagonalBlockSize,
+ kDiagonalBlockSize).norm();
+ }
+ if (diff > kTolerance) {
+ return AssertionFailure()
+ << "Preconditioner block " << i << " " << j << " differs "
+ << "from expected value by " << diff;
+ }
+ }
+ }
+ return AssertionSuccess();
+ }
+
+ // Accessors
+ int get_num_blocks() { return preconditioner_->num_blocks_; }
+
+ int get_num_clusters() { return preconditioner_->num_clusters_; }
+ int* get_mutable_num_clusters() { return &preconditioner_->num_clusters_; }
+
+ const vector<int>& get_block_size() {
+ return preconditioner_->block_size_; }
+
+ vector<int>* get_mutable_block_size() {
+ return &preconditioner_->block_size_; }
+
+ const vector<int>& get_cluster_membership() {
+ return preconditioner_->cluster_membership_;
+ }
+
+ vector<int>* get_mutable_cluster_membership() {
+ return &preconditioner_->cluster_membership_;
+ }
+
+ const set<pair<int, int> >& get_block_pairs() {
+ return preconditioner_->block_pairs_;
+ }
+
+ set<pair<int, int> >* get_mutable_block_pairs() {
+ return &preconditioner_->block_pairs_;
+ }
+
+ const HashSet<pair<int, int> >& get_cluster_pairs() {
+ return preconditioner_->cluster_pairs_;
+ }
+
+ HashSet<pair<int, int> >* get_mutable_cluster_pairs() {
+ return &preconditioner_->cluster_pairs_;
+ }
+
+ bool IsBlockPairInPreconditioner(const int block1, const int block2) {
+ return preconditioner_->IsBlockPairInPreconditioner(block1, block2);
+ }
+
+ bool IsBlockPairOffDiagonal(const int block1, const int block2) {
+ return preconditioner_->IsBlockPairOffDiagonal(block1, block2);
+ }
+
+ const BlockRandomAccessSparseMatrix* get_m() {
+ return preconditioner_->m_.get();
+ }
+
+ int num_rows_;
+ int num_cols_;
+ int num_eliminate_blocks_;
+ int num_camera_blocks_;
+
+ scoped_ptr<BlockSparseMatrix> A_;
+ scoped_array<double> b_;
+ scoped_array<double> D_;
+
+ LinearSolver::Options options_;
+ scoped_ptr<VisibilityBasedPreconditioner> preconditioner_;
+ scoped_ptr<BlockRandomAccessDenseMatrix> schur_complement_;
+};
+
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+TEST_F(VisibilityBasedPreconditionerTest, SchurJacobiStructure) {
+ options_.preconditioner_type = SCHUR_JACOBI;
+ preconditioner_.reset(
+ new VisibilityBasedPreconditioner(*A_->block_structure(), options_));
+ EXPECT_EQ(get_num_blocks(), num_camera_blocks_);
+ EXPECT_EQ(get_num_clusters(), num_camera_blocks_);
+ for (int i = 0; i < num_camera_blocks_; ++i) {
+ for (int j = 0; j < num_camera_blocks_; ++j) {
+ const string msg = StringPrintf("Camera pair: %d %d", i, j);
+ SCOPED_TRACE(msg);
+ if (i == j) {
+ EXPECT_TRUE(IsBlockPairInPreconditioner(i, j));
+ EXPECT_FALSE(IsBlockPairOffDiagonal(i, j));
+ } else {
+ EXPECT_FALSE(IsBlockPairInPreconditioner(i, j));
+ EXPECT_TRUE(IsBlockPairOffDiagonal(i, j));
+ }
+ }
+ }
+}
+
+TEST_F(VisibilityBasedPreconditionerTest, OneClusterClusterJacobi) {
+ options_.preconditioner_type = CLUSTER_JACOBI;
+ preconditioner_.reset(
+ new VisibilityBasedPreconditioner(*A_->block_structure(), options_));
+
+ // Override the clustering to be a single cluster containing all
+ // the cameras.
+ vector<int>& cluster_membership = *get_mutable_cluster_membership();
+ for (int i = 0; i < num_camera_blocks_; ++i) {
+ cluster_membership[i] = 0;
+ }
+
+ *get_mutable_num_clusters() = 1;
+
+ HashSet<pair<int, int> >& cluster_pairs = *get_mutable_cluster_pairs();
+ cluster_pairs.clear();
+ cluster_pairs.insert(make_pair(0, 0));
+
+ EXPECT_TRUE(IsSparsityStructureValid());
+ EXPECT_TRUE(PreconditionerValuesMatch());
+
+ // Multiplication by the inverse of the preconditioner.
+ const int num_rows = schur_complement_->num_rows();
+ ConstMatrixRef full_schur_complement(schur_complement_->values(),
+ num_rows,
+ num_rows);
+ Vector x(num_rows);
+ Vector y(num_rows);
+ Vector z(num_rows);
+
+ for (int i = 0; i < num_rows; ++i) {
+ x.setZero();
+ y.setZero();
+ z.setZero();
+ x[i] = 1.0;
+ preconditioner_->RightMultiply(x.data(), y.data());
+ z = full_schur_complement
+ .selfadjointView<Eigen::Upper>()
+ .ldlt().solve(x);
+ double max_relative_difference =
+ ((y - z).array() / z.array()).matrix().lpNorm<Eigen::Infinity>();
+ EXPECT_NEAR(max_relative_difference, 0.0, kTolerance);
+ }
+}
+
+
+
+TEST_F(VisibilityBasedPreconditionerTest, ClusterJacobi) {
+ options_.preconditioner_type = CLUSTER_JACOBI;
+ preconditioner_.reset(
+ new VisibilityBasedPreconditioner(*A_->block_structure(), options_));
+
+ // Override the clustering to use clusters with (roughly) equal
+ // numbers of cameras.
+ vector<int>& cluster_membership = *get_mutable_cluster_membership();
+ cluster_membership.resize(num_camera_blocks_);
+ static const int kNumClusters = 3;
+
+ for (int i = 0; i < num_camera_blocks_; ++i) {
+ cluster_membership[i] = (i * kNumClusters) / num_camera_blocks_;
+ }
+ *get_mutable_num_clusters() = kNumClusters;
+
+ HashSet<pair<int, int> >& cluster_pairs = *get_mutable_cluster_pairs();
+ cluster_pairs.clear();
+ for (int i = 0; i < kNumClusters; ++i) {
+ cluster_pairs.insert(make_pair(i, i));
+ }
+
+ EXPECT_TRUE(IsSparsityStructureValid());
+ EXPECT_TRUE(PreconditionerValuesMatch());
+}
+
+
+TEST_F(VisibilityBasedPreconditionerTest, ClusterTridiagonal) {
+ options_.preconditioner_type = CLUSTER_TRIDIAGONAL;
+ preconditioner_.reset(
+ new VisibilityBasedPreconditioner(*A_->block_structure(), options_));
+ static const int kNumClusters = 3;
+
+ // Override the clustering to be 3 clusters.
+ vector<int>& cluster_membership = *get_mutable_cluster_membership();
+ cluster_membership.resize(num_camera_blocks_);
+ for (int i = 0; i < num_camera_blocks_; ++i) {
+ cluster_membership[i] = (i * kNumClusters) / num_camera_blocks_;
+ }
+ *get_mutable_num_clusters() = kNumClusters;
+
+ // Spanning forest has structure 0-1 2
+ HashSet<pair<int, int> >& cluster_pairs = *get_mutable_cluster_pairs();
+ cluster_pairs.clear();
+ for (int i = 0; i < kNumClusters; ++i) {
+ cluster_pairs.insert(make_pair(i, i));
+ }
+ cluster_pairs.insert(make_pair(0, 1));
+
+ EXPECT_TRUE(IsSparsityStructureValid());
+ EXPECT_TRUE(PreconditionerValuesMatch());
+}
+#endif // CERES_NO_PROTOCOL_BUFFERS
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_NO_SUITESPARSE
diff --git a/internal/ceres/visibility_test.cc b/internal/ceres/visibility_test.cc
new file mode 100644
index 0000000..793a19d
--- /dev/null
+++ b/internal/ceres/visibility_test.cc
@@ -0,0 +1,203 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: kushalav@google.com (Avanish Kushal)
+// sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/visibility.h"
+
+#include <set>
+#include <vector>
+#include "ceres/block_structure.h"
+#include "ceres/graph.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+namespace ceres {
+namespace internal {
+
+class VisibilityTest : public ::testing::Test {
+};
+
+TEST(VisibilityTest, SimpleMatrix) {
+ // A = [1 0 0 0 0 1
+ // 1 0 0 1 0 0
+ // 0 1 1 0 0 0
+ // 0 1 0 0 1 0]
+
+ int num_cols = 6;
+ int num_eliminate_blocks = 2;
+ CompressedRowBlockStructure bs;
+
+ // Row 1
+ {
+ bs.rows.push_back(CompressedRow());
+ CompressedRow& row = bs.rows.back();
+ row.block.size = 2;
+ row.block.position = 0;
+ row.cells.push_back(Cell(0, 0));
+ row.cells.push_back(Cell(5, 0));
+ }
+
+ // Row 2
+ {
+ bs.rows.push_back(CompressedRow());
+ CompressedRow& row = bs.rows.back();
+ row.block.size = 2;
+ row.block.position = 2;
+ row.cells.push_back(Cell(0, 1));
+ row.cells.push_back(Cell(3, 1));
+ }
+
+ // Row 3
+ {
+ bs.rows.push_back(CompressedRow());
+ CompressedRow& row = bs.rows.back();
+ row.block.size = 2;
+ row.block.position = 4;
+ row.cells.push_back(Cell(1, 2));
+ row.cells.push_back(Cell(2, 2));
+ }
+
+ // Row 4
+ {
+ bs.rows.push_back(CompressedRow());
+ CompressedRow& row = bs.rows.back();
+ row.block.size = 2;
+ row.block.position = 6;
+ row.cells.push_back(Cell(1, 3));
+ row.cells.push_back(Cell(4, 3));
+ }
+ bs.cols.resize(num_cols);
+
+ vector< set<int> > visibility;
+ ComputeVisibility(bs, num_eliminate_blocks, &visibility);
+ ASSERT_EQ(visibility.size(), num_cols - num_eliminate_blocks);
+ for (int i = 0; i < visibility.size(); ++i) {
+ ASSERT_EQ(visibility[i].size(), 1);
+ }
+
+ scoped_ptr<Graph<int> > graph(CreateSchurComplementGraph(visibility));
+ EXPECT_EQ(graph->vertices().size(), visibility.size());
+ for (int i = 0; i < visibility.size(); ++i) {
+ EXPECT_EQ(graph->VertexWeight(i), 1.0);
+ }
+
+ for (int i = 0; i < visibility.size(); ++i) {
+ for (int j = i; j < visibility.size(); ++j) {
+ double edge_weight = 0.0;
+ if ((i == 1 && j == 3) || (i == 0 && j == 2) || (i == j)) {
+ edge_weight = 1.0;
+ }
+
+ EXPECT_EQ(graph->EdgeWeight(i, j), edge_weight)
+ << "Edge: " << i << " " << j
+ << " weight: " << graph->EdgeWeight(i, j)
+ << " expected weight: " << edge_weight;
+ }
+ }
+}
+
+
+TEST(VisibilityTest, NoEBlocks) {
+ // A = [1 0 0 0 0 0
+ // 1 0 0 0 0 0
+ // 0 1 0 0 0 0
+ // 0 1 0 0 0 0]
+
+ int num_cols = 6;
+ int num_eliminate_blocks = 2;
+ CompressedRowBlockStructure bs;
+
+ // Row 1
+ {
+ bs.rows.push_back(CompressedRow());
+ CompressedRow& row = bs.rows.back();
+ row.block.size = 2;
+ row.block.position = 0;
+ row.cells.push_back(Cell(0, 0));
+ }
+
+ // Row 2
+ {
+ bs.rows.push_back(CompressedRow());
+ CompressedRow& row = bs.rows.back();
+ row.block.size = 2;
+ row.block.position = 2;
+ row.cells.push_back(Cell(0, 1));
+ }
+
+ // Row 3
+ {
+ bs.rows.push_back(CompressedRow());
+ CompressedRow& row = bs.rows.back();
+ row.block.size = 2;
+ row.block.position = 4;
+ row.cells.push_back(Cell(1, 2));
+ }
+
+ // Row 4
+ {
+ bs.rows.push_back(CompressedRow());
+ CompressedRow& row = bs.rows.back();
+ row.block.size = 2;
+ row.block.position = 6;
+ row.cells.push_back(Cell(1, 3));
+ }
+ bs.cols.resize(num_cols);
+
+ vector<set<int> > visibility;
+ ComputeVisibility(bs, num_eliminate_blocks, &visibility);
+ ASSERT_EQ(visibility.size(), num_cols - num_eliminate_blocks);
+ for (int i = 0; i < visibility.size(); ++i) {
+ ASSERT_EQ(visibility[i].size(), 0);
+ }
+
+ scoped_ptr<Graph<int> > graph(CreateSchurComplementGraph(visibility));
+ EXPECT_EQ(graph->vertices().size(), visibility.size());
+ for (int i = 0; i < visibility.size(); ++i) {
+ EXPECT_EQ(graph->VertexWeight(i), 1.0);
+ }
+
+ for (int i = 0; i < visibility.size(); ++i) {
+ for (int j = i; j < visibility.size(); ++j) {
+ double edge_weight = 0.0;
+ if (i == j) {
+ edge_weight = 1.0;
+ }
+ EXPECT_EQ(graph->EdgeWeight(i, j), edge_weight)
+ << "Edge: " << i << " " << j
+ << " weight: " << graph->EdgeWeight(i, j)
+ << " expected weight: " << edge_weight;
+ }
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/wall_time.cc b/internal/ceres/wall_time.cc
new file mode 100644
index 0000000..0dce19f
--- /dev/null
+++ b/internal/ceres/wall_time.cc
@@ -0,0 +1,49 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: strandmark@google.com (Petter Strandmark)
+
+#ifdef CERES_USE_OPENMP
+#include <omp.h>
+#else
+#include <ctime>
+#endif
+
+namespace ceres {
+namespace internal {
+
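+// When Ceres is built with OpenMP, omp_get_wtime() provides a high
+// resolution wall clock; the std::time() fallback below only has one
+// second granularity.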
+double WallTimeInSeconds() {
+#ifdef CERES_USE_OPENMP
+ return omp_get_wtime();
+#else
+ return static_cast<double>(std::time(NULL));
+#endif
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/internal/ceres/wall_time.h b/internal/ceres/wall_time.h
new file mode 100644
index 0000000..1a6e3bb
--- /dev/null
+++ b/internal/ceres/wall_time.h
@@ -0,0 +1,44 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: strandmark@google.com (Petter Strandmark)
+
+#ifndef CERES_INTERNAL_WALL_TIME_H_
+#define CERES_INTERNAL_WALL_TIME_H_
+
+namespace ceres {
+namespace internal {
+
+// Returns time, in seconds, from some arbitrary starting point. Has very
+// high precision if OpenMP is available, otherwise only second granularity.
+double WallTimeInSeconds();
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_WALL_TIME_H_
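
The helper added in this change is a drop-in way to time solver phases. Below is a minimal, self-contained sketch of a call site, assuming only the WallTimeInSeconds() declaration introduced above; the main() wrapper and the busy loop are illustrative, not part of this patch.

  #include <cstdio>

  #include "ceres/wall_time.h"

  int main() {
    // Record the start time, do some stand-in work, then report the
    // elapsed wall clock time.
    const double start_time = ceres::internal::WallTimeInSeconds();

    double sum = 0.0;
    for (int i = 0; i < 10000000; ++i) {
      sum += static_cast<double>(i);
    }

    const double elapsed = ceres::internal::WallTimeInSeconds() - start_time;
    // With OpenMP the difference is high resolution; with the std::time()
    // fallback it only resolves whole seconds.
    std::printf("sum = %.0f, elapsed = %f s\n", sum, elapsed);
    return 0;
  }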