Diffstat (limited to 'doc')
-rw-r--r--  doc/A05_PortingFrom2To3.dox | 299
-rw-r--r--  doc/AsciiQuickReference.txt | 11
-rw-r--r--  doc/CMakeLists.txt | 26
-rw-r--r--  doc/CoeffwiseMathFunctionsTable.dox | 83
-rw-r--r--  doc/CustomizingEigen_CustomScalar.dox | 2
-rw-r--r--  doc/DenseDecompositionBenchmark.dox | 2
-rw-r--r--  doc/Doxyfile.in | 32
-rw-r--r--  doc/FixedSizeVectorizable.dox | 10
-rw-r--r--  doc/FunctionsTakingEigenTypes.dox | 6
-rw-r--r--  doc/HiPerformance.dox | 2
-rw-r--r--  doc/InsideEigenExample.dox | 5
-rw-r--r--  doc/LeastSquares.dox | 17
-rw-r--r--  doc/Manual.dox | 51
-rw-r--r--  doc/Overview.dox | 2
-rw-r--r--  doc/PassingByValue.dox | 8
-rw-r--r--  doc/Pitfalls.dox | 121
-rw-r--r--  doc/PreprocessorDirectives.dox | 29
-rw-r--r--  doc/QuickReference.dox | 26
-rw-r--r--  doc/QuickStartGuide.dox | 4
-rw-r--r--  doc/SparseLinearSystems.dox | 3
-rw-r--r--  doc/SparseQuickReference.dox | 4
-rw-r--r--  doc/StlContainers.dox | 41
-rw-r--r--  doc/StructHavingEigenMembers.dox | 81
-rw-r--r--  doc/TemplateKeyword.dox | 2
-rw-r--r--  doc/TopicCMakeGuide.dox | 10
-rw-r--r--  doc/TopicLazyEvaluation.dox | 76
-rw-r--r--  doc/TopicLinearAlgebraDecompositions.dox | 32
-rw-r--r--  doc/TopicMultithreading.dox | 35
-rw-r--r--  doc/TutorialBlockOperations.dox | 14
-rw-r--r--  doc/TutorialGeometry.dox | 6
-rw-r--r--  doc/TutorialLinearAlgebra.dox | 87
-rw-r--r--  doc/TutorialMapClass.dox | 4
-rw-r--r--  doc/TutorialMatrixClass.dox | 30
-rw-r--r--  doc/TutorialReshape.dox | 82
-rw-r--r--  doc/TutorialReshapeSlicing.dox | 65
-rw-r--r--  doc/TutorialSTL.dox | 66
-rw-r--r--  doc/TutorialSlicingIndexing.dox | 244
-rw-r--r--  doc/TutorialSparse.dox | 6
-rw-r--r--  doc/UnalignedArrayAssert.dox | 47
-rw-r--r--  doc/UsingIntelMKL.dox | 6
-rw-r--r--  doc/UsingNVCC.dox | 12
-rw-r--r--  doc/eigen_navtree_hacks.js | 47
-rw-r--r--  doc/eigendoxy.css | 21
-rw-r--r--  doc/eigendoxy_footer.html.in | 17
-rw-r--r--  doc/eigendoxy_header.html.in | 21
-rw-r--r--  doc/examples/CMakeLists.txt | 3
-rw-r--r--  doc/examples/Cwise_lgamma.cpp | 2
-rw-r--r--  doc/examples/TutorialLinAlgSVDSolve.cpp | 2
-rw-r--r--  doc/examples/Tutorial_BlockOperations_block_assignment.cpp | 2
-rw-r--r--  doc/examples/Tutorial_simple_example_dynamic_size.cpp | 2
-rw-r--r--  doc/examples/class_FixedReshaped.cpp | 22
-rw-r--r--  doc/examples/class_Reshaped.cpp | 23
-rw-r--r--  doc/examples/matrixfree_cg.cpp | 1
-rw-r--r--  doc/examples/nullary_indexing.cpp | 10
-rw-r--r--  doc/snippets/Array_initializer_list_23_cxx11.cpp | 5
-rw-r--r--  doc/snippets/Array_initializer_list_vector_cxx11.cpp | 2
-rw-r--r--  doc/snippets/Array_variadic_ctor_cxx11.cpp | 3
-rw-r--r--  doc/snippets/BiCGSTAB_simple.cpp | 2
-rw-r--r--  doc/snippets/BiCGSTAB_step_by_step.cpp | 2
-rw-r--r--  doc/snippets/CMakeLists.txt | 44
-rw-r--r--  doc/snippets/ComplexEigenSolver_eigenvectors.cpp | 2
-rw-r--r--  doc/snippets/Cwise_rint.cpp | 3
-rw-r--r--  doc/snippets/DenseBase_LinSpaced_seq_deprecated.cpp (renamed from doc/snippets/DenseBase_LinSpaced_seq.cpp) | 0
-rw-r--r--  doc/snippets/DirectionWise_hnormalized.cpp | 3
-rw-r--r--  doc/snippets/Jacobi_makeGivens.cpp | 2
-rw-r--r--  doc/snippets/Jacobi_makeJacobi.cpp | 2
-rw-r--r--  doc/snippets/Map_placement_new.cpp | 2
-rw-r--r--  doc/snippets/MatrixBase_colwise_iterator_cxx11.cpp | 12
-rw-r--r--  doc/snippets/MatrixBase_cwiseArg.cpp | 3
-rw-r--r--  doc/snippets/MatrixBase_cwiseEqual.cpp | 2
-rw-r--r--  doc/snippets/MatrixBase_cwiseNotEqual.cpp | 2
-rw-r--r--  doc/snippets/MatrixBase_hnormalized.cpp | 2
-rw-r--r--  doc/snippets/MatrixBase_homogeneous.cpp | 2
-rw-r--r--  doc/snippets/MatrixBase_reshaped_auto.cpp | 4
-rw-r--r--  doc/snippets/MatrixBase_reshaped_fixed.cpp | 3
-rw-r--r--  doc/snippets/MatrixBase_reshaped_int_int.cpp | 3
-rw-r--r--  doc/snippets/MatrixBase_reshaped_to_vector.cpp | 4
-rw-r--r--  doc/snippets/Matrix_Map_stride.cpp | 7
-rw-r--r--  doc/snippets/Matrix_initializer_list_23_cxx11.cpp | 5
-rw-r--r--  doc/snippets/Matrix_initializer_list_vector_cxx11.cpp | 2
-rw-r--r--  doc/snippets/Matrix_variadic_ctor_cxx11.cpp | 3
-rw-r--r--  doc/snippets/SelfAdjointEigenSolver_eigenvectors.cpp | 2
-rw-r--r--  doc/snippets/Slicing_arrayexpr.cpp | 4
-rw-r--r--  doc/snippets/Slicing_custom_padding_cxx11.cpp | 12
-rw-r--r--  doc/snippets/Slicing_rawarray_cxx11.cpp | 5
-rw-r--r--  doc/snippets/Slicing_stdvector_cxx11.cpp | 4
-rw-r--r--  doc/snippets/TopicAliasing_mult4.cpp | 2
-rw-r--r--  doc/snippets/Tridiagonalization_decomposeInPlace.cpp | 3
-rw-r--r--  doc/snippets/Tutorial_ReshapeMat2Mat.cpp | 2
-rw-r--r--  doc/snippets/Tutorial_ReshapeMat2Vec.cpp | 2
-rw-r--r--  doc/snippets/Tutorial_SlicingCol.cpp | 2
-rw-r--r--  doc/snippets/Tutorial_SlicingVec.cpp | 2
-rw-r--r--  doc/snippets/Tutorial_range_for_loop_1d_cxx11.cpp | 4
-rw-r--r--  doc/snippets/Tutorial_range_for_loop_2d_cxx11.cpp | 5
-rw-r--r--  doc/snippets/Tutorial_reshaped_vs_resize_1.cpp | 5
-rw-r--r--  doc/snippets/Tutorial_reshaped_vs_resize_2.cpp | 6
-rw-r--r--  doc/snippets/Tutorial_std_sort.cpp | 4
-rw-r--r--  doc/snippets/Tutorial_std_sort_rows_cxx11.cpp | 5
-rw-r--r--  doc/snippets/VectorwiseOp_homogeneous.cpp | 3
-rw-r--r--  doc/snippets/compile_snippet.cpp.in | 5
-rw-r--r--  doc/snippets/tut_arithmetic_transpose_aliasing.cpp | 2
-rw-r--r--  doc/snippets/tut_arithmetic_transpose_inplace.cpp | 2
-rw-r--r--  doc/special_examples/CMakeLists.txt | 5
-rw-r--r--  doc/special_examples/Tutorial_sparse_example.cpp | 8
104 files changed, 1374 insertions, 708 deletions
diff --git a/doc/A05_PortingFrom2To3.dox b/doc/A05_PortingFrom2To3.dox
deleted file mode 100644
index 51555f996..000000000
--- a/doc/A05_PortingFrom2To3.dox
+++ /dev/null
@@ -1,299 +0,0 @@
-namespace Eigen {
-
-/** \page Eigen2ToEigen3 Porting from Eigen2 to Eigen3
-
-This page lists the most important API changes between Eigen2 and Eigen3,
-and gives tips to help porting your application from Eigen2 to Eigen3.
-
-\eigenAutoToc
-
-\section CompatibilitySupport Eigen2 compatibility support
-
-Up to version 3.2 %Eigen provides <a href="http://eigen.tuxfamily.org/dox-3.2/Eigen2SupportModes.html">Eigen2 support modes</a>. These are removed now, because they were barely used anymore and became hard to maintain after internal re-designs.
-You can still use them by first <a href="http://eigen.tuxfamily.org/dox-3.2/Eigen2ToEigen3.html">porting your code to Eigen 3.2</a>.
-
-\section Using The USING_PART_OF_NAMESPACE_EIGEN macro
-
-The USING_PART_OF_NAMESPACE_EIGEN macro has been removed. In Eigen 3, just do:
-\code
-using namespace Eigen;
-\endcode
-
-\section ComplexDot Dot products over complex numbers
-
-This is the single trickiest change between Eigen 2 and Eigen 3. It only affects code using \c std::complex numbers as scalar type.
-
-Eigen 2's dot product was linear in the first variable. Eigen 3's dot product is linear in the second variable. In other words, the Eigen 2 code \code x.dot(y) \endcode is equivalent to the Eigen 3 code \code y.dot(x) \endcode In yet other words, dot products are complex-conjugated in Eigen 3 compared to Eigen 2. The switch to the new convention was commanded by common usage, especially with the notation \f$ x^Ty \f$ for dot products of column-vectors.
-
-\section VectorBlocks Vector blocks
-
-<table class="manual">
-<tr><th>Eigen 2</th><th>Eigen 3</th></th>
-<tr><td>\code
-vector.start(length)
-vector.start<length>()
-vector.end(length)
-vector.end<length>()
-\endcode</td><td>\code
-vector.head(length)
-vector.head<length>()
-vector.tail(length)
-vector.tail<length>()
-\endcode</td></tr>
-</table>
-
-
-\section Corners Matrix Corners
-
-<table class="manual">
-<tr><th>Eigen 2</th><th>Eigen 3</th></th>
-<tr><td>\code
-matrix.corner(TopLeft,r,c)
-matrix.corner(TopRight,r,c)
-matrix.corner(BottomLeft,r,c)
-matrix.corner(BottomRight,r,c)
-matrix.corner<r,c>(TopLeft)
-matrix.corner<r,c>(TopRight)
-matrix.corner<r,c>(BottomLeft)
-matrix.corner<r,c>(BottomRight)
-\endcode</td><td>\code
-matrix.topLeftCorner(r,c)
-matrix.topRightCorner(r,c)
-matrix.bottomLeftCorner(r,c)
-matrix.bottomRightCorner(r,c)
-matrix.topLeftCorner<r,c>()
-matrix.topRightCorner<r,c>()
-matrix.bottomLeftCorner<r,c>()
-matrix.bottomRightCorner<r,c>()
-\endcode</td>
-</tr>
-</table>
-
-Notice that Eigen3 also provides these new convenience methods: topRows(), bottomRows(), leftCols(), rightCols(). See in class DenseBase.
-
-\section CoefficientWiseOperations Coefficient wise operations
-
-In Eigen2, coefficient wise operations which have no proper mathematical definition (as a coefficient wise product)
-were achieved using the .cwise() prefix, e.g.:
-\code a.cwise() * b \endcode
-In Eigen3 this .cwise() prefix has been superseded by a new kind of matrix type called
-Array for which all operations are performed coefficient wise. You can easily view a matrix as an array and vice versa using
-the MatrixBase::array() and ArrayBase::matrix() functions respectively. Here is an example:
-\code
-Vector4f a, b, c;
-c = a.array() * b.array();
-\endcode
-Note that the .array() function is not at all a synonym of the deprecated .cwise() prefix.
-While the .cwise() prefix changed the behavior of the following operator, the array() function performs
-a permanent conversion to the array world. Therefore, for binary operations such as the coefficient wise product,
-both sides must be converted to an \em array as in the above example. On the other hand, when you
-concatenate multiple coefficient wise operations you only have to do the conversion once, e.g.:
-\code
-Vector4f a, b, c;
-c = a.array().abs().pow(3) * b.array().abs().sin();
-\endcode
-With Eigen2 you would have written:
-\code
-c = (a.cwise().abs().cwise().pow(3)).cwise() * (b.cwise().abs().cwise().sin());
-\endcode
-
-\section PartAndExtract Triangular and self-adjoint matrices
-
-In Eigen 2 you had to play with the part, extract, and marked functions to deal with triangular and selfadjoint matrices. In Eigen 3, all these functions have been removed in favor of the concept of \em views:
-
-<table class="manual">
-<tr><th>Eigen 2</th><th>Eigen 3</th></tr>
-<tr><td>\code
-A.part<UpperTriangular>();
-A.part<StrictlyLowerTriangular>(); \endcode</td>
-<td>\code
-A.triangularView<Upper>()
-A.triangularView<StrictlyLower>()\endcode</td></tr>
-<tr><td>\code
-A.extract<UpperTriangular>();
-A.extract<StrictlyLowerTriangular>();\endcode</td>
-<td>\code
-A.triangularView<Upper>()
-A.triangularView<StrictlyLower>()\endcode</td></tr>
-<tr><td>\code
-A.marked<UpperTriangular>();
-A.marked<StrictlyLowerTriangular>();\endcode</td>
-<td>\code
-A.triangularView<Upper>()
-A.triangularView<StrictlyLower>()\endcode</td></tr>
-<tr><td colspan="2"></td></tr>
-<tr><td>\code
-A.part<SelfAdfjoint|UpperTriangular>();
-A.extract<SelfAdfjoint|LowerTriangular>();\endcode</td>
-<td>\code
-A.selfadjointView<Upper>()
-A.selfadjointView<Lower>()\endcode</td></tr>
-<tr><td colspan="2"></td></tr>
-<tr><td>\code
-UpperTriangular
-LowerTriangular
-UnitUpperTriangular
-UnitLowerTriangular
-StrictlyUpperTriangular
-StrictlyLowerTriangular
-\endcode</td><td>\code
-Upper
-Lower
-UnitUpper
-UnitLower
-StrictlyUpper
-StrictlyLower
-\endcode</td>
-</tr>
-</table>
-
-\sa class TriangularView, class SelfAdjointView
-
-\section TriangularSolveInPlace Triangular in-place solving
-
-<table class="manual">
-<tr><th>Eigen 2</th><th>Eigen 3</th></tr>
-<tr><td>\code A.triangularSolveInPlace<XxxTriangular>(Y);\endcode</td><td>\code A.triangularView<Xxx>().solveInPlace(Y);\endcode</td></tr>
-</table>
-
-
-\section Decompositions Matrix decompositions
-
-Some of Eigen 2's matrix decompositions have been renamed in Eigen 3, while some others have been removed and are replaced by other decompositions in Eigen 3.
-
-<table class="manual">
- <tr>
- <th>Eigen 2</th>
- <th>Eigen 3</th>
- <th>Notes</th>
- </tr>
- <tr>
- <td>LU</td>
- <td>FullPivLU</td>
- <td class="alt">See also the new PartialPivLU, it's much faster</td>
- </tr>
- <tr>
- <td>QR</td>
- <td>HouseholderQR</td>
- <td class="alt">See also the new ColPivHouseholderQR, it's more reliable</td>
- </tr>
- <tr>
- <td>SVD</td>
- <td>JacobiSVD</td>
- <td class="alt">We currently don't have a bidiagonalizing SVD; of course this is planned.</td>
- </tr>
- <tr>
- <td>EigenSolver and friends</td>
- <td>\code #include<Eigen/Eigenvalues> \endcode </td>
- <td class="alt">Moved to separate module</td>
- </tr>
-</table>
-
-\section LinearSolvers Linear solvers
-
-<table class="manual">
-<tr><th>Eigen 2</th><th>Eigen 3</th><th>Notes</th></tr>
-<tr><td>\code A.lu();\endcode</td>
-<td>\code A.fullPivLu();\endcode</td>
-<td class="alt">Now A.lu() returns a PartialPivLU</td></tr>
-<tr><td>\code A.lu().solve(B,&X);\endcode</td>
-<td>\code X = A.lu().solve(B);
- X = A.fullPivLu().solve(B);\endcode</td>
-<td class="alt">The returned by value is fully optimized</td></tr>
-<tr><td>\code A.llt().solve(B,&X);\endcode</td>
-<td>\code X = A.llt().solve(B);
- X = A.selfadjointView<Lower>.llt().solve(B);
- X = A.selfadjointView<Upper>.llt().solve(B);\endcode</td>
-<td class="alt">The returned by value is fully optimized and \n
-the selfadjointView API allows you to select the \n
-triangular part to work on (default is lower part)</td></tr>
-<tr><td>\code A.llt().solveInPlace(B);\endcode</td>
-<td>\code B = A.llt().solve(B);
- B = A.selfadjointView<Lower>.llt().solve(B);
- B = A.selfadjointView<Upper>.llt().solve(B);\endcode</td>
-<td class="alt">In place solving</td></tr>
-<tr><td>\code A.ldlt().solve(B,&X);\endcode</td>
-<td>\code X = A.ldlt().solve(B);
- X = A.selfadjointView<Lower>.ldlt().solve(B);
- X = A.selfadjointView<Upper>.ldlt().solve(B);\endcode</td>
-<td class="alt">The returned by value is fully optimized and \n
-the selfadjointView API allows you to select the \n
-triangular part to work on</td></tr>
-</table>
-
-\section GeometryModule Changes in the Geometry module
-
-The Geometry module is the one that changed the most. If you rely heavily on it, it's probably a good idea to use the <a href="http://eigen.tuxfamily.org/dox-3.2/Eigen2SupportModes.html">"Eigen 2 support modes"</a> to perform your migration.
-
-\section Transform The Transform class
-
-In Eigen 2, the Transform class didn't really know whether it was a projective or affine transformation. In Eigen 3, it takes a new \a Mode template parameter, which indicates whether it's \a Projective or \a Affine transform. There is no default value.
-
-The Transform3f (etc) typedefs are no more. In Eigen 3, the Transform typedefs explicitly refer to the \a Projective and \a Affine modes:
-
-<table class="manual">
-<tr><th>Eigen 2</th><th>Eigen 3</th><th>Notes</th></tr>
-<tr>
- <td> Transform3f </td>
- <td> Affine3f or Projective3f </td>
- <td> Of course 3f is just an example here </td>
-</tr>
-</table>
-
-
-\section LazyVsNoalias Lazy evaluation and noalias
-
-In Eigen all operations are performed in a lazy fashion except the matrix products which are always evaluated into a temporary by default.
-In Eigen2, lazy evaluation could be enforced by tagging a product using the .lazy() function. However, in complex expressions it was not
-easy to determine where to put the lazy() function. In Eigen3, the lazy() feature has been superseded by the MatrixBase::noalias() function
-which can be used on the left hand side of an assignment when no aliasing can occur. Here is an example:
-\code
-MatrixXf a, b, c;
-...
-c.noalias() += 2 * a.transpose() * b;
-\endcode
-However, the noalias mechanism does not cover all the features of the old .lazy(). Indeed, in some extremely rare cases,
-it might be useful to explicit request for a lay product, i.e., for a product which will be evaluated one coefficient at once, on request,
-just like any other expressions. To this end you can use the MatrixBase::lazyProduct() function, however we strongly discourage you to
-use it unless you are sure of what you are doing, i.e., you have rigourosly measured a speed improvement.
-
-\section AlignMacros Alignment-related macros
-
-The EIGEN_ALIGN_128 macro has been renamed to EIGEN_ALIGN16. Don't be surprised, it's just that we switched to counting in bytes ;-)
-
-The \link TopicPreprocessorDirectivesPerformance EIGEN_DONT_ALIGN \endlink option still exists in Eigen 3, but it has a new cousin: \link TopicPreprocessorDirectivesPerformance EIGEN_DONT_ALIGN_STATICALLY.\endlink It allows to get rid of all static alignment issues while keeping alignment of dynamic-size heap-allocated arrays. Vectorization of statically allocated arrays is still preserved (unless you define \link TopicPreprocessorDirectivesPerformance EIGEN_UNALIGNED_VECTORIZE \endlink =0), at the cost of unaligned memory stores.
-
-\section AlignedMap Aligned Map objects
-
-A common issue with Eigen 2 was that when mapping an array with Map, there was no way to tell Eigen that your array was aligned. There was a ForceAligned option but it didn't mean that; it was just confusing and has been removed.
-
-New in Eigen3 is the #Aligned option. See the documentation of class Map. Use it like this:
-\code
-Map<Vector4f, Aligned> myMappedVector(some_aligned_array);
-\endcode
-There also are related convenience static methods, which actually are the preferred way as they take care of such things as constness:
-\code
-result = Vector4f::MapAligned(some_aligned_array);
-\endcode
-
-\section StdContainers STL Containers
-
-In Eigen2, <tt>\#include\<Eigen/StdVector\></tt> tweaked std::vector to automatically align elements. The problem was that that was quite invasive. In Eigen3, we only override standard behavior if you use Eigen::aligned_allocator<T> as your allocator type. So for example, if you use std::vector<Matrix4f>, you need to do the following change (note that aligned_allocator is under namespace Eigen):
-
-<table class="manual">
-<tr><th>Eigen 2</th><th>Eigen 3</th></tr>
-<tr>
- <td> \code std::vector<Matrix4f> \endcode </td>
- <td> \code std::vector<Matrix4f, aligned_allocator<Matrix4f> > \endcode </td>
-</tr>
-</table>
-
-\section eiPrefix Internal ei_ prefix
-
-In Eigen2, global internal functions and structures were prefixed by \c ei_. In Eigen3, they all have been moved into the more explicit \c internal namespace. So, e.g., \c ei_sqrt(x) now becomes \c internal::sqrt(x). Of course it is not recommended to rely on Eigen's internal features.
-
-
-
-*/
-
-}
diff --git a/doc/AsciiQuickReference.txt b/doc/AsciiQuickReference.txt
index 0ca54cef3..18b4446c6 100644
--- a/doc/AsciiQuickReference.txt
+++ b/doc/AsciiQuickReference.txt
@@ -50,6 +50,12 @@ VectorXi::LinSpaced(((hi-low)/step)+1, // low:step:hi
// Matrix slicing and blocks. All expressions listed here are read/write.
// Templated size versions are faster. Note that Matlab is 1-based (a size N
// vector is x(1)...x(N)).
+/******************************************************************************/
+/* PLEASE HELP US IMPROVING THIS SECTION */
+/* Eigen 3.4 supports a much improved API for sub-matrices, including, */
+/* slicing and indexing from arrays: */
+/* http://eigen.tuxfamily.org/dox-devel/group__TutorialSlicingIndexing.html */
+/******************************************************************************/
// Eigen // Matlab
x.head(n) // x(1:n)
x.head<n>() // x(1:n)
@@ -88,6 +94,11 @@ R.row(i) = P.col(j); // R(i, :) = P(:, j)
R.col(j1).swap(mat1.col(j2)); // R(:, [j1 j2]) = R(:, [j2, j1])
// Views, transpose, etc;
+/******************************************************************************/
+/* PLEASE HELP US IMPROVING THIS SECTION */
+/* Eigen 3.4 supports a new API for reshaping: */
+/* http://eigen.tuxfamily.org/dox-devel/group__TutorialReshape.html */
+/******************************************************************************/
// Eigen // Matlab
R.adjoint() // R'
R.transpose() // R.' or conj(R') // Read-write
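For reference, a minimal sketch of the Eigen 3.4 slicing/indexing and reshaping API that the two notices above point to (assuming an Eigen 3.4 header set; matrix sizes and names are only illustrative):
\code
#include <Eigen/Dense>
#include <vector>
using namespace Eigen;

int main()
{
  MatrixXd A = MatrixXd::Random(6, 6);

  // Rows 1..3 (inclusive) and every other column -- roughly A(2:4, 1:2:end) in Matlab.
  MatrixXd B = A(seq(1, 3), seq(0, last, 2));

  // Indexing from an array of row indices.
  std::vector<int> rows = {4, 2, 0};
  MatrixXd C = A(rows, all);

  // Reshaping: view the 6x6 matrix as 4x9 (column-major order by default).
  MatrixXd D = A.reshaped(4, 9);
}
\endcode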
diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
index db413bc65..0f9ef2382 100644
--- a/doc/CMakeLists.txt
+++ b/doc/CMakeLists.txt
@@ -7,11 +7,14 @@ project(EigenDoc)
if(CMAKE_COMPILER_IS_GNUCXX)
if(CMAKE_SYSTEM_NAME MATCHES Linux)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O1 -g1")
- endif(CMAKE_SYSTEM_NAME MATCHES Linux)
-endif(CMAKE_COMPILER_IS_GNUCXX)
+ endif()
+endif()
-option(EIGEN_INTERNAL_DOCUMENTATION "Build internal documentation" OFF)
+# some examples and snippets need C++11, so let's check it once
+check_cxx_compiler_flag("-std=c++11" EIGEN_COMPILER_SUPPORT_CPP11)
+option(EIGEN_INTERNAL_DOCUMENTATION "Build internal documentation" OFF)
+option(EIGEN_DOC_USE_MATHJAX "Use MathJax for rendering math in HTML docs" ON)
# Set some Doxygen flags
set(EIGEN_DOXY_PROJECT_NAME "Eigen")
@@ -19,11 +22,18 @@ set(EIGEN_DOXY_OUTPUT_DIRECTORY_SUFFIX "")
set(EIGEN_DOXY_INPUT "\"${Eigen_SOURCE_DIR}/Eigen\" \"${Eigen_SOURCE_DIR}/doc\"")
set(EIGEN_DOXY_HTML_COLORSTYLE_HUE "220")
set(EIGEN_DOXY_TAGFILES "")
+
if(EIGEN_INTERNAL_DOCUMENTATION)
set(EIGEN_DOXY_INTERNAL "YES")
-else(EIGEN_INTERNAL_DOCUMENTATION)
+else()
set(EIGEN_DOXY_INTERNAL "NO")
-endif(EIGEN_INTERNAL_DOCUMENTATION)
+endif()
+
+if (EIGEN_DOC_USE_MATHJAX)
+ set(EIGEN_DOXY_USE_MATHJAX "YES")
+else ()
+ set(EIGEN_DOXY_USE_MATHJAX "NO")
+endif()
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in
@@ -34,8 +44,8 @@ set(EIGEN_DOXY_PROJECT_NAME "Eigen-unsupported")
set(EIGEN_DOXY_OUTPUT_DIRECTORY_SUFFIX "/unsupported")
set(EIGEN_DOXY_INPUT "\"${Eigen_SOURCE_DIR}/unsupported/Eigen\" \"${Eigen_SOURCE_DIR}/unsupported/doc\"")
set(EIGEN_DOXY_HTML_COLORSTYLE_HUE "0")
-# set(EIGEN_DOXY_TAGFILES "\"${Eigen_BINARY_DIR}/doc/eigen.doxytags =../\"")
-set(EIGEN_DOXY_TAGFILES "")
+set(EIGEN_DOXY_TAGFILES "\"${Eigen_BINARY_DIR}/doc/Eigen.doxytags=..\"")
+#set(EIGEN_DOXY_TAGFILES "")
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in
@@ -103,7 +113,7 @@ add_custom_target(doc ALL
COMMAND doxygen Doxyfile-unsupported
COMMAND ${CMAKE_COMMAND} -E copy ${Eigen_BINARY_DIR}/doc/html/group__TopicUnalignedArrayAssert.html ${Eigen_BINARY_DIR}/doc/html/TopicUnalignedArrayAssert.html
COMMAND ${CMAKE_COMMAND} -E rename html eigen-doc
- COMMAND ${CMAKE_COMMAND} -E remove eigen-doc/eigen-doc.tgz
+ COMMAND ${CMAKE_COMMAND} -E remove eigen-doc/eigen-doc.tgz eigen-doc/unsupported/_formulas.log eigen-doc/_formulas.log
COMMAND ${CMAKE_COMMAND} -E tar cfz eigen-doc.tgz eigen-doc
COMMAND ${CMAKE_COMMAND} -E rename eigen-doc.tgz eigen-doc/eigen-doc.tgz
COMMAND ${CMAKE_COMMAND} -E rename eigen-doc html
diff --git a/doc/CoeffwiseMathFunctionsTable.dox b/doc/CoeffwiseMathFunctionsTable.dox
index 3ae9420dc..3f5c56446 100644
--- a/doc/CoeffwiseMathFunctionsTable.dox
+++ b/doc/CoeffwiseMathFunctionsTable.dox
@@ -63,7 +63,7 @@ This also means that, unless specified, if the function \c std::foo is available
\anchor cwisetable_conj
a.\link ArrayBase::conjugate conjugate\endlink(); \n
\link Eigen::conj conj\endlink(a); \n
- m.\link MatrixBase::conjugate conjugate();
+ m.\link MatrixBase::conjugate conjugate\endlink();
</td>
<td><a href="https://en.wikipedia.org/wiki/Complex_conjugate">complex conjugate</a> (\f$ \bar{a_i} \f$),\n
no-op for real </td>
@@ -74,6 +74,20 @@ This also means that, unless specified, if the function \c std::foo is available
<td>All engines (fc,fd)</td>
</tr>
<tr>
+ <td class="code">
+ \anchor cwisetable_arg
+ a.\link ArrayBase::arg arg\endlink(); \n
+ \link Eigen::arg arg\endlink(a); \n
+ m.\link MatrixBase::cwiseArg cwiseArg\endlink();
+ </td>
+ <td>phase angle of complex number</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/complex/arg">std::arg</a>; \n
+ arg(a[i]);
+ </td>
+ <td>All engines (fc,fd)</td>
+</tr>
+<tr>
<th colspan="4">Exponential functions</th>
</tr>
<tr>
@@ -133,8 +147,9 @@ This also means that, unless specified, if the function \c std::foo is available
<td class="code">
\anchor cwisetable_pow
a.\link ArrayBase::pow pow\endlink(b); \n
- \link Eigen::pow pow\endlink(a,b);
+ \link ArrayBase::pow(const Eigen::ArrayBase< Derived > &x, const Eigen::ArrayBase< ExponentDerived > &exponents) pow\endlink(a,b);
</td>
+ <!-- For some reason Doxygen thinks that pow is in ArrayBase namespace -->
<td>raises a number to the given power (\f$ a_i ^ {b_i} \f$) \n \c a and \c b can be either an array or scalar.</td>
<td class="code">
using <a href="http://en.cppreference.com/w/cpp/numeric/math/pow">std::pow</a>; \n
@@ -271,7 +286,7 @@ This also means that, unless specified, if the function \c std::foo is available
<tr>
<td class="code">
\anchor cwisetable_atan
- a.\link ArrayBase::atan tan\endlink(); \n
+ a.\link ArrayBase::atan atan\endlink(); \n
\link Eigen::atan atan\endlink(a);
</td>
<td>computes arc tangent (\f$ \tan^{-1} a_i \f$)</td>
@@ -320,6 +335,42 @@ This also means that, unless specified, if the function \c std::foo is available
<td></td>
</tr>
<tr>
+ <td class="code">
+ \anchor cwisetable_asinh
+ a.\link ArrayBase::asinh asinh\endlink(); \n
+ \link Eigen::asinh asinh\endlink(a);
+ </td>
+ <td>computes inverse hyperbolic sine</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/asinh">std::asinh</a>; \n
+ asinh(a[i]);</td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_acosh
+ a.\link ArrayBase::acosh acosh\endlink(); \n
+ \link Eigen::acosh acosh\endlink(a);
+ </td>
+ <td>computes inverse hyperbolic cosine</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/acosh">std::acosh</a>; \n
+ acosh(a[i]);</td>
+ <td></td>
+</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_atanh
+ a.\link ArrayBase::atanh atanh\endlink(); \n
+ \link Eigen::atanh atanh\endlink(a);
+ </td>
+ <td>computes inverse hyperbolic tangent</td>
+ <td class="code">
+ using <a href="http://en.cppreference.com/w/cpp/numeric/math/atanh">std::atanh</a>; \n
+ atanh(a[i]);</td>
+ <td></td>
+</tr>
+<tr>
<th colspan="4">Nearest integer floating point operations</th>
</tr>
<tr>
@@ -358,6 +409,17 @@ This also means that, unless specified, if the function \c std::foo is available
<td>SSE4,AVX,ZVector (f,d)</td>
</tr>
<tr>
+ <td class="code">
+ \anchor cwisetable_rint
+ a.\link ArrayBase::rint rint\endlink(); \n
+ \link Eigen::rint rint\endlink(a);
+ </td>
+ <td>nearest integer, \n rounding to nearest even in halfway cases</td>
+ <td>built-in generic implementation using <a href="http://en.cppreference.com/w/cpp/numeric/math/rint">\c std::rint </a>; \cpp11
+ or <a href="http://en.cppreference.com/w/c/numeric/math/rint">\c rintf </a>; </td>
+ <td>SSE4,AVX (f,d)</td>
+</tr>
+<tr>
<th colspan="4">Floating point manipulation functions</th>
</tr>
<tr>
@@ -506,7 +568,8 @@ This also means that, unless specified, if the function \c std::foo is available
<tr>
<td class="code">
\anchor cwisetable_zeta
- \link Eigen::zeta zeta\endlink(a,b);
+ \link Eigen::zeta zeta\endlink(a,b); \n
+ a.\link ArrayBase::zeta zeta\endlink(b);
</td>
<td><a href="https://en.wikipedia.org/wiki/Hurwitz_zeta_function">Hurwitz zeta function</a>
\n \f$ \zeta(a_i,b_i)=\sum_{k=0}^{\infty}(b_i+k)^{\text{-}a_i} \f$</td>
@@ -515,6 +578,18 @@ This also means that, unless specified, if the function \c std::foo is available
</td>
<td></td>
</tr>
+<tr>
+ <td class="code">
+ \anchor cwisetable_ndtri
+ a.\link ArrayBase::ndtri ndtri\endlink(); \n
+ \link Eigen::ndtri ndtri\endlink(a);
+ </td>
+ <td>Inverse of the CDF of the Normal distribution function</td>
+ <td>
+ built-in for float and double
+ </td>
+ <td></td>
+</tr>
<tr><td colspan="4"></td></tr>
</table>
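For reference, a minimal sketch exercising a few of the coefficient-wise functions documented by the new table rows above (assuming Eigen 3.4; the input values are only illustrative):
\code
#include <Eigen/Dense>
#include <complex>
#include <iostream>

int main()
{
  Eigen::ArrayXd a = Eigen::ArrayXd::LinSpaced(5, -2.5, 2.5);
  std::cout << a.asinh().transpose() << "\n"; // inverse hyperbolic sine, element-wise
  std::cout << a.rint().transpose()  << "\n"; // nearest integer, halfway cases rounded to even

  Eigen::ArrayXcd c(2);
  c << std::complex<double>(0.0, 1.0), std::complex<double>(-1.0, 0.0);
  std::cout << c.arg().transpose() << "\n";   // phase angle of each complex coefficient
}
\endcode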
diff --git a/doc/CustomizingEigen_CustomScalar.dox b/doc/CustomizingEigen_CustomScalar.dox
index 1ee78cbe5..24e5f563b 100644
--- a/doc/CustomizingEigen_CustomScalar.dox
+++ b/doc/CustomizingEigen_CustomScalar.dox
@@ -75,7 +75,7 @@ namespace Eigen {
static inline Real epsilon() { return 0; }
static inline Real dummy_precision() { return 0; }
- static inline Real digits10() { return 0; }
+ static inline int digits10() { return 0; }
enum {
IsInteger = 0,
diff --git a/doc/DenseDecompositionBenchmark.dox b/doc/DenseDecompositionBenchmark.dox
index 7be9c70cd..8f9570b7a 100644
--- a/doc/DenseDecompositionBenchmark.dox
+++ b/doc/DenseDecompositionBenchmark.dox
@@ -35,7 +35,7 @@ Timings are in \b milliseconds, and factors are relative to the LLT decompositio
+ For large problem sizes, only the decomposition implementing a cache-friendly blocking strategy scale well. Those include LLT, PartialPivLU, HouseholderQR, and BDCSVD. This explain why for a 4k x 4k matrix, HouseholderQR is faster than LDLT. In the future, LDLT and ColPivHouseholderQR will also implement blocking strategies.
+ CompleteOrthogonalDecomposition is based on ColPivHouseholderQR and they thus achieve the same level of performance.
-The above table has been generated by the <a href="https://bitbucket.org/eigen/eigen/raw/default/bench/dense_solvers.cpp">bench/dense_solvers.cpp</a> file, feel-free to hack it to generate a table matching your hardware, compiler, and favorite problem sizes.
+The above table has been generated by the <a href="https://gitlab.com/libeigen/eigen/raw/master/bench/dense_solvers.cpp">bench/dense_solvers.cpp</a> file; feel free to hack it to generate a table matching your hardware, compiler, and favorite problem sizes.
*/
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index 48bb0a8ec..bc1e03c40 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -229,7 +229,8 @@ ALIASES = "only_for_vectors=This is only for vectors (either row-
"blank= " \
"cpp11=<span class='cpp11'>[c++11]</span>" \
"cpp14=<span class='cpp14'>[c++14]</span>" \
- "cpp17=<span class='cpp17'>[c++17]</span>"
+ "cpp17=<span class='cpp17'>[c++17]</span>" \
+ "newin{1}=<span class='newin3x'>New in %Eigen \1.</span>"
ALIASES += "eigenAutoToc= "
@@ -409,7 +410,7 @@ EXTRACT_PACKAGE = NO
# If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation.
-EXTRACT_STATIC = NO
+EXTRACT_STATIC = YES
# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
# defined locally in source files will be included in the documentation.
@@ -736,6 +737,14 @@ EXCLUDE = "${Eigen_SOURCE_DIR}/Eigen/src/Core/products" \
"${Eigen_SOURCE_DIR}/unsupported/doc/examples" \
"${Eigen_SOURCE_DIR}/unsupported/doc/snippets"
+# Forward declarations of class templates cause the title of the main page for
+# the class template to not contain the template signature. This only happens
+# when the \class command is used to document the class. Possibly caused
+# by https://github.com/doxygen/doxygen/issues/7698. Confirmed fixed by
+# doxygen release 1.8.19.
+
+EXCLUDE += "${Eigen_SOURCE_DIR}/Eigen/src/Core/util/ForwardDeclarations.h"
+
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
# from the input.
@@ -1245,7 +1254,7 @@ FORMULA_TRANSPARENT = YES
# output. When enabled you may also need to install MathJax separately and
# configure the path to it using the MATHJAX_RELPATH option.
-USE_MATHJAX = NO
+USE_MATHJAX = @EIGEN_DOXY_USE_MATHJAX@
# When MathJax is enabled you need to specify the location relative to the
# HTML output directory using the MATHJAX_RELPATH option. The destination
@@ -1257,12 +1266,12 @@ USE_MATHJAX = NO
# However, it is strongly recommended to install a local
# copy of MathJax from http://www.mathjax.org before deployment.
-MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+MATHJAX_RELPATH = https://cdn.mathjax.org/mathjax/latest
# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
# names that should be enabled during MathJax rendering.
-MATHJAX_EXTENSIONS =
+MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# When the SEARCHENGINE tag is enabled doxygen will generate a search box
# for the HTML output. The underlying search engine uses javascript
@@ -1591,11 +1600,14 @@ PREDEFINED = EIGEN_EMPTY_STRUCT \
EIGEN_QT_SUPPORT \
EIGEN_STRONG_INLINE=inline \
EIGEN_DEVICE_FUNC= \
+ EIGEN_HAS_CXX11=1 \
+ EIGEN_HAS_CXX11_MATH=1 \
"EIGEN_MAKE_CWISE_BINARY_OP(METHOD,FUNCTOR)=template<typename OtherDerived> const CwiseBinaryOp<FUNCTOR<Scalar>, const Derived, const OtherDerived> METHOD(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const;" \
"EIGEN_CWISE_PRODUCT_RETURN_TYPE(LHS,RHS)=CwiseBinaryOp<internal::scalar_product_op<LHS::Scalar,RHS::Scalar>, const LHS, const RHS>"\
"EIGEN_CAT2(a,b)= a ## b"\
"EIGEN_CAT(a,b)=EIGEN_CAT2(a,b)"\
"EIGEN_CWISE_BINARY_RETURN_TYPE(LHS,RHS,OPNAME)=CwiseBinaryOp<EIGEN_CAT(EIGEN_CAT(internal::scalar_,OPNAME),_op)<LHS::Scalar, RHS::Scalar>, const LHS, const RHS>"\
+ "EIGEN_ALIGN_TO_BOUNDARY(x)="\
DOXCOMMA=,
@@ -1608,6 +1620,9 @@ PREDEFINED = EIGEN_EMPTY_STRUCT \
EXPAND_AS_DEFINED = EIGEN_MAKE_TYPEDEFS \
EIGEN_MAKE_FIXED_TYPEDEFS \
EIGEN_MAKE_TYPEDEFS_ALL_SIZES \
+ EIGEN_MAKE_ARRAY_TYPEDEFS \
+ EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS \
+ EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES \
EIGEN_CWISE_UNOP_RETURN_TYPE \
EIGEN_CWISE_BINOP_RETURN_TYPE \
EIGEN_CURRENT_STORAGE_BASE_CLASS \
@@ -1618,6 +1633,9 @@ EXPAND_AS_DEFINED = EIGEN_MAKE_TYPEDEFS \
EIGEN_EULER_ANGLES_TYPEDEFS \
EIGEN_EULER_ANGLES_SINGLE_TYPEDEF \
EIGEN_EULER_SYSTEM_TYPEDEF \
+ EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY \
+ EIGEN_MATRIX_FUNCTION \
+ EIGEN_MATRIX_FUNCTION_1 \
EIGEN_DOC_UNARY_ADDONS \
EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL \
EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF
@@ -1665,7 +1683,7 @@ ALLEXTERNALS = NO
# in the modules index. If set to NO, only the current project's groups will
# be listed.
-EXTERNAL_GROUPS = YES
+EXTERNAL_GROUPS = NO
# The PERL_PATH should be the absolute path and name of the perl script
# interpreter (i.e. the result of `which perl').
@@ -1763,7 +1781,7 @@ UML_LOOK = YES
# the class node. If there are many fields or methods and many nodes the
# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
# threshold limits the number of items for each type to make the size more
-# managable. Set this to 0 for no limit. Note that the threshold may be
+# manageable. Set this to 0 for no limit. Note that the threshold may be
# exceeded by 50% before the limit is enforced.
UML_LIMIT_NUM_FIELDS = 10
diff --git a/doc/FixedSizeVectorizable.dox b/doc/FixedSizeVectorizable.dox
index 49e38af76..0012465ca 100644
--- a/doc/FixedSizeVectorizable.dox
+++ b/doc/FixedSizeVectorizable.dox
@@ -1,6 +1,6 @@
namespace Eigen {
-/** \eigenManualPage TopicFixedSizeVectorizable Fixed-size vectorizable Eigen objects
+/** \eigenManualPage TopicFixedSizeVectorizable Fixed-size vectorizable %Eigen objects
The goal of this page is to explain what we mean by "fixed-size vectorizable".
@@ -23,15 +23,15 @@ Examples include:
\section FixedSizeVectorizable_explanation Explanation
-First, "fixed-size" should be clear: an Eigen object has fixed size if its number of rows and its number of columns are fixed at compile-time. So for example Matrix3f has fixed size, but MatrixXf doesn't (the opposite of fixed-size is dynamic-size).
+First, "fixed-size" should be clear: an %Eigen object has fixed size if its number of rows and its number of columns are fixed at compile-time. So for example \ref Matrix3f has fixed size, but \ref MatrixXf doesn't (the opposite of fixed-size is dynamic-size).
-The array of coefficients of a fixed-size Eigen object is a plain "static array", it is not dynamically allocated. For example, the data behind a Matrix4f is just a "float array[16]".
+The array of coefficients of a fixed-size %Eigen object is a plain "static array", it is not dynamically allocated. For example, the data behind a \ref Matrix4f is just a "float array[16]".
Fixed-size objects are typically very small, which means that we want to handle them with zero runtime overhead -- both in terms of memory usage and of speed.
-Now, vectorization (both SSE and AltiVec) works with 128-bit packets. Moreover, for performance reasons, these packets need to be have 128-bit alignment.
+Now, vectorization works with 128-bit packets (e.g., SSE, AltiVec, NEON), 256-bit packets (e.g., AVX), or 512-bit packets (e.g., AVX512). Moreover, for performance reasons, these packets are most efficiently read and written if they have the same alignment as the packet size, that is 16 bytes, 32 bytes, and 64 bytes respectively.
-So it turns out that the only way that fixed-size Eigen objects can be vectorized, is if their size is a multiple of 128 bits, or 16 bytes. Eigen will then request 16-byte alignment for these objects, and henceforth rely on these objects being aligned so no runtime check for alignment is performed.
+So it turns out that the best way for fixed-size %Eigen objects to be vectorized is if their size is a multiple of 16 bytes (or more). %Eigen will then request 16-byte alignment (or more) for these objects, and henceforth rely on these objects being aligned to achieve maximal efficiency.
*/
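To make the alignment claim above concrete, here is a minimal sketch (the printed values depend on which SIMD instruction set is enabled, so the numbers in the comments are only typical):
\code
#include <Eigen/Core>
#include <iostream>

int main()
{
  std::cout << alignof(Eigen::Vector4f) << "\n";      // typically 16: fixed-size vectorizable
  std::cout << alignof(Eigen::Matrix4f) << "\n";      // typically 16, or more with AVX/AVX512
  std::cout << alignof(Eigen::Vector3f) << "\n";      // 4: 12 bytes is not a multiple of 16
  std::cout << EIGEN_MAX_STATIC_ALIGN_BYTES << "\n";  // upper bound Eigen uses for static alignment
}
\endcode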
diff --git a/doc/FunctionsTakingEigenTypes.dox b/doc/FunctionsTakingEigenTypes.dox
index 152dda47d..6b4e49214 100644
--- a/doc/FunctionsTakingEigenTypes.dox
+++ b/doc/FunctionsTakingEigenTypes.dox
@@ -79,7 +79,7 @@ These examples are just intended to give the reader a first impression of how fu
\section TopicUsingRefClass How to write generic, but non-templated function?
-In all the previous examples, the functions had to be template functions. This approach allows to write very generic code, but it is often desirable to write non templated function and still keep some level of genericity to avoid stupid copies of the arguments. The typical example is to write functions accepting both a MatrixXf or a block of a MatrixXf. This exactly the purpose of the Ref class. Here is a simple example:
+In all the previous examples, the functions had to be template functions. This approach allows writing very generic code, but it is often desirable to write non-templated functions and still keep some level of genericity to avoid useless copies of the arguments. The typical example is to write functions accepting both a MatrixXf and a block of a MatrixXf. This is exactly the purpose of the Ref class. Here is a simple example:
<table class="example">
<tr><th>Example:</th><th>Output:</th></tr>
@@ -133,7 +133,7 @@ In this special case, the example is fine and will be working because both param
\section TopicPlainFunctionsFailing In which cases do functions taking a plain Matrix or Array argument fail?
-Here, we consider a slightly modified version of the function given above. This time, we do not want to return the result but pass an additional non-const paramter which allows us to store the result. A first naive implementation might look as follows.
+Here, we consider a slightly modified version of the function given above. This time, we do not want to return the result but pass an additional non-const parameter which allows us to store the result. A first naive implementation might look as follows.
\code
// Note: This code is flawed!
void cov(const MatrixXf& x, const MatrixXf& y, MatrixXf& C)
@@ -176,7 +176,7 @@ The implementation above does now not only work with temporary expressions but i
\section TopicResizingInGenericImplementations How to resize matrices in generic implementations?
-One might think we are done now, right? This is not completely true because in order for our covariance function to be generically applicable, we want the follwing code to work
+One might think we are done now, right? This is not completely true because in order for our covariance function to be generically applicable, we want the following code to work
\code
MatrixXf x = MatrixXf::Random(100,3);
MatrixXf y = MatrixXf::Random(100,3);
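For reference, a minimal sketch of the Ref-based approach described above (the function name and sizes are only illustrative):
\code
#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

// Accepts a MatrixXf, a block of one, or an expression (the latter is evaluated into a temporary).
float sum_of_squares(const Ref<const MatrixXf>& m)
{
  return m.squaredNorm();
}

int main()
{
  MatrixXf x = MatrixXf::Random(10, 4);
  std::cout << sum_of_squares(x) << "\n";                    // whole matrix
  std::cout << sum_of_squares(x.block(0, 0, 5, 2)) << "\n";  // block, typically without a copy
  std::cout << sum_of_squares(2.f * x) << "\n";              // expression, evaluated into a temporary
}
\endcode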
diff --git a/doc/HiPerformance.dox b/doc/HiPerformance.dox
index ab6cdfd44..9cee3351c 100644
--- a/doc/HiPerformance.dox
+++ b/doc/HiPerformance.dox
@@ -105,7 +105,7 @@ m1.noalias() += m2 * m3; \endcode</td>
<td>First of all, here the .noalias() in the first expression is useless because
m2*m3 will be evaluated anyway. However, note how this expression can be rewritten
so that no temporary is required. (tip: for very small fixed size matrix
- it is slighlty better to rewrite it like this: m1.noalias() = m2 * m3; m1 += m4;</td>
+ it is slightly better to rewrite it like this: m1.noalias() = m2 * m3; m1 += m4;</td>
</tr>
<tr class="alt">
<td>\code
diff --git a/doc/InsideEigenExample.dox b/doc/InsideEigenExample.dox
index ed053c69d..ea2275bf2 100644
--- a/doc/InsideEigenExample.dox
+++ b/doc/InsideEigenExample.dox
@@ -212,6 +212,11 @@ Thus, the operator+ hasn't performed any actual computation. To summarize, the o
\section Assignment The assignment
+<div class="warningbox">
+<strong>PLEASE HELP US IMPROVE THIS SECTION.</strong>
+This page reflects how %Eigen worked until 3.2, but since %Eigen 3.3 the assignment is more sophisticated as it involves an Assignment expression and the creation of so-called evaluators, which are responsible for the evaluation of each kind of expression.
+</div>
+
At this point, the expression \a v + \a w has finished evaluating, so, in the process of compiling the line of code
\code
u = v + w;
diff --git a/doc/LeastSquares.dox b/doc/LeastSquares.dox
index e2191a22f..ddbf38dec 100644
--- a/doc/LeastSquares.dox
+++ b/doc/LeastSquares.dox
@@ -16,7 +16,7 @@ equations is the fastest but least accurate, and the QR decomposition is in betw
\section LeastSquaresSVD Using the SVD decomposition
-The \link JacobiSVD::solve() solve() \endlink method in the JacobiSVD class can be directly used to
+The \link BDCSVD::solve() solve() \endlink method in the BDCSVD class can be directly used to
solve linear squares systems. It is not enough to compute only the singular values (the default for
this class); you also need the singular vectors but the thin SVD decomposition suffices for
computing least squares solutions:
@@ -30,14 +30,17 @@ computing least squares solutions:
</table>
This is example from the page \link TutorialLinearAlgebra Linear algebra and decompositions \endlink.
+If you just need to solve the least squares problem, but are not interested in the SVD per se, a
+faster alternative method is CompleteOrthogonalDecomposition.
\section LeastSquaresQR Using the QR decomposition
The solve() method in QR decomposition classes also computes the least squares solution. There are
-three QR decomposition classes: HouseholderQR (no pivoting, so fast but unstable),
-ColPivHouseholderQR (column pivoting, thus a bit slower but more accurate) and FullPivHouseholderQR
-(full pivoting, so slowest and most stable). Here is an example with column pivoting:
+three QR decomposition classes: HouseholderQR (no pivoting, fast but unstable if your matrix is
+not full rank), ColPivHouseholderQR (column pivoting, thus a bit slower but more stable) and
+FullPivHouseholderQR (full pivoting, so slowest and slightly more stable than ColPivHouseholderQR).
+Here is an example with column pivoting:
<table class="example">
<tr><th>Example:</th><th>Output:</th></tr>
@@ -61,9 +64,11 @@ Finding the least squares solution of \a Ax = \a b is equivalent to solving the
</tr>
</table>
-If the matrix \a A is ill-conditioned, then this is not a good method, because the condition number
+This method is usually the fastest, especially when \a A is "tall and skinny". However, if the
+matrix \a A is even mildly ill-conditioned, this is not a good method, because the condition number
of <i>A</i><sup>T</sup><i>A</i> is the square of the condition number of \a A. This means that you
-lose twice as many digits using normal equation than if you use the other methods.
+lose roughly twice as many digits of accuracy using the normal equation, compared to the more stable
+methods mentioned above.
*/
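For reference, a minimal sketch contrasting the approaches discussed above on a random overdetermined system (sizes are only illustrative):
\code
#include <Eigen/Dense>
#include <iostream>
using namespace Eigen;

int main()
{
  MatrixXf A = MatrixXf::Random(100, 3);
  VectorXf b = VectorXf::Random(100);

  VectorXf x_svd = A.bdcSvd(ComputeThinU | ComputeThinV).solve(b);       // most accurate
  VectorXf x_cod = A.completeOrthogonalDecomposition().solve(b);         // faster alternative to the SVD
  VectorXf x_qr  = A.colPivHouseholderQr().solve(b);                     // QR with column pivoting
  VectorXf x_ne  = (A.transpose() * A).ldlt().solve(A.transpose() * b);  // normal equations (least stable)

  std::cout << (x_svd - x_qr).norm() << "\n";  // the solutions should agree up to rounding
}
\endcode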
diff --git a/doc/Manual.dox b/doc/Manual.dox
index 342b145fd..84f0db645 100644
--- a/doc/Manual.dox
+++ b/doc/Manual.dox
@@ -15,7 +15,6 @@ namespace Eigen {
/** \page UserManual_Generalities General topics
- - \subpage Eigen2ToEigen3
- \subpage TopicFunctionTakingEigenTypes
- \subpage TopicPreprocessorDirectives
- \subpage TopicAssertions
@@ -64,42 +63,46 @@ namespace Eigen {
\ingroup DenseMatrixManipulation_chapter */
/** \addtogroup TutorialBlockOperations
\ingroup DenseMatrixManipulation_chapter */
+/** \addtogroup TutorialSlicingIndexing
+ \ingroup DenseMatrixManipulation_chapter */
/** \addtogroup TutorialAdvancedInitialization
\ingroup DenseMatrixManipulation_chapter */
/** \addtogroup TutorialReductionsVisitorsBroadcasting
\ingroup DenseMatrixManipulation_chapter */
-/** \addtogroup TutorialMapClass
+/** \addtogroup TutorialReshape
\ingroup DenseMatrixManipulation_chapter */
-/** \addtogroup TutorialReshapeSlicing
+/** \addtogroup TutorialSTL
+ \ingroup DenseMatrixManipulation_chapter */
+/** \addtogroup TutorialMapClass
\ingroup DenseMatrixManipulation_chapter */
/** \addtogroup TopicAliasing
\ingroup DenseMatrixManipulation_chapter */
/** \addtogroup TopicStorageOrders
\ingroup DenseMatrixManipulation_chapter */
-
+
/** \addtogroup DenseMatrixManipulation_Alignement
- \ingroup DenseMatrixManipulation_chapter */
-/** \addtogroup TopicUnalignedArrayAssert
- \ingroup DenseMatrixManipulation_Alignement */
-/** \addtogroup TopicFixedSizeVectorizable
- \ingroup DenseMatrixManipulation_Alignement */
-/** \addtogroup TopicStructHavingEigenMembers
- \ingroup DenseMatrixManipulation_Alignement */
-/** \addtogroup TopicStlContainers
- \ingroup DenseMatrixManipulation_Alignement */
-/** \addtogroup TopicPassingByValue
- \ingroup DenseMatrixManipulation_Alignement */
-/** \addtogroup TopicWrongStackAlignment
- \ingroup DenseMatrixManipulation_Alignement */
+ \ingroup DenseMatrixManipulation_chapter */
+/** \addtogroup TopicUnalignedArrayAssert
+ \ingroup DenseMatrixManipulation_Alignement */
+/** \addtogroup TopicFixedSizeVectorizable
+ \ingroup DenseMatrixManipulation_Alignement */
+/** \addtogroup TopicStructHavingEigenMembers
+ \ingroup DenseMatrixManipulation_Alignement */
+/** \addtogroup TopicStlContainers
+ \ingroup DenseMatrixManipulation_Alignement */
+/** \addtogroup TopicPassingByValue
+ \ingroup DenseMatrixManipulation_Alignement */
+/** \addtogroup TopicWrongStackAlignment
+ \ingroup DenseMatrixManipulation_Alignement */
/** \addtogroup DenseMatrixManipulation_Reference
- \ingroup DenseMatrixManipulation_chapter */
-/** \addtogroup Core_Module
- \ingroup DenseMatrixManipulation_Reference */
-/** \addtogroup Jacobi_Module
- \ingroup DenseMatrixManipulation_Reference */
-/** \addtogroup Householder_Module
- \ingroup DenseMatrixManipulation_Reference */
+ \ingroup DenseMatrixManipulation_chapter */
+/** \addtogroup Core_Module
+ \ingroup DenseMatrixManipulation_Reference */
+/** \addtogroup Jacobi_Module
+ \ingroup DenseMatrixManipulation_Reference */
+/** \addtogroup Householder_Module
+ \ingroup DenseMatrixManipulation_Reference */
/** \addtogroup CoeffwiseMathFunctions
\ingroup DenseMatrixManipulation_chapter */
diff --git a/doc/Overview.dox b/doc/Overview.dox
index dbb49bd21..43a12871e 100644
--- a/doc/Overview.dox
+++ b/doc/Overview.dox
@@ -4,8 +4,6 @@ namespace Eigen {
This is the API documentation for Eigen3. You can <a href="eigen-doc.tgz">download</a> it as a tgz archive for offline reading.
-You're already an Eigen2 user? Here is a \link Eigen2ToEigen3 Eigen2 to Eigen3 guide \endlink to help porting your application.
-
For a first contact with Eigen, the best place is to have a look at the \link GettingStarted getting started \endlink page that show you how to write and compile your first program with Eigen.
Then, the \b quick \b reference \b pages give you a quite complete description of the API in a very condensed format that is specially useful to recall the syntax of a particular feature, or to have a quick look at the API. They currently cover the two following feature sets, and more will come in the future:
diff --git a/doc/PassingByValue.dox b/doc/PassingByValue.dox
index bf4d0ef4b..9254fe6d8 100644
--- a/doc/PassingByValue.dox
+++ b/doc/PassingByValue.dox
@@ -4,21 +4,21 @@ namespace Eigen {
Passing objects by value is almost always a very bad idea in C++, as this means useless copies, and one should pass them by reference instead.
-With Eigen, this is even more important: passing \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen objects" by value is not only inefficient, it can be illegal or make your program crash! And the reason is that these Eigen objects have alignment modifiers that aren't respected when they are passed by value.
+With %Eigen, this is even more important: passing \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen objects" by value is not only inefficient, it can be illegal or make your program crash! And the reason is that these %Eigen objects have alignment modifiers that aren't respected when they are passed by value.
-So for example, a function like this, where v is passed by value:
+For example, a function like this, where \c v is passed by value:
\code
void my_function(Eigen::Vector2d v);
\endcode
-needs to be rewritten as follows, passing v by reference:
+needs to be rewritten as follows, passing \c v by const reference:
\code
void my_function(const Eigen::Vector2d& v);
\endcode
-Likewise if you have a class having a Eigen object as member:
+Likewise if you have a class having an %Eigen object as member:
\code
struct Foo
diff --git a/doc/Pitfalls.dox b/doc/Pitfalls.dox
index cf42effef..85282bd6f 100644
--- a/doc/Pitfalls.dox
+++ b/doc/Pitfalls.dox
@@ -2,13 +2,35 @@ namespace Eigen {
/** \page TopicPitfalls Common pitfalls
+
\section TopicPitfalls_template_keyword Compilation error with template methods
See this \link TopicTemplateKeyword page \endlink.
+
+\section TopicPitfalls_aliasing Aliasing
+
+Don't miss this \link TopicAliasing page \endlink on aliasing,
+especially if you got wrong results in statements where the destination appears on the right hand side of the expression.
+
+
+\section TopicPitfalls_alignment_issue Alignment Issues (runtime assertion)
+
+%Eigen does explicit vectorization, and while that is appreciated by many users, that also leads to some issues in special situations where data alignment is compromised.
+Indeed, prior to C++17, C++ did not have good enough support for explicit data alignment.
+In that case your program hits an assertion failure (that is, a "controlled crash") with a message that tells you to consult this page:
+\code
+http://eigen.tuxfamily.org/dox/group__TopicUnalignedArrayAssert.html
+\endcode
+Have a look at \link TopicUnalignedArrayAssert it \endlink and see for yourself if that's something that you can cope with.
+It contains detailed information about how to deal with each known cause for that issue.
+
+Now what if you don't care about vectorization and so don't want to be annoyed with these alignment issues? Then read \link getrid how to get rid of them \endlink.
+
+
\section TopicPitfalls_auto_keyword C++11 and the auto keyword
-In short: do not use the auto keywords with Eigen's expressions, unless you are 100% sure about what you are doing. In particular, do not use the auto keyword as a replacement for a Matrix<> type. Here is an example:
+In short: do not use the auto keyword with %Eigen's expressions, unless you are 100% sure about what you are doing. In particular, do not use the auto keyword as a replacement for a \c Matrix<> type. Here is an example:
\code
MatrixXd A, B;
@@ -16,23 +38,112 @@ auto C = A*B;
for(...) { ... w = C * v; ...}
\endcode
-In this example, the type of C is not a MatrixXd but an abstract expression representing a matrix product and storing references to A and B. Therefore, the product of A*B will be carried out multiple times, once per iteration of the for loop. Moreover, if the coefficients of A or B change during the iteration, then C will evaluate to different values.
+In this example, the type of C is not a \c MatrixXd but an abstract expression representing a matrix product and storing references to \c A and \c B.
+Therefore, the product of \c A*B will be carried out multiple times, once per iteration of the for loop.
+Moreover, if the coefficients of `A` or `B` change during the iteration, then `C` will evaluate to different values as in the following example:
+
+\code
+MatrixXd A = ..., B = ...;
+auto C = A*B;
+MatrixXd R1 = C;
+A = ...;
+MatrixXd R2 = C;
+\endcode
+for which we end up with `R1` &ne; `R2`.
+
Here is another example leading to a segfault:
\code
auto C = ((A+B).eval()).transpose();
// do something with C
\endcode
-The problem is that eval() returns a temporary object (in this case a MatrixXd) which is then referenced by the Transpose<> expression. However, this temporary is deleted right after the first line, and there the C expression reference a dead object. The same issue might occur when sub expressions are automatically evaluated by Eigen as in the following example:
+The problem is that \c eval() returns a temporary object (in this case a \c MatrixXd) which is then referenced by the \c Transpose<> expression.
+However, this temporary is deleted right after the first line, and then the \c C expression references a dead object.
+One possible fix consists in applying \c eval() on the whole expression:
+\code
+auto C = (A+B).transpose().eval();
+\endcode
+
+The same issue might occur when sub expressions are automatically evaluated by %Eigen as in the following example:
\code
VectorXd u, v;
auto C = u + (A*v).normalized();
// do something with C
\endcode
-where the normalized() method has to evaluate the expensive product A*v to avoid evaluating it twice. On the other hand, the following example is perfectly fine:
+Here the \c normalized() method has to evaluate the expensive product \c A*v to avoid evaluating it twice.
+Again, one possible fix is to call \c .eval() on the whole expression:
\code
auto C = (u + (A*v).normalized()).eval();
\endcode
-In this case, C will be a regular VectorXd object.
+In this case, \c C will be a regular \c VectorXd object.
+Note that DenseBase::eval() is smart enough to avoid copies when the underlying expression is already a plain \c Matrix<>.
+
+
+\section TopicPitfalls_header_issues Header Issues (failure to compile)
+
+With all libraries, one must check the documentation for which header to include.
+The same is true with %Eigen, but slightly worse: with %Eigen, a method in a class may require an additional \c \#include over what the class itself requires!
+For example, if you want to use the \c cross() method on a vector (it computes a cross-product) then you need to:
+\code
+#include<Eigen/Geometry>
+\endcode
+We try to always document this, but do tell us if we forgot an occurrence.
+
+
+\section TopicPitfalls_ternary_operator Ternary operator
+
+In short: avoid the use of the ternary operator <code>(COND ? THEN : ELSE)</code> with %Eigen's expressions for the \c THEN and \c ELSE statements.
+To see why, let's consider the following example:
+\code
+Vector3f A;
+A << 1, 2, 3;
+Vector3f B = ((1 < 0) ? (A.reverse()) : A);
+\endcode
+This example will return <code>B = 3, 2, 1</code>. Do you see why?
+The reason is that in C++ the type of the \c ELSE statement is inferred from the type of the \c THEN expression such that both match.
+Since \c THEN is a <code>Reverse<Vector3f></code>, the \c ELSE statement A is converted to a <code>Reverse<Vector3f></code>, and the compiler thus generates:
+\code
+Vector3f B = ((1 < 0) ? (A.reverse()) : Reverse<Vector3f>(A));
+\endcode
+In this very particular case, a workaround would be to call A.reverse().eval() for the \c THEN statement, but the safest and fastest approach is to avoid this ternary operator with %Eigen's expressions and use an if/else construct.
+
+
+\section TopicPitfalls_pass_by_value Pass-by-value
+
+If you don't know why passing-by-value is wrong with %Eigen, read this \link TopicPassingByValue page \endlink first.
+
+While you may be extremely careful to make sure that all of your code that explicitly uses %Eigen types is pass-by-reference, you have to watch out for templates which define the argument types at compile time.
+
+If a template has a function that takes arguments pass-by-value, and the relevant template parameter ends up being an %Eigen type, then you will of course have the same alignment problems that you would in an explicitly defined function passing %Eigen types by reference.
+
+Using %Eigen types with other third party libraries or even the STL can present the same problem.
+<code>boost::bind</code> for example uses pass-by-value to store arguments in the returned functor.
+This will of course be a problem.
+
+There are at least two ways around this:
+ - If the value you are passing is guaranteed to be around for the life of the functor, you can use boost::ref() to wrap the value as you pass it to boost::bind. Generally this is not a solution for values on the stack as if the functor ever gets passed to a lower or independent scope, the object may be gone by the time it's attempted to be used.
+ - The other option is to make your functions take a reference counted pointer like boost::shared_ptr as the argument. This avoids needing to worry about managing the lifetime of the object being passed.
+
+
+\section TopicPitfalls_matrix_bool Matrices with boolean coefficients
+
+The current behaviour of using \c Matrix with boolean coefficients is inconsistent and likely to change in future versions of Eigen, so please use it carefully!
+
+A simple example for such an inconsistency is
+
+\code
+template<int Size>
+void foo() {
+ Eigen::Matrix<bool, Size, Size> A, B, C;
+ A.setOnes();
+ B.setOnes();
+
+ C = A * B - A * B;
+ std::cout << C << "\n";
+}
+\endcode
+
+since calling \c foo<3>() prints the zero matrix while calling \c foo<10>() prints the identity matrix.
+
*/
}
diff --git a/doc/PreprocessorDirectives.dox b/doc/PreprocessorDirectives.dox
index f01b39aec..0f545b086 100644
--- a/doc/PreprocessorDirectives.dox
+++ b/doc/PreprocessorDirectives.dox
@@ -51,7 +51,7 @@ are doing.
\section TopicPreprocessorDirectivesCppVersion C++ standard features
-By default, %Eigen strive to automatically detect and enable langage features at compile-time based on
+By default, %Eigen strives to automatically detect and enable language features at compile-time based on
the information provided by the compiler.
- \b EIGEN_MAX_CPP_VER - disables usage of C++ features requiring a version greater than EIGEN_MAX_CPP_VER.
@@ -66,7 +66,7 @@ functions by defining EIGEN_HAS_C99_MATH=1.
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
- \b EIGEN_HAS_CXX11_MATH - controls the implementation of some functions such as round, logp1, isinf, isnan, etc.
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
- - \b EIGEN_HAS_RVALUE_REFERENCES - defines whetehr rvalue references are supported
+ - \b EIGEN_HAS_RVALUE_REFERENCES - defines whether rvalue references are supported
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
- \b EIGEN_HAS_STD_RESULT_OF - defines whether std::result_of is supported
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
@@ -78,6 +78,7 @@ functions by defining EIGEN_HAS_C99_MATH=1.
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
- \b EIGEN_HAS_CXX11_NOEXCEPT - defines whether noexcept is supported
Automatic detection disabled if EIGEN_MAX_CPP_VER<11.
+ - \b EIGEN_NO_IO - Disables any usage and support for `<iostream>`.
\section TopicPreprocessorDirectivesAssertions Assertions
@@ -106,7 +107,7 @@ run time. However, these assertions do cost time and can thus be turned off.
Let us emphasize that \c EIGEN_MAX_*_ALIGN_BYTES define only a desirable upper bound. In practice data is aligned to the largest power-of-two common divisor of \c EIGEN_MAX_STATIC_ALIGN_BYTES and the size of the data, such that memory is not wasted.
- \b \c EIGEN_DONT_PARALLELIZE - if defined, this disables multi-threading. This is only relevant if you enabled OpenMP.
See \ref TopicMultiThreading for details.
- - \b EIGEN_DONT_VECTORIZE - disables explicit vectorization when defined. Not defined by default, unless
+ - \b \c EIGEN_DONT_VECTORIZE - disables explicit vectorization when defined. Not defined by default, unless
alignment is disabled by %Eigen's platform test or the user defining \c EIGEN_DONT_ALIGN.
- \b \c EIGEN_UNALIGNED_VECTORIZE - disables/enables vectorization with unaligned stores. Default is 1 (enabled).
If set to 0 (disabled), then expression for which the destination cannot be aligned are not vectorized (e.g., unaligned
@@ -116,13 +117,21 @@ run time. However, these assertions do cost time and can thus be turned off.
Define it to 0 to disable.
- \b \c EIGEN_UNROLLING_LIMIT - defines the size of a loop to enable meta unrolling. Set it to zero to disable
unrolling. The size of a loop here is expressed in %Eigen's own notion of "number of FLOPS", it does not
- correspond to the number of iterations or the number of instructions. The default is value 100.
+ correspond to the number of iterations or the number of instructions. The default value is 110.
- \b \c EIGEN_STACK_ALLOCATION_LIMIT - defines the maximum bytes for a buffer to be allocated on the stack. For internal
temporary buffers, dynamic memory allocation is employed as a fall back. For fixed-size matrices or arrays, exceeding
this threshold raises a compile time assertion. Use 0 to set no limit. Default is 128 KB.
-
-
- - \c EIGEN_DONT_ALIGN - Deprecated, it is a synonym for \c EIGEN_MAX_ALIGN_BYTES=0. It disables alignment completely. %Eigen will not try to align its objects and does not expect that any objects passed to it are aligned. This will turn off vectorization if \b EIGEN_UNALIGNED_VECTORIZE=1. Not defined by default.
+ - \b \c EIGEN_NO_CUDA - disables CUDA support when defined. Might be useful in .cu files for which Eigen is used on the host only,
+ and never called from device code.
+ - \b \c EIGEN_STRONG_INLINE - This macro is used to qualify critical functions and methods that we expect the compiler to inline.
+ By default it is defined to \c __forceinline for MSVC and ICC, and to \c inline for other compilers. A typical usage is to
+ define it to \c inline for MSVC users wanting faster compilation times, at the risk of performance degradations in some rare
+ cases for which the MSVC inliner fails to do a good job.
+ - \b \c EIGEN_DEFAULT_L1_CACHE_SIZE - Sets the default L1 cache size that is used in Eigen's GEBP kernel when the correct cache size cannot be determined at runtime.
+ - \b \c EIGEN_DEFAULT_L2_CACHE_SIZE - Sets the default L2 cache size that is used in Eigen's GEBP kernel when the correct cache size cannot be determined at runtime.
+ - \b \c EIGEN_DEFAULT_L3_CACHE_SIZE - Sets the default L3 cache size that is used in Eigen's GEBP kernel when the correct cache size cannot be determined at runtime.
+
+ - \c EIGEN_DONT_ALIGN - Deprecated, it is a synonym for \c EIGEN_MAX_ALIGN_BYTES=0. It disables alignment completely. %Eigen will not try to align its objects and does not expect that any objects passed to it are aligned. This will turn off vectorization if \b \c EIGEN_UNALIGNED_VECTORIZE=1. Not defined by default.
- \c EIGEN_DONT_ALIGN_STATICALLY - Deprecated, it is a synonym for \c EIGEN_MAX_STATIC_ALIGN_BYTES=0. It disables alignment of arrays on the stack. Not defined by default, unless \c EIGEN_DONT_ALIGN is defined.
@@ -137,18 +146,18 @@ following macros are supported; none of them are defined by default.
- \b EIGEN_CWISE_PLUGIN - filename of plugin for extending the Cwise class.
- \b EIGEN_DENSEBASE_PLUGIN - filename of plugin for extending the DenseBase class.
- \b EIGEN_DYNAMICSPARSEMATRIX_PLUGIN - filename of plugin for extending the DynamicSparseMatrix class.
+ - \b EIGEN_FUNCTORS_PLUGIN - filename of plugin for adding new functors and specializations of functor_traits.
+ - \b EIGEN_MAPBASE_PLUGIN - filename of plugin for extending the MapBase class.
- \b EIGEN_MATRIX_PLUGIN - filename of plugin for extending the Matrix class.
- \b EIGEN_MATRIXBASE_PLUGIN - filename of plugin for extending the MatrixBase class.
- \b EIGEN_PLAINOBJECTBASE_PLUGIN - filename of plugin for extending the PlainObjectBase class.
- - \b EIGEN_MAPBASE_PLUGIN - filename of plugin for extending the MapBase class.
- \b EIGEN_QUATERNION_PLUGIN - filename of plugin for extending the Quaternion class.
- \b EIGEN_QUATERNIONBASE_PLUGIN - filename of plugin for extending the QuaternionBase class.
- \b EIGEN_SPARSEMATRIX_PLUGIN - filename of plugin for extending the SparseMatrix class.
- \b EIGEN_SPARSEMATRIXBASE_PLUGIN - filename of plugin for extending the SparseMatrixBase class.
- \b EIGEN_SPARSEVECTOR_PLUGIN - filename of plugin for extending the SparseVector class.
- \b EIGEN_TRANSFORM_PLUGIN - filename of plugin for extending the Transform class.
- - \b EIGEN_FUNCTORS_PLUGIN - filename of plugin for adding new functors and specializations of functor_traits.
-
+ - \b EIGEN_VECTORWISEOP_PLUGIN - filename of plugin for extending the VectorwiseOp class.
\section TopicPreprocessorDirectivesDevelopers Macros for Eigen developers
diff --git a/doc/QuickReference.dox b/doc/QuickReference.dox
index 44f5410db..c5dfce421 100644
--- a/doc/QuickReference.dox
+++ b/doc/QuickReference.dox
@@ -68,7 +68,7 @@ Array<float,4,1> <=> Array4f
Conversion between the matrix and array worlds:
\code
-Array44f a1, a1;
+Array44f a1, a2;
Matrix4f m1, m2;
m1 = a1 * a2; // coeffwise product, implicit conversion from array to matrix.
a1 = m1 * m2; // matrix product, implicit conversion from matrix to array.
@@ -261,6 +261,8 @@ x.setIdentity();
Vector3f::UnitX() // 1 0 0
Vector3f::UnitY() // 0 1 0
Vector3f::UnitZ() // 0 0 1
+Vector4f::Unit(i)
+x.setUnit(i);
\endcode
</td>
<td>
@@ -278,6 +280,7 @@ N/A
VectorXf::Unit(size,i)
+x.setUnit(size,i);
VectorXf::Unit(4,1) == Vector4f(0,1,0,0)
== Vector4f::UnitY()
\endcode
@@ -285,7 +288,12 @@ VectorXf::Unit(4,1) == Vector4f(0,1,0,0)
</tr>
</table>
-
+Note that it is allowed to call any of the \c set* functions on a dynamic-sized vector or matrix without passing new sizes.
+For instance:
+\code
+MatrixXi M(3,3);
+M.setIdentity();
+\endcode
\subsection QuickRef_Map Mapping external arrays
@@ -472,13 +480,14 @@ The main difference between the two API is that the one based on cwise* methods
while the second one (based on .array()) returns an array expression.
Recall that .array() has no cost, it only changes the available API and interpretation of the data.
-It is also very simple to apply any user defined function \c foo using DenseBase::unaryExpr together with <a href="http://en.cppreference.com/w/cpp/utility/functional/ptr_fun">std::ptr_fun</a> (c++03), <a href="http://en.cppreference.com/w/cpp/utility/functional/ref">std::ref</a> (c++11), or <a href="http://en.cppreference.com/w/cpp/language/lambda">lambdas</a> (c++11):
+It is also very simple to apply any user defined function \c foo using DenseBase::unaryExpr together with <a href="http://en.cppreference.com/w/cpp/utility/functional/ptr_fun">std::ptr_fun</a> (c++03, deprecated or removed in newer C++ versions), <a href="http://en.cppreference.com/w/cpp/utility/functional/ref">std::ref</a> (c++11), or <a href="http://en.cppreference.com/w/cpp/language/lambda">lambdas</a> (c++11):
\code
mat1.unaryExpr(std::ptr_fun(foo));
mat1.unaryExpr(std::ref(foo));
mat1.unaryExpr([](double x) { return foo(x); });
\endcode
+Note that it is not possible to pass a raw function pointer to \c unaryExpr, so please wrap it as shown above.
<a href="#" class="top">top</a>
\section QuickRef_Reductions Reductions
@@ -521,6 +530,12 @@ if((array1 < array2).any()) ... // if there exist a pair i,j such that array1(i,
<a href="#" class="top">top</a>\section QuickRef_Blocks Sub-matrices
+<div class="warningbox">
+<strong>PLEASE HELP US IMPROVE THIS SECTION.</strong>
+%Eigen 3.4 supports a much improved API for sub-matrices, including
+slicing and indexing from arrays: \ref TutorialSlicingIndexing
+</div>
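+
+For instance, a small sketch using the %Eigen 3.4 API (assuming <code>using namespace Eigen</code>):
+\code
+MatrixXd A(6,6);
+MatrixXd B = A(seqN(0,3), all);   // copy of the first 3 rows of A
+VectorXd v = A(all, last);        // copy of the last column of A
+\endcode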
+
Read-write access to a \link DenseBase::col(Index) column \endlink
or a \link DenseBase::row(Index) row \endlink of a matrix (or array):
\code
@@ -576,6 +591,11 @@ Read-write access to sub-matrices:</td></tr>
<a href="#" class="top">top</a>\section QuickRef_Misc Miscellaneous operations
+<div class="warningbox">
+<strong>PLEASE HELP US IMPROVE THIS SECTION.</strong>
+%Eigen 3.4 supports a new API for reshaping: \ref TutorialReshape
+</div>
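+
+For instance, a small sketch using the %Eigen 3.4 API (assuming <code>using namespace Eigen</code>):
+\code
+MatrixXd M(4,4);
+MatrixXd R = M.reshaped(2, 8);   // copy of M viewed as a 2x8 matrix (column-major order)
+VectorXd v = M.reshaped();       // copy of M viewed as a 16x1 column vector
+\endcode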
+
\subsection QuickRef_Reverse Reverse
Vectors, rows, and/or columns of a matrix can be reversed (see DenseBase::reverse(), DenseBase::reverseInPlace(), VectorwiseOp::reverse()).
\code
diff --git a/doc/QuickStartGuide.dox b/doc/QuickStartGuide.dox
index ea32c3b3d..4192b28b7 100644
--- a/doc/QuickStartGuide.dox
+++ b/doc/QuickStartGuide.dox
@@ -66,9 +66,9 @@ The output is as follows:
\section GettingStartedExplanation2 Explanation of the second example
-The second example starts by declaring a 3-by-3 matrix \c m which is initialized using the \link DenseBase::Random(Index,Index) Random() \endlink method with random values between -1 and 1. The next line applies a linear mapping such that the values are between 10 and 110. The function call \link DenseBase::Constant(Index,Index,const Scalar&) MatrixXd::Constant\endlink(3,3,1.2) returns a 3-by-3 matrix expression having all coefficients equal to 1.2. The rest is standard arithmetics.
+The second example starts by declaring a 3-by-3 matrix \c m which is initialized using the \link DenseBase::Random(Index,Index) Random() \endlink method with random values between -1 and 1. The next line applies a linear mapping such that the values are between 10 and 110. The function call \link DenseBase::Constant(Index,Index,const Scalar&) MatrixXd::Constant\endlink(3,3,1.2) returns a 3-by-3 matrix expression having all coefficients equal to 1.2. The rest is standard arithmetic.
-The next line of the \c main function introduces a new type: \c VectorXd. This represents a (column) vector of arbitrary size. Here, the vector \c v is created to contain \c 3 coefficients which are left unitialized. The one but last line uses the so-called comma-initializer, explained in \ref TutorialAdvancedInitialization, to set all coefficients of the vector \c v to be as follows:
+The next line of the \c main function introduces a new type: \c VectorXd. This represents a (column) vector of arbitrary size. Here, the vector \c v is created to contain \c 3 coefficients which are left uninitialized. The next-to-last line uses the so-called comma-initializer, explained in \ref TutorialAdvancedInitialization, to set all coefficients of the vector \c v to be as follows:
\f[
v =
diff --git a/doc/SparseLinearSystems.dox b/doc/SparseLinearSystems.dox
index fc33b93e7..38754e4af 100644
--- a/doc/SparseLinearSystems.dox
+++ b/doc/SparseLinearSystems.dox
@@ -70,6 +70,9 @@ They are summarized in the following tables:
<tr><td>UmfPackLU</td><td>\link UmfPackSupport_Module UmfPackSupport \endlink</td><td>Direct LU factorization</td><td>Square</td><td>Fill-in reducing, Leverage fast dense algebra</td>
<td>Requires the <a href="http://www.suitesparse.com">SuiteSparse</a> package, \b GPL </td>
<td></td></tr>
+<tr><td>KLU</td><td>\link KLUSupport_Module KLUSupport \endlink</td><td>Direct LU factorization</td><td>Square</td><td>Fill-in reducing, suited for circuit simulation</td>
+ <td>Requires the <a href="http://www.suitesparse.com">SuiteSparse</a> package, \b GPL </td>
+ <td></td></tr>
<tr><td>SuperLU</td><td>\link SuperLUSupport_Module SuperLUSupport \endlink</td><td>Direct LU factorization</td><td>Square</td><td>Fill-in reducing, Leverage fast dense algebra</td>
<td>Requires the <a href="http://crd-legacy.lbl.gov/~xiaoye/SuperLU/">SuperLU</a> library, (BSD-like)</td>
<td></td></tr>
diff --git a/doc/SparseQuickReference.dox b/doc/SparseQuickReference.dox
index a25622e80..9779f3f9c 100644
--- a/doc/SparseQuickReference.dox
+++ b/doc/SparseQuickReference.dox
@@ -80,7 +80,7 @@ sm1.setZero();
\section SparseBasicInfos Matrix properties
-Beyond the basic functions rows() and cols(), there are some useful functions that are available to easily get some informations from the matrix.
+Beyond the basic functions rows() and cols(), there are some useful functions that are available to easily get some information from the matrix.
<table class="manual">
<tr>
<td> \code
@@ -244,7 +244,7 @@ As stated earlier, for a read-write sub-matrix (RW), the evaluation can be done
<td>
\code
sm1.valuePtr(); // Pointer to the values
-sm1.innerIndextr(); // Pointer to the indices.
+sm1.innerIndexPtr(); // Pointer to the indices.
sm1.outerIndexPtr(); // Pointer to the beginning of each inner vector
\endcode
</td>
diff --git a/doc/StlContainers.dox b/doc/StlContainers.dox
index e0f8714a9..0342573d0 100644
--- a/doc/StlContainers.dox
+++ b/doc/StlContainers.dox
@@ -6,31 +6,39 @@ namespace Eigen {
\section StlContainers_summary Executive summary
-Using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", or classes having members of such types, requires taking the following two steps:
+If you're compiling in \cpp17 mode only with a sufficiently recent compiler (e.g., GCC>=7, clang>=5, MSVC>=19.12), then everything is taken care of by the compiler and you can stop reading.
-\li A 16-byte-aligned allocator must be used. Eigen does provide one ready for use: aligned_allocator.
-\li If you want to use the std::vector container, you need to \#include <Eigen/StdVector>.
+Otherwise, using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", or classes having members of such types, requires the use of an over-aligned allocator.
+That is, an allocator capable of allocating buffers with 16-, 32-, or even 64-byte alignment.
+%Eigen does provide one ready for use: aligned_allocator.
-These issues arise only with \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member". For other Eigen types, such as Vector3f or MatrixXd, no special care is needed when using STL containers.
+Prior to \cpp11, if you want to use the `std::vector` container, then you also have to <code> \#include <Eigen/StdVector> </code>.
+
+These issues arise only with \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member".
+For other %Eigen types, such as Vector3f or MatrixXd, no special care is needed when using STL containers.
\section allocator Using an aligned allocator
-STL containers take an optional template parameter, the allocator type. When using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", you need tell the container to use an allocator that will always allocate memory at 16-byte-aligned locations. Fortunately, Eigen does provide such an allocator: Eigen::aligned_allocator.
+STL containers take an optional template parameter, the allocator type. When using STL containers on \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", you need to tell the container to use an allocator that will always allocate memory at 16-byte-aligned (or more) locations. Fortunately, %Eigen does provide such an allocator: Eigen::aligned_allocator.
For example, instead of
\code
-std::map<int, Eigen::Vector4f>
+std::map<int, Eigen::Vector4d>
\endcode
you need to use
\code
-std::map<int, Eigen::Vector4f, std::less<int>,
- Eigen::aligned_allocator<std::pair<const int, Eigen::Vector4f> > >
+std::map<int, Eigen::Vector4d, std::less<int>,
+ Eigen::aligned_allocator<std::pair<const int, Eigen::Vector4d> > >
\endcode
-Note that the third parameter "std::less<int>" is just the default value, but we have to include it because we want to specify the fourth parameter, which is the allocator type.
+Note that the third parameter `std::less<int>` is just the default value, but we have to include it because we want to specify the fourth parameter, which is the allocator type.
\section StlContainers_vector The case of std::vector
-The situation with std::vector was even worse (explanation below) so we had to specialize it for the Eigen::aligned_allocator type. In practice you \b must use the Eigen::aligned_allocator (not another aligned allocator), \b and \#include <Eigen/StdVector>.
+This section is for c++98/03 users only. \cpp11 (or above) users can stop reading here.
+
+So in c++98/03, the situation with `std::vector` is more complicated because of a bug in the standard (explanation below).
+To work around the issue, we had to specialize it for the Eigen::aligned_allocator type.
+In practice you \b must use the Eigen::aligned_allocator (not another aligned allocator), \b and \#include <Eigen/StdVector>.
Here is an example:
\code
@@ -39,12 +47,16 @@ Here is an example:
std::vector<Eigen::Vector4f,Eigen::aligned_allocator<Eigen::Vector4f> >
\endcode
+<span class="note">\b Explanation: The `resize()` method of `std::vector` takes a `value_type` argument (defaulting to `value_type()`). So with `std::vector<Eigen::Vector4d>`, some Eigen::Vector4d objects will be passed by value, which discards any alignment modifiers, so an Eigen::Vector4d can be created at an unaligned location.
+In order to avoid that, the only solution we saw was to specialize `std::vector` to make it work on a slight modification of, here, Eigen::Vector4d, that is able to deal properly with this situation.
+</span>
+
\subsection vector_spec An alternative - specializing std::vector for Eigen types
As an alternative to the recommended approach described above, you have the option to specialize std::vector for Eigen types requiring alignment.
-The advantage is that you won't need to declare std::vector all over with Eigen::allocator. One drawback on the other hand side is that
-the specialization needs to be defined before all code pieces in which e.g. std::vector<Vector2d> is used. Otherwise, without knowing the specialization
-the compiler will compile that particular instance with the default std::allocator and you program is most likely to crash.
+The advantage is that you won't need to declare std::vector all over with Eigen::aligned_allocator. One drawback, on the other hand, is that
+the specialization needs to be defined before all code pieces in which e.g. `std::vector<Vector2d>` is used. Otherwise, without knowing the specialization
+the compiler will compile that particular instance with the default `std::allocator` and your program is most likely to crash.
Here is an example:
\code
@@ -54,8 +66,7 @@ EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(Matrix2d)
std::vector<Eigen::Vector2d>
\endcode
-<span class="note">\b Explanation: The resize() method of std::vector takes a value_type argument (defaulting to value_type()). So with std::vector<Eigen::Vector4f>, some Eigen::Vector4f objects will be passed by value, which discards any alignment modifiers, so a Eigen::Vector4f can be created at an unaligned location. In order to avoid that, the only solution we saw was to specialize std::vector to make it work on a slight modification of, here, Eigen::Vector4f, that is able to deal properly with this situation.
-</span>
+
*/
diff --git a/doc/StructHavingEigenMembers.dox b/doc/StructHavingEigenMembers.dox
index 7fbed0eb0..87016cdc9 100644
--- a/doc/StructHavingEigenMembers.dox
+++ b/doc/StructHavingEigenMembers.dox
@@ -6,7 +6,12 @@ namespace Eigen {
\section StructHavingEigenMembers_summary Executive Summary
-If you define a structure having members of \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", you must overload its "operator new" so that it generates 16-bytes-aligned pointers. Fortunately, %Eigen provides you with a macro EIGEN_MAKE_ALIGNED_OPERATOR_NEW that does that for you.
+
+If you define a structure having members of \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types", you must ensure that calling operator new on it allocates properly aligned buffers.
+If you're compiling in \cpp17 mode only with a sufficiently recent compiler (e.g., GCC>=7, clang>=5, MSVC>=19.12), then everything is taken care of by the compiler and you can stop reading.
+
+Otherwise, you have to overload its `operator new` so that it generates properly aligned pointers (e.g., 32-byte-aligned for Vector4d and AVX).
+Fortunately, %Eigen provides you with a macro `EIGEN_MAKE_ALIGNED_OPERATOR_NEW` that does that for you.
\section StructHavingEigenMembers_what What kind of code needs to be changed?
@@ -29,13 +34,13 @@ In other words: you have a class that has as a member a \ref TopicFixedSizeVecto
\section StructHavingEigenMembers_how How should such code be modified?
-Very easy, you just need to put a EIGEN_MAKE_ALIGNED_OPERATOR_NEW macro in a public part of your class, like this:
+Very easy, you just need to put a `EIGEN_MAKE_ALIGNED_OPERATOR_NEW` macro in a public part of your class, like this:
\code
class Foo
{
...
- Eigen::Vector2d v;
+ Eigen::Vector4d v;
...
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
@@ -46,7 +51,9 @@ public:
Foo *foo = new Foo;
\endcode
-This macro makes "new Foo" always return an aligned pointer.
+This macro makes `new Foo` always return an aligned pointer.
+
+In \cpp17, this macro is empty.
If this approach is too intrusive, see also the \ref StructHavingEigenMembers_othersolutions "other solutions".
@@ -58,7 +65,7 @@ OK let's say that your code looks like this:
class Foo
{
...
- Eigen::Vector2d v;
+ Eigen::Vector4d v;
...
};
@@ -67,45 +74,59 @@ class Foo
Foo *foo = new Foo;
\endcode
-A Eigen::Vector2d consists of 2 doubles, which is 128 bits. Which is exactly the size of a SSE packet, which makes it possible to use SSE for all sorts of operations on this vector. But SSE instructions (at least the ones that %Eigen uses, which are the fast ones) require 128-bit alignment. Otherwise you get a segmentation fault.
+An Eigen::Vector4d consists of 4 doubles, which is 256 bits.
+This is exactly the size of an AVX register, which makes it possible to use AVX for all sorts of operations on this vector.
+But AVX instructions (at least the ones that %Eigen uses, which are the fast ones) require 256-bit alignment.
+Otherwise you get a segmentation fault.
-For this reason, Eigen takes care by itself to require 128-bit alignment for Eigen::Vector2d, by doing two things:
-\li Eigen requires 128-bit alignment for the Eigen::Vector2d's array (of 2 doubles). With GCC, this is done with a __attribute__ ((aligned(16))).
-\li Eigen overloads the "operator new" of Eigen::Vector2d so it will always return 128-bit aligned pointers.
+For this reason, %Eigen takes care by itself to require 256-bit alignment for Eigen::Vector4d, by doing two things:
+\li %Eigen requires 256-bit alignment for the Eigen::Vector4d's array (of 4 doubles). With \cpp11 this is done with the <a href="https://en.cppreference.com/w/cpp/keyword/alignas">alignas</a> keyword, or with compiler extensions for c++98/03.
+\li %Eigen overloads the `operator new` of Eigen::Vector4d so it will always return 256-bit aligned pointers. (removed in \cpp17)
-Thus, normally, you don't have to worry about anything, Eigen handles alignment for you...
+Thus, normally, you don't have to worry about anything: %Eigen handles alignment of operator new for you...
-... except in one case. When you have a class Foo like above, and you dynamically allocate a new Foo as above, then, since Foo doesn't have aligned "operator new", the returned pointer foo is not necessarily 128-bit aligned.
+... except in one case. When you have a `class Foo` like above, and you dynamically allocate a new `Foo` as above, then, since `Foo` doesn't have aligned `operator new`, the returned pointer foo is not necessarily 256-bit aligned.
-The alignment attribute of the member v is then relative to the start of the class, foo. If the foo pointer wasn't aligned, then foo->v won't be aligned either!
+The alignment attribute of the member `v` is then relative to the start of the class `Foo`. If the `foo` pointer wasn't aligned, then `foo->v` won't be aligned either!
-The solution is to let class Foo have an aligned "operator new", as we showed in the previous section.
+The solution is to let `class Foo` have an aligned `operator new`, as we showed in the previous section.
+
+This explanation also holds for SSE/NEON/MSA/Altivec/VSX targets, which require 16-byte alignment, and for AVX512 which requires 64-byte alignment for fixed-size objects whose size is a multiple of 64 bytes (e.g., Eigen::Matrix4d).
\section StructHavingEigenMembers_movetotop Should I then put all the members of Eigen types at the beginning of my class?
-That's not required. Since Eigen takes care of declaring 128-bit alignment, all members that need it are automatically 128-bit aligned relatively to the class. So code like this works fine:
+That's not required. Since %Eigen takes care of declaring adequate alignment, all members that need it are automatically aligned relative to the class. So code like this works fine:
\code
class Foo
{
double x;
- Eigen::Vector2d v;
+ Eigen::Vector4d v;
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
\endcode
+That said, as usual, it is recommended to sort the members so that alignment does not waste memory.
+In the above example, with AVX, the compiler will have to reserve 24 empty bytes between `x` and `v`.
+
+
\section StructHavingEigenMembers_dynamicsize What about dynamic-size matrices and vectors?
Dynamic-size matrices and vectors, such as Eigen::VectorXd, allocate dynamically their own array of coefficients, so they take care of requiring absolute alignment automatically. So they don't cause this issue. The issue discussed here is only with \ref TopicFixedSizeVectorizable "fixed-size vectorizable matrices and vectors".
+
\section StructHavingEigenMembers_bugineigen So is this a bug in Eigen?
-No, it's not our bug. It's more like an inherent problem of the C++98 language specification, and seems to be taken care of in the upcoming language revision: <a href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2341.pdf">see this document</a>.
+No, it's not our bug. It's more like an inherent problem of the c++ language specification that has been solved in c++17 through the feature known as <a href="http://wg21.link/p0035r4">dynamic memory allocation for over-aligned data</a>.
+
-\section StructHavingEigenMembers_conditional What if I want to do this conditionnally (depending on template parameters) ?
+\section StructHavingEigenMembers_conditional What if I want to do this conditionally (depending on template parameters) ?
-For this situation, we offer the macro EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign). It will generate aligned operators like EIGEN_MAKE_ALIGNED_OPERATOR_NEW if NeedsToAlign is true. It will generate operators with the default alignment if NeedsToAlign is false.
+For this situation, we offer the macro `EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)`.
+It will generate aligned operators like `EIGEN_MAKE_ALIGNED_OPERATOR_NEW` if `NeedsToAlign` is true.
+It will generate operators with the default alignment if `NeedsToAlign` is false.
+In \cpp17, this macro is empty.
Example:
@@ -130,7 +151,7 @@ Foo<3> *foo3 = new Foo<3>; // foo3 has only the system default alignment guarant
\section StructHavingEigenMembers_othersolutions Other solutions
-In case putting the EIGEN_MAKE_ALIGNED_OPERATOR_NEW macro everywhere is too intrusive, there exists at least two other solutions.
+In case putting the `EIGEN_MAKE_ALIGNED_OPERATOR_NEW` macro everywhere is too intrusive, there exist at least two other solutions.
\subsection othersolutions1 Disabling alignment
@@ -139,22 +160,13 @@ The first is to disable alignment requirement for the fixed size members:
class Foo
{
...
- Eigen::Matrix<double,2,1,Eigen::DontAlign> v;
+ Eigen::Matrix<double,4,1,Eigen::DontAlign> v;
...
};
\endcode
-This has for effect to disable vectorization when using \c v.
-If a function of Foo uses it several times, then it still possible to re-enable vectorization by copying it into an aligned temporary vector:
-\code
-void Foo::bar()
-{
- Eigen::Vector2d av(v);
- // use av instead of v
- ...
- // if av changed, then do:
- v = av;
-}
-\endcode
+This `v` is fully compatible with the aligned Eigen::Vector4d.
+The only effect is to make loads/stores to `v` more expensive (usually only slightly, but that is hardware dependent).
+
\subsection othersolutions2 Private structure
@@ -164,7 +176,7 @@ The second consist in storing the fixed-size objects into a private struct which
struct Foo_d
{
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
- Vector2d v;
+ Vector4d v;
...
};
@@ -183,7 +195,8 @@ private:
};
\endcode
-The clear advantage here is that the class Foo remains unchanged regarding alignment issues. The drawback is that a heap allocation will be required whatsoever.
+The clear advantage here is that the class `Foo` remains unchanged regarding alignment issues.
+The drawback is that an additional heap allocation is required in any case.
*/
diff --git a/doc/TemplateKeyword.dox b/doc/TemplateKeyword.dox
index b84cfdae9..fbf2c7081 100644
--- a/doc/TemplateKeyword.dox
+++ b/doc/TemplateKeyword.dox
@@ -76,7 +76,7 @@ point where the template is defined, without knowing the actual value of the tem
and \c Derived2 in the example). That means that the compiler cannot know that <tt>dst.triangularView</tt> is
a member template and that the following &lt; symbol is part of the delimiter for the template
parameter. Another possibility would be that <tt>dst.triangularView</tt> is a member variable with the &lt;
-symbol refering to the <tt>operator&lt;()</tt> function. In fact, the compiler should choose the second
+symbol referring to the <tt>operator&lt;()</tt> function. In fact, the compiler should choose the second
possibility, according to the standard. If <tt>dst.triangularView</tt> is a member template (as in our case),
the programmer should specify this explicitly with the \c template keyword and write <tt>dst.template
triangularView</tt>.
diff --git a/doc/TopicCMakeGuide.dox b/doc/TopicCMakeGuide.dox
index 896cfa831..cf767d0dd 100644
--- a/doc/TopicCMakeGuide.dox
+++ b/doc/TopicCMakeGuide.dox
@@ -32,9 +32,13 @@ which requires at least version 3.3 of %Eigen. Here, `path-to-example-directory`
is the path to the directory that contains both `CMakeLists.txt` and
`example.cpp`.
-If you have multiple installed version of %Eigen, you can pick your favorite one by setting the \c Eigen3_DIR cmake's variable to the respective path containing the \c Eigen3*.cmake files. For instance:
-\code
-cmake path-to-example-directory -DEigen3_DIR=$HOME/mypackages/share/eigen3/cmake/
+Do not forget to set the <a href="https://cmake.org/cmake/help/v3.7/variable/CMAKE_PREFIX_PATH.html">\c CMAKE_PREFIX_PATH </a> variable if Eigen is not installed in a default location or if you want to pick a specific version. For instance:
+\code{.sh}
+$ cmake path-to-example-directory -DCMAKE_PREFIX_PATH=$HOME/mypackages
+\endcode
+An alternative is to set the \c Eigen3_DIR CMake variable to the respective path containing the \c Eigen3*.cmake files. For instance:
+\code{.sh}
+$ cmake path-to-example-directory -DEigen3_DIR=$HOME/mypackages/share/eigen3/cmake/
\endcode
If the `REQUIRED` option is omitted when locating %Eigen using
diff --git a/doc/TopicLazyEvaluation.dox b/doc/TopicLazyEvaluation.dox
index 101ef8c72..d2a704f13 100644
--- a/doc/TopicLazyEvaluation.dox
+++ b/doc/TopicLazyEvaluation.dox
@@ -2,63 +2,95 @@ namespace Eigen {
/** \page TopicLazyEvaluation Lazy Evaluation and Aliasing
-Executive summary: Eigen has intelligent compile-time mechanisms to enable lazy evaluation and removing temporaries where appropriate.
+Executive summary: %Eigen has intelligent compile-time mechanisms to enable lazy evaluation and remove temporaries where appropriate.
It will handle aliasing automatically in most cases, for example with matrix products. The automatic behavior can be overridden
manually by using the MatrixBase::eval() and MatrixBase::noalias() methods.
When you write a line of code involving a complex expression such as
-\code mat1 = mat2 + mat3 * (mat4 + mat5); \endcode
+\code mat1 = mat2 + mat3 * (mat4 + mat5);
+\endcode
-Eigen determines automatically, for each sub-expression, whether to evaluate it into a temporary variable. Indeed, in certain cases it is better to evaluate immediately a sub-expression into a temporary variable, while in other cases it is better to avoid that.
+%Eigen determines automatically, for each sub-expression, whether to evaluate it into a temporary variable. Indeed, in certain cases it is better to evaluate a sub-expression into a temporary variable, while in other cases it is better to avoid that.
A traditional math library without expression templates always evaluates all sub-expressions into temporaries. So with this code,
-\code vec1 = vec2 + vec3; \endcode
+\code vec1 = vec2 + vec3;
+\endcode
a traditional library would evaluate \c vec2 + vec3 into a temporary \c vec4 and then copy \c vec4 into \c vec1. This is of course inefficient: the arrays are traversed twice, so there are a lot of useless load/store operations.
-Expression-templates-based libraries can avoid evaluating sub-expressions into temporaries, which in many cases results in large speed improvements. This is called <i>lazy evaluation</i> as an expression is getting evaluated as late as possible, instead of immediately. However, most other expression-templates-based libraries <i>always</i> choose lazy evaluation. There are two problems with that: first, lazy evaluation is not always a good choice for performance; second, lazy evaluation can be very dangerous, for example with matrix products: doing <tt>matrix = matrix*matrix</tt> gives a wrong result if the matrix product is lazy-evaluated, because of the way matrix product works.
+Expression-templates-based libraries can avoid evaluating sub-expressions into temporaries, which in many cases results in large speed improvements.
+This is called <i>lazy evaluation</i>, as an expression is evaluated as late as possible.
+In %Eigen <b>all expressions are lazy-evaluated</b>.
+More precisely, an expression starts to be evaluated once it is assigned to a matrix.
+Until then nothing happens beyond constructing the abstract expression tree.
+In contrast to most other expression-templates-based libraries, however, <b>%Eigen might choose to evaluate some sub-expressions into temporaries</b>.
+There are two reasons for that: first, pure lazy evaluation is not always a good choice for performance; second, pure lazy evaluation can be very dangerous, for example with matrix products: doing <tt>mat = mat*mat</tt> gives a wrong result if the matrix product is directly evaluated within the destination matrix, because of the way the matrix product works.
-For these reasons, Eigen has intelligent compile-time mechanisms to determine automatically when to use lazy evaluation, and when on the contrary it should evaluate immediately into a temporary variable.
+For these reasons, %Eigen has intelligent compile-time mechanisms to determine automatically which sub-expression should be evaluated into a temporary variable.
So in the basic example,
-\code matrix1 = matrix2 + matrix3; \endcode
+\code mat1 = mat2 + mat3;
+\endcode
-Eigen chooses lazy evaluation. Thus the arrays are traversed only once, producing optimized code. If you really want to force immediate evaluation, use \link MatrixBase::eval() eval()\endlink:
+%Eigen chooses not to introduce any temporary. Thus the arrays are traversed only once, producing optimized code.
+If you really want to force immediate evaluation, use \link MatrixBase::eval() eval()\endlink:
-\code matrix1 = (matrix2 + matrix3).eval(); \endcode
+\code mat1 = (mat2 + mat3).eval();
+\endcode
Here is now a more involved example:
-\code matrix1 = -matrix2 + matrix3 + 5 * matrix4; \endcode
+\code mat1 = -mat2 + mat3 + 5 * mat4;
+\endcode
-Eigen chooses lazy evaluation at every stage in that example, which is clearly the correct choice. In fact, lazy evaluation is the "default choice" and Eigen will choose it except in a few circumstances.
+Here again %Eigen won't introduce any temporary, thus producing a single <b>fused</b> evaluation loop, which is clearly the correct choice.
-<b>The first circumstance</b> in which Eigen chooses immediate evaluation, is when it sees an assignment <tt>a = b;</tt> and the expression \c b has the evaluate-before-assigning \link flags flag\endlink. The most important example of such an expression is the \link Product matrix product expression\endlink. For example, when you do
+\section TopicLazyEvaluationWhichExpr Which sub-expressions are evaluated into temporaries?
-\code matrix = matrix * matrix; \endcode
+The default evaluation strategy is to fuse the operations in a single loop, and %Eigen will choose it except in a few circumstances.
-Eigen first evaluates <tt>matrix * matrix</tt> into a temporary matrix, and then copies it into the original \c matrix. This guarantees a correct result as we saw above that lazy evaluation gives wrong results with matrix products. It also doesn't cost much, as the cost of the matrix product itself is much higher.
+<b>The first circumstance</b> in which %Eigen chooses to evaluate a sub-expression is when it sees an assignment <tt>a = b;</tt> and the expression \c b has the evaluate-before-assigning \link flags flag\endlink.
+The most important example of such an expression is the \link Product matrix product expression\endlink. For example, when you do
+
+\code mat = mat * mat;
+\endcode
+
+%Eigen will evaluate <tt>mat * mat</tt> into a temporary matrix, and then copy it into the original \c mat.
+This guarantees a correct result as we saw above that lazy evaluation gives wrong results with matrix products.
+It also doesn't cost much, as the cost of the matrix product itself is much higher.
+Note that this temporary is introduced at evaluation time only, that is, within operator= in this example.
+The expression <tt>mat * mat</tt> still returns an abstract product type.
What if you know that the result does not alias the operand of the product and want to force lazy evaluation? Then use \link MatrixBase::noalias() .noalias()\endlink instead. Here is an example:
-\code matrix1.noalias() = matrix2 * matrix2; \endcode
+\code mat1.noalias() = mat2 * mat2;
+\endcode
-Here, since we know that matrix2 is not the same matrix as matrix1, we know that lazy evaluation is not dangerous, so we may force lazy evaluation. Concretely, the effect of noalias() here is to bypass the evaluate-before-assigning \link flags flag\endlink.
+Here, since we know that mat2 is not the same matrix as mat1, we know that lazy evaluation is not dangerous, so we may force lazy evaluation. Concretely, the effect of noalias() here is to bypass the evaluate-before-assigning \link flags flag\endlink.
-<b>The second circumstance</b> in which Eigen chooses immediate evaluation, is when it sees a nested expression such as <tt>a + b</tt> where \c b is already an expression having the evaluate-before-nesting \link flags flag\endlink. Again, the most important example of such an expression is the \link Product matrix product expression\endlink. For example, when you do
+<b>The second circumstance</b> in which %Eigen chooses to evaluate a sub-expression, is when it sees a nested expression such as <tt>a + b</tt> where \c b is already an expression having the evaluate-before-nesting \link flags flag\endlink.
+Again, the most important example of such an expression is the \link Product matrix product expression\endlink.
+For example, when you do
-\code matrix1 = matrix2 + matrix3 * matrix4; \endcode
+\code mat1 = mat2 * mat3 + mat4 * mat5;
+\endcode
-the product <tt>matrix3 * matrix4</tt> gets evaluated immediately into a temporary matrix. Indeed, experiments showed that it is often beneficial for performance to evaluate immediately matrix products when they are nested into bigger expressions.
+the products <tt>mat2 * mat3</tt> and <tt>mat4 * mat5</tt> get evaluated separately into temporary matrices before being summed up in <tt>mat1</tt>.
+Indeed, to be efficient, matrix products need to be evaluated within a destination matrix at hand, and not as simple "dot products".
+For small matrices, however, you might want to enforce a "dot-product" based lazy evaluation with lazyProduct().
+Again, it is important to understand that those temporaries are created at evaluation time only, that is, within operator=.
+See \ref TopicPitfalls_auto_keyword for common pitfalls regarding this remark.
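+For instance, a minimal sketch enforcing the lazy, temporary-free evaluation (assuming \c mat1 does not alias the other operands):
+\code
+mat1.noalias() = mat2.lazyProduct(mat3) + mat4.lazyProduct(mat5);
+\endcode
+Here no temporary is introduced for the products: each coefficient of the result is computed as a plain "dot product" within a single fused loop.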
-<b>The third circumstance</b> in which Eigen chooses immediate evaluation, is when its cost model shows that the total cost of an operation is reduced if a sub-expression gets evaluated into a temporary. Indeed, in certain cases, an intermediate result is sufficiently costly to compute and is reused sufficiently many times, that is worth "caching". Here is an example:
+<b>The third circumstance</b> in which %Eigen chooses to evaluate a sub-expression, is when its cost model shows that the total cost of an operation is reduced if a sub-expression gets evaluated into a temporary.
+Indeed, in certain cases, an intermediate result is sufficiently costly to compute and is reused sufficiently many times, that is worth "caching". Here is an example:
-\code matrix1 = matrix2 * (matrix3 + matrix4); \endcode
+\code mat1 = mat2 * (mat3 + mat4);
+\endcode
-Here, provided the matrices have at least 2 rows and 2 columns, each coefficienct of the expression <tt>matrix3 + matrix4</tt> is going to be used several times in the matrix product. Instead of computing the sum everytime, it is much better to compute it once and store it in a temporary variable. Eigen understands this and evaluates <tt>matrix3 + matrix4</tt> into a temporary variable before evaluating the product.
+Here, provided the matrices have at least 2 rows and 2 columns, each coefficient of the expression <tt>mat3 + mat4</tt> is going to be used several times in the matrix product. Instead of computing the sum every time, it is much better to compute it once and store it in a temporary variable. %Eigen understands this and evaluates <tt>mat3 + mat4</tt> into a temporary variable before evaluating the product.
*/
diff --git a/doc/TopicLinearAlgebraDecompositions.dox b/doc/TopicLinearAlgebraDecompositions.dox
index 491470627..402b3769e 100644
--- a/doc/TopicLinearAlgebraDecompositions.dox
+++ b/doc/TopicLinearAlgebraDecompositions.dox
@@ -4,7 +4,7 @@ namespace Eigen {
This page presents a catalogue of the dense matrix decompositions offered by Eigen.
For an introduction on linear solvers and decompositions, check this \link TutorialLinearAlgebra page \endlink.
-To get an overview of the true relative speed of the different decomposition, check this \link DenseDecompositionBenchmark benchmark \endlink.
+To get an overview of the true relative speed of the different decompositions, check this \link DenseDecompositionBenchmark benchmark \endlink.
\section TopicLinAlgBigTable Catalogue of decompositions offered by Eigen
@@ -72,7 +72,7 @@ To get an overview of the true relative speed of the different decomposition, ch
<td>Orthogonalization</td>
<td>Yes</td>
<td>Excellent</td>
- <td><em>Soon: blocking</em></td>
+ <td><em>-</em></td>
</tr>
<tr>
@@ -88,6 +88,18 @@ To get an overview of the true relative speed of the different decomposition, ch
</tr>
<tr class="alt">
+ <td>CompleteOrthogonalDecomposition</td>
+ <td>-</td>
+ <td>Fast</td>
+ <td>Good</td>
+ <td>Yes</td>
+ <td>Orthogonalization</td>
+ <td>Yes</td>
+ <td>Excellent</td>
+ <td><em>-</em></td>
+ </tr>
+
+ <tr>
<td>LLT</td>
<td>Positive definite</td>
<td>Very fast</td>
@@ -99,7 +111,7 @@ To get an overview of the true relative speed of the different decomposition, ch
<td>Blocking</td>
</tr>
- <tr>
+ <tr class="alt">
<td>LDLT</td>
<td>Positive or negative semidefinite<sup><a href="#note1">1</a></sup></td>
<td>Very fast</td>
@@ -114,6 +126,18 @@ To get an overview of the true relative speed of the different decomposition, ch
<tr><th class="inter" colspan="9">\n Singular values and eigenvalues decompositions</th></tr>
<tr>
+ <td>BDCSVD (divide \& conquer)</td>
+ <td>-</td>
+ <td>One of the fastest SVD algorithms</td>
+ <td>Excellent</td>
+ <td>Yes</td>
+ <td>Singular values/vectors, least squares</td>
+ <td>Yes (and does least squares)</td>
+ <td>Excellent</td>
+ <td>Blocked bidiagonalization</td>
+ </tr>
+
+ <tr>
<td>JacobiSVD (two-sided)</td>
<td>-</td>
<td>Slow (but fast for small matrices)</td>
@@ -248,7 +272,7 @@ To get an overview of the true relative speed of the different decomposition, ch
<dt><b>Blocking</b></dt>
<dd>Means the algorithm can work per block, whence guaranteeing a good scaling of the performance for large matrices.</dd>
<dt><b>Implicit Multi Threading (MT)</b></dt>
- <dd>Means the algorithm can take advantage of multicore processors via OpenMP. "Implicit" means the algortihm itself is not parallelized, but that it relies on parallelized matrix-matrix product rountines.</dd>
+ <dd>Means the algorithm can take advantage of multicore processors via OpenMP. "Implicit" means the algorithm itself is not parallelized, but that it relies on parallelized matrix-matrix product routines.</dd>
<dt><b>Explicit Multi Threading (MT)</b></dt>
<dd>Means the algorithm is explicitly parallelized to take advantage of multicore processors via OpenMP.</dd>
<dt><b>Meta-unroller</b></dt>
diff --git a/doc/TopicMultithreading.dox b/doc/TopicMultithreading.dox
index 47c9b261f..7a8ff301f 100644
--- a/doc/TopicMultithreading.dox
+++ b/doc/TopicMultithreading.dox
@@ -4,22 +4,25 @@ namespace Eigen {
\section TopicMultiThreading_MakingEigenMT Make Eigen run in parallel
-Some Eigen's algorithms can exploit the multiple cores present in your hardware. To this end, it is enough to enable OpenMP on your compiler, for instance:
- * GCC: \c -fopenmp
- * ICC: \c -openmp
- * MSVC: check the respective option in the build properties.
-You can control the number of thread that will be used using either the OpenMP API or Eigen's API using the following priority:
+Some of %Eigen's algorithms can exploit the multiple cores present in your hardware.
+To this end, it is enough to enable OpenMP on your compiler, for instance:
+ - GCC: \c -fopenmp
+ - ICC: \c -openmp
+ - MSVC: check the respective option in the build properties.
+
+You can control the number of threads that will be used either through the OpenMP API or through %Eigen's API, with the following priority:
\code
OMP_NUM_THREADS=n ./my_program
omp_set_num_threads(n);
Eigen::setNbThreads(n);
\endcode
-Unless setNbThreads has been called, Eigen uses the number of threads specified by OpenMP. You can restore this behavior by calling \code setNbThreads(0); \endcode
+Unless `setNbThreads` has been called, %Eigen uses the number of threads specified by OpenMP.
+You can restore this behavior by calling `setNbThreads(0);`.
You can query the number of threads that will be used with:
\code
n = Eigen::nbThreads( );
\endcode
-You can disable Eigen's multi threading at compile time by defining the EIGEN_DONT_PARALLELIZE preprocessor token.
+You can disable %Eigen's multi threading at compile time by defining the \link TopicPreprocessorDirectivesPerformance EIGEN_DONT_PARALLELIZE \endlink preprocessor token.
Currently, the following algorithms can make use of multi-threading:
- general dense matrix - matrix products
@@ -29,9 +32,17 @@ Currently, the following algorithms can make use of multi-threading:
- BiCGSTAB with a row-major sparse matrix format.
- LeastSquaresConjugateGradient
+\warning On most OS it is <strong>very important</strong> to limit the number of threads to the number of physical cores, otherwise significant slowdowns are expected, especially for operations involving dense matrices.
+
+Indeed, the principle of hyper-threading is to run multiple threads (in most cases 2) on a single core in an interleaved manner.
+However, %Eigen's matrix-matrix product kernel is fully optimized and already exploits nearly 100% of the CPU capacity.
+Consequently, there is no room for running multiple such threads on a single core, and the performance would drop significantly because of cache pollution and other sources of overhead.
+At this point you are probably wondering why %Eigen does not limit itself to the number of physical cores.
+This is simply because OpenMP does not provide a way to query the number of physical cores, and thus %Eigen launches as many threads as there are <i>cores</i> reported by OpenMP.
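+
+For instance, on a machine known to have four physical cores with hyper-threading enabled, one can explicitly limit %Eigen as follows (a sketch; the core count is an assumption):
+\code
+Eigen::setNbThreads(4);   // restrict Eigen to the 4 physical cores
+\endcode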
+
\section TopicMultiThreading_UsingEigenWithMT Using Eigen in a multi-threaded application
-In the case your own application is multithreaded, and multiple threads make calls to Eigen, then you have to initialize Eigen by calling the following routine \b before creating the threads:
+In the case your own application is multithreaded, and multiple threads make calls to %Eigen, then you have to initialize %Eigen by calling the following routine \b before creating the threads:
\code
#include <Eigen/Core>
@@ -43,12 +54,14 @@ int main(int argc, char** argv)
}
\endcode
-\note With Eigen 3.3, and a fully C++11 compliant compiler (i.e., <a href="http://en.cppreference.com/w/cpp/language/storage_duration#Static_local_variables">thread-safe static local variable initialization</a>), then calling \c initParallel() is optional.
+\note With %Eigen 3.3, and a fully C++11 compliant compiler (i.e., <a href="http://en.cppreference.com/w/cpp/language/storage_duration#Static_local_variables">thread-safe static local variable initialization</a>), then calling \c initParallel() is optional.
-\warning note that all functions generating random matrices are \b not re-entrant nor thread-safe. Those include DenseBase::Random(), and DenseBase::setRandom() despite a call to Eigen::initParallel(). This is because these functions are based on std::rand which is not re-entrant. For thread-safe random generator, we recommend the use of boost::random or c++11 random feature.
+\warning Note that all functions generating random matrices are \b not re-entrant nor thread-safe. Those include DenseBase::Random(), and DenseBase::setRandom() despite a call to `Eigen::initParallel()`. This is because these functions are based on `std::rand` which is not re-entrant.
+For a thread-safe random generator, we recommend the use of c++11 random generators (\link DenseBase::NullaryExpr(Index, const CustomNullaryOp&) example \endlink) or `boost::random`.
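+For instance, here is a minimal sketch based on \cpp11 random generators and NullaryExpr (the helper \c random_matrix is only illustrative):
+\code
+#include <random>
+#include <Eigen/Core>
+
+Eigen::MatrixXd random_matrix(Eigen::Index rows, Eigen::Index cols, unsigned int seed)
+{
+  std::mt19937 gen(seed);                                   // one generator per thread
+  std::uniform_real_distribution<double> dis(-1.0, 1.0);
+  return Eigen::MatrixXd::NullaryExpr(rows, cols, [&]() { return dis(gen); });
+}
+\endcode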
-In the case your application is parallelized with OpenMP, you might want to disable Eigen's own parallization as detailed in the previous section.
+In the case your application is parallelized with OpenMP, you might want to disable %Eigen's own parallelization as detailed in the previous section.
+\warning Using OpenMP with custom scalar types that might throw exceptions can lead to unexpected behaviour in the event of throwing.
*/
}
diff --git a/doc/TutorialBlockOperations.dox b/doc/TutorialBlockOperations.dox
index a2d8c97cc..df277482c 100644
--- a/doc/TutorialBlockOperations.dox
+++ b/doc/TutorialBlockOperations.dox
@@ -167,6 +167,20 @@ matrix.rightCols(q);\endcode </td>
<td>\code
matrix.rightCols<q>();\endcode </td>
</tr>
+<tr><td>%Block containing the q columns starting from i
+ \link DenseBase::middleCols() * \endlink</td>
+ <td>\code
+matrix.middleCols(i,q);\endcode </td>
+ <td>\code
+matrix.middleCols<q>(i);\endcode </td>
+</tr>
+<tr><td>%Block containing the q rows starting from i
+ \link DenseBase::middleRows() * \endlink</td>
+ <td>\code
+matrix.middleRows(i,q);\endcode </td>
+ <td>\code
+matrix.middleRows<q>(i);\endcode </td>
+</tr>
</table>
Here is a simple example illustrating the use of the operations presented above:
diff --git a/doc/TutorialGeometry.dox b/doc/TutorialGeometry.dox
index 2e1420f98..1d214f355 100644
--- a/doc/TutorialGeometry.dox
+++ b/doc/TutorialGeometry.dox
@@ -111,7 +111,7 @@ rot3 = rot1.slerp(alpha,rot2);\endcode</td></tr>
<a href="#" class="top">top</a>\section TutorialGeoTransform Affine transformations
-Generic affine transformations are represented by the Transform class which internaly
+Generic affine transformations are represented by the Transform class which internally
is a (Dim+1)^2 matrix. In Eigen we have chosen to not distinguish between points and
vectors such that all points are actually represented by displacement vectors from the
origin ( \f$ \mathbf{p} \equiv \mathbf{p}-0 \f$ ). With that in mind, real points and
@@ -232,8 +232,8 @@ On the other hand, since there exist 24 different conventions, they are pretty c
to create a rotation matrix according to the 2-1-2 convention.</td><td>\code
Matrix3f m;
m = AngleAxisf(angle1, Vector3f::UnitZ())
- * AngleAxisf(angle2, Vector3f::UnitY())
- * AngleAxisf(angle3, Vector3f::UnitZ());
+ * * AngleAxisf(angle2, Vector3f::UnitY())
+ * * AngleAxisf(angle3, Vector3f::UnitZ());
\endcode</td></tr>
</table>
diff --git a/doc/TutorialLinearAlgebra.dox b/doc/TutorialLinearAlgebra.dox
index cb92ceeae..8042fcad3 100644
--- a/doc/TutorialLinearAlgebra.dox
+++ b/doc/TutorialLinearAlgebra.dox
@@ -14,7 +14,7 @@ QR, %SVD, eigendecompositions... After reading this page, don't miss our
\f[ Ax \: = \: b \f]
Where \a A and \a b are matrices (\a b could be a vector, as a special case). You want to find a solution \a x.
-\b The \b solution: You can choose between various decompositions, depending on what your matrix \a A looks like,
+\b The \b solution: You can choose between various decompositions, depending on the properties of your matrix \a A,
and depending on whether you favor speed or accuracy. However, let's start with an example that works in all cases,
and is a good compromise:
<table class="example">
@@ -34,7 +34,7 @@ Vector3f x = dec.solve(b);
Here, ColPivHouseholderQR is a QR decomposition with column pivoting. It's a good compromise for this tutorial, as it
works for all matrices while being quite fast. Here is a table of some other decompositions that you can choose from,
-depending on your matrix and the trade-off you want to make:
+depending on your matrix, the problem you are trying to solve, and the trade-off you want to make:
<table class="manual">
<tr>
@@ -73,7 +73,7 @@ depending on your matrix and the trade-off you want to make:
<td>ColPivHouseholderQR</td>
<td>colPivHouseholderQr()</td>
<td>None</td>
- <td>++</td>
+ <td>+</td>
<td>-</td>
<td>+++</td>
</tr>
@@ -86,6 +86,14 @@ depending on your matrix and the trade-off you want to make:
<td>+++</td>
</tr>
<tr class="alt">
+ <td>CompleteOrthogonalDecomposition</td>
+ <td>completeOrthogonalDecomposition()</td>
+ <td>None</td>
+ <td>+</td>
+ <td>-</td>
+ <td>+++</td>
+ </tr>
+ <tr class="alt">
<td>LLT</td>
<td>llt()</td>
<td>Positive definite</td>
@@ -102,20 +110,31 @@ depending on your matrix and the trade-off you want to make:
<td>++</td>
</tr>
<tr class="alt">
+ <td>BDCSVD</td>
+ <td>bdcSvd()</td>
+ <td>None</td>
+ <td>-</td>
+ <td>-</td>
+ <td>+++</td>
+ </tr>
+ <tr class="alt">
<td>JacobiSVD</td>
<td>jacobiSvd()</td>
<td>None</td>
- <td>- -</td>
+ <td>-</td>
<td>- - -</td>
<td>+++</td>
</tr>
</table>
+To get an overview of the true relative speed of the different decompositions, check this \link DenseDecompositionBenchmark benchmark \endlink.
-All of these decompositions offer a solve() method that works as in the above example.
+All of these decompositions offer a solve() method that works as in the above example.
-For example, if your matrix is positive definite, the above table says that a very good
-choice is then the LLT or LDLT decomposition. Here's an example, also demonstrating that using a general
-matrix (not a vector) as right hand side is possible.
+If you know more about the properties of your matrix, you can use the above table to select the best method.
+For example, a good choice for solving linear systems with a non-symmetric matrix of full rank is PartialPivLU.
+If, instead, you know that your matrix is symmetric and positive definite, the above table says that
+a very good choice is the LLT or LDLT decomposition. Here's an example, also demonstrating that using a general
+matrix (not a vector) as right hand side is possible:
<table class="example">
<tr><th>Example:</th><th>Output:</th></tr>
@@ -129,7 +148,34 @@ For a \ref TopicLinearAlgebraDecompositions "much more complete table" comparing
supports many other decompositions), see our special page on
\ref TopicLinearAlgebraDecompositions "this topic".
-\section TutorialLinAlgSolutionExists Checking if a solution really exists
+
+\section TutorialLinAlgLeastsquares Least squares solving
+
+The most general and accurate method to solve under- or over-determined linear systems
+in the least squares sense is the SVD decomposition. Eigen provides two implementations.
+The recommended one is the BDCSVD class, which scales well for large problems
+and automatically falls back to the JacobiSVD class for smaller problems.
+For both classes, the solve() method solves the linear system in the least-squares
+sense.
+
+Here is an example:
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr>
+ <td>\include TutorialLinAlgSVDSolve.cpp </td>
+ <td>\verbinclude TutorialLinAlgSVDSolve.out </td>
+</tr>
+</table>
+
+An alternative to the SVD, which is usually faster and about as accurate, is CompleteOrthogonalDecomposition.
+
+Again, if you know more about the problem, the table above contains methods that are potentially faster.
+If your matrix is full rank, HouseholderQR is the method of choice. If your matrix is full rank and well conditioned,
+using the Cholesky decomposition (LLT) on the matrix of the normal equations can be faster still.
+Our page on \link LeastSquares least squares solving \endlink has more details.
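+
+For illustration, here is a minimal sketch of the normal-equations approach mentioned above, assuming a MatrixXd \c A and a VectorXd \c b; it is fast, but it squares the condition number of \c A:
+\code
+// Solve min |Ax-b|^2 through the normal equations A^T A x = A^T b.
+// LLT requires A^T A to be positive definite, i.e., A must have full column rank.
+VectorXd x = (A.transpose() * A).llt().solve(A.transpose() * b);
+\endcode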
+
+
+\section TutorialLinAlgSolutionExists Checking if a matrix is singular
Only you know what error margin you want to allow for a solution to be considered valid.
So Eigen lets you do this computation for yourself, if you want to, as in this example:
@@ -162,11 +208,11 @@ very rare. The call to info() is to check for this possibility.
\section TutorialLinAlgInverse Computing inverse and determinant
First of all, make sure that you really want this. While inverse and determinant are fundamental mathematical concepts,
-in \em numerical linear algebra they are not as popular as in pure mathematics. Inverse computations are often
+in \em numerical linear algebra they are not as useful as in pure mathematics. Inverse computations are often
advantageously replaced by solve() operations, and the determinant is often \em not a good way of checking if a matrix
is invertible.
-However, for \em very \em small matrices, the above is not true, and inverse and determinant can be very useful.
+However, for \em very \em small matrices, the above may not be true, and inverse and determinant can be very useful.
While certain decompositions, such as PartialPivLU and FullPivLU, offer inverse() and determinant() methods, you can also
call inverse() and determinant() directly on a matrix. If your matrix is of a very small fixed size (at most 4x4) this
@@ -181,25 +227,6 @@ Here is an example:
</tr>
</table>
-\section TutorialLinAlgLeastsquares Least squares solving
-
-The most accurate method to do least squares solving is with a SVD decomposition. Eigen provides one
-as the JacobiSVD class, and its solve() is doing least-squares solving.
-
-Here is an example:
-<table class="example">
-<tr><th>Example:</th><th>Output:</th></tr>
-<tr>
- <td>\include TutorialLinAlgSVDSolve.cpp </td>
- <td>\verbinclude TutorialLinAlgSVDSolve.out </td>
-</tr>
-</table>
-
-Another methods, potentially faster but less reliable, are to use a Cholesky decomposition of the
-normal matrix or a QR decomposition. Our page on \link LeastSquares least squares solving \endlink
-has more details.
-
-
\section TutorialLinAlgSeparateComputation Separating the computation from the construction
In the above examples, the decomposition was computed at the same time that the decomposition object was constructed.
diff --git a/doc/TutorialMapClass.dox b/doc/TutorialMapClass.dox
index f8fb0fd2f..caa2539d8 100644
--- a/doc/TutorialMapClass.dox
+++ b/doc/TutorialMapClass.dox
@@ -29,9 +29,9 @@ Map<const Vector4i> mi(pi);
\endcode
where \c pi is an \c int \c *. In this case the size does not have to be passed to the constructor, because it is already specified by the Matrix/Array type.
-Note that Map does not have a default constructor; you \em must pass a pointer to intialize the object. However, you can work around this requirement (see \ref TutorialMapPlacementNew).
+Note that Map does not have a default constructor; you \em must pass a pointer to initialize the object. However, you can work around this requirement (see \ref TutorialMapPlacementNew).
-Map is flexible enough to accomodate a variety of different data representations. There are two other (optional) template parameters:
+Map is flexible enough to accommodate a variety of different data representations. There are two other (optional) template parameters:
\code
Map<typename MatrixType,
int MapOptions,
diff --git a/doc/TutorialMatrixClass.dox b/doc/TutorialMatrixClass.dox
index 7ea0cd789..2c452220f 100644
--- a/doc/TutorialMatrixClass.dox
+++ b/doc/TutorialMatrixClass.dox
@@ -101,13 +101,41 @@ Matrix3f a(3,3);
\endcode
and is a no-operation.
-Finally, we also offer some constructors to initialize the coefficients of small fixed-size vectors up to size 4:
+Matrices and vectors can also be initialized from lists of coefficients.
+Prior to C++11, this feature is limited to small fixed-size vectors up to size 4:
\code
Vector2d a(5.0, 6.0);
Vector3d b(5.0, 6.0, 7.0);
Vector4d c(5.0, 6.0, 7.0, 8.0);
\endcode
+If C++11 is enabled, fixed-size column or row vectors of arbitrary size can be initialized by passing an arbitrary number of coefficients:
+\code
+Vector2i a(1, 2); // A column vector containing the elements {1, 2}
+Matrix<int, 5, 1> b {1, 2, 3, 4, 5};   // A column-vector containing the elements {1, 2, 3, 4, 5}
+Matrix<int, 1, 5> c = {1, 2, 3, 4, 5}; // A row-vector containing the elements {1, 2, 3, 4, 5}
+\endcode
+
+In the general case of matrices and vectors with either fixed or runtime sizes,
+coefficients have to be grouped by rows and passed as an initializer list of initializer lists (\link Matrix::Matrix(const std::initializer_list<std::initializer_list<Scalar>>&) details \endlink):
+\code
+MatrixXi a { // construct a 2x2 matrix
+ {1, 2}, // first row
+ {3, 4} // second row
+};
+Matrix<double, 2, 3> b {
+ {2, 3, 4},
+ {5, 6, 7},
+};
+\endcode
+
+For column or row vectors, implicit transposition is allowed.
+This means that a column vector can be initialized from a single row:
+\code
+VectorXd a {{1.5, 2.5, 3.5}}; // A column-vector with 3 coefficients
+RowVectorXd b {{1.0, 2.0, 3.0, 4.0}}; // A row-vector with 4 coefficients
+\endcode
+
\section TutorialMatrixCoeffAccessors Coefficient accessors
The primary coefficient accessors and mutators in Eigen are the overloaded parenthesis operators.
diff --git a/doc/TutorialReshape.dox b/doc/TutorialReshape.dox
new file mode 100644
index 000000000..5b4022a3b
--- /dev/null
+++ b/doc/TutorialReshape.dox
@@ -0,0 +1,82 @@
+namespace Eigen {
+
+/** \eigenManualPage TutorialReshape Reshape
+
+Since version 3.4, %Eigen exposes convenient methods to reshape a matrix to another matrix of a different size, or to a vector.
+All cases are handled via the DenseBase::reshaped(NRowsType,NColsType) and DenseBase::reshaped() functions.
+Those functions do not perform in-place reshaping, but instead return a <i>view</i> on the input expression.
+
+\eigenAutoToc
+
+\section TutorialReshapeMat2Mat Reshaped 2D views
+
+The most general reshaping transformation is handled via `reshaped(nrows,ncols)`.
+Here is an example reshaping a 4x4 matrix to a 2x8 one:
+
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr><td>
+\include MatrixBase_reshaped_int_int.cpp
+</td>
+<td>
+\verbinclude MatrixBase_reshaped_int_int.out
+</td></tr></table>
+
+By default, the input coefficients are always interpreted in column-major order regardless of the storage order of the input expression.
+For more control on the ordering, compile-time sizes, and automatic size deduction, please see the documentation of DenseBase::reshaped(NRowsType,NColsType), which contains all the details along with many examples.
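+
+As an illustration, here is a minimal sketch of some of those variants; see the reference documentation of DenseBase::reshaped(NRowsType,NColsType) for the authoritative list:
+\code
+Matrix4i A = Matrix4i::Random();
+auto r1 = A.reshaped(2, 8);            // runtime sizes (a view, not a copy)
+auto r2 = A.reshaped(fix<2>, fix<8>);  // compile-time sizes
+auto r3 = A.reshaped(2, AutoSize);     // the number of columns (here 8) is deduced
+auto r4 = A.reshaped<RowMajor>(2, 8);  // pick the input coefficients in row-major order
+\endcode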
+
+
+\section TutorialReshapeMat2Vec 1D linear views
+
+A very common usage of reshaping is to create a 1D linear view over a given 2D matrix or expression.
+In this case, sizes can be deduced and thus omitted as in the following example:
+
+<table class="example">
+<tr><th>Example:</th></tr>
+<tr><td>
+\include MatrixBase_reshaped_to_vector.cpp
+</td></tr>
+<tr><th>Output:</th></tr>
+<tr><td>
+\verbinclude MatrixBase_reshaped_to_vector.out
+</td></tr></table>
+
+This shortcut always returns a column vector and by default input coefficients are always interpreted in column-major order.
+Again, see the documentation of DenseBase::reshaped() for more control on the ordering.
+
+\section TutorialReshapeInPlace In-place reshaping
+
+The above examples create reshaped views, but what about reshaping a given matrix in place?
+Of course this task is only conceivable for matrices and arrays having runtime dimensions.
+In many cases, this can be accomplished via PlainObjectBase::resize(Index,Index):
+
+<table class="example">
+<tr><th>Example:</th></tr>
+<tr><td>
+\include Tutorial_reshaped_vs_resize_1.cpp
+</td></tr>
+<tr><th>Output:</th></tr>
+<tr><td>
+\verbinclude Tutorial_reshaped_vs_resize_1.out
+</td></tr></table>
+
+However, beware that, unlike \c reshaped, the result of \c resize depends on the input storage order.
+It thus behaves similarly to `reshaped<AutoOrder>`:
+
+<table class="example">
+<tr><th>Example:</th></tr>
+<tr><td>
+\include Tutorial_reshaped_vs_resize_2.cpp
+</td></tr>
+<tr><th>Output:</th></tr>
+<tr><td>
+\verbinclude Tutorial_reshaped_vs_resize_2.out
+</td></tr></table>
+
+Finally, assigning a reshaped matrix to itself is currently not supported and will result in undefined behavior because of \link TopicAliasing aliasing \endlink.
+The following is forbidden: \code A = A.reshaped(2,8); \endcode
+This is OK: \code A = A.reshaped(2,8).eval(); \endcode
+
+*/
+
+}
diff --git a/doc/TutorialReshapeSlicing.dox b/doc/TutorialReshapeSlicing.dox
deleted file mode 100644
index 3730a5de6..000000000
--- a/doc/TutorialReshapeSlicing.dox
+++ /dev/null
@@ -1,65 +0,0 @@
-namespace Eigen {
-
-/** \eigenManualPage TutorialReshapeSlicing Reshape and Slicing
-
-%Eigen does not expose convenient methods to take slices or to reshape a matrix yet.
-Nonetheless, such features can easily be emulated using the Map class.
-
-\eigenAutoToc
-
-\section TutorialReshape Reshape
-
-A reshape operation consists in modifying the sizes of a matrix while keeping the same coefficients.
-Instead of modifying the input matrix itself, which is not possible for compile-time sizes, the approach consist in creating a different \em view on the storage using class Map.
-Here is a typical example creating a 1D linear view of a matrix:
-
-<table class="example">
-<tr><th>Example:</th><th>Output:</th></tr>
-<tr><td>
-\include Tutorial_ReshapeMat2Vec.cpp
-</td>
-<td>
-\verbinclude Tutorial_ReshapeMat2Vec.out
-</td></tr></table>
-
-Remark how the storage order of the input matrix modifies the order of the coefficients in the linear view.
-Here is another example reshaping a 2x6 matrix to a 6x2 one:
-<table class="example">
-<tr><th>Example:</th><th>Output:</th></tr>
-<tr><td>
-\include Tutorial_ReshapeMat2Mat.cpp
-</td>
-<td>
-\verbinclude Tutorial_ReshapeMat2Mat.out
-</td></tr></table>
-
-
-
-\section TutorialSlicing Slicing
-
-Slicing consists in taking a set of rows, columns, or elements, uniformly spaced within a matrix.
-Again, the class Map allows to easily mimic this feature.
-
-For instance, one can skip every P elements in a vector:
-<table class="example">
-<tr><th>Example:</th><th>Output:</th></tr>
-<tr><td>
-\include Tutorial_SlicingVec.cpp
-</td>
-<td>
-\verbinclude Tutorial_SlicingVec.out
-</td></tr></table>
-
-One can olso take one column over three using an adequate outer-stride or inner-stride depending on the actual storage order:
-<table class="example">
-<tr><th>Example:</th><th>Output:</th></tr>
-<tr><td>
-\include Tutorial_SlicingCol.cpp
-</td>
-<td>
-\verbinclude Tutorial_SlicingCol.out
-</td></tr></table>
-
-*/
-
-}
diff --git a/doc/TutorialSTL.dox b/doc/TutorialSTL.dox
new file mode 100644
index 000000000..9a825bc48
--- /dev/null
+++ b/doc/TutorialSTL.dox
@@ -0,0 +1,66 @@
+namespace Eigen {
+
+/** \eigenManualPage TutorialSTL STL iterators and algorithms
+
+Since version 3.4, %Eigen's dense matrices and arrays provide STL-compatible iterators.
+As demonstrated below, this makes them naturally compatible with range-for-loops and STL's algorithms.
+
+\eigenAutoToc
+
+\section TutorialSTLVectors Iterating over 1D arrays and vectors
+
+Any dense 1D expression exposes a pair of `begin()/end()` methods to iterate over it.
+
+This directly enables C++11 range-based for loops:
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr><td>
+\include Tutorial_range_for_loop_1d_cxx11.cpp
+</td>
+<td>
+\verbinclude Tutorial_range_for_loop_1d_cxx11.out
+</td></tr></table>
+
+One-dimensional expressions can also easily be passed to STL algorithms:
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr><td>
+\include Tutorial_std_sort.cpp
+</td>
+<td>
+\verbinclude Tutorial_std_sort.out
+</td></tr></table>
+
+Similarly to `std::vector`, 1D expressions also expose a pair of `cbegin()/cend()` methods to conveniently get const iterators on a non-const object.
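+For instance, a minimal sketch:
+\code
+VectorXd v = VectorXd::LinSpaced(4, 0, 1);
+for(auto it = v.cbegin(); it != v.cend(); ++it) // const iterators, even though v itself is not const
+  std::cout << *it << " ";
+\endcode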
+
+\section TutorialSTLMatrices Iterating over coefficients of 2D arrays and matrices
+
+STL iterators are intrinsically designed to iterate over 1D structures.
+This is why `begin()/end()` methods are disabled for 2D expressions.
+Iterating over all coefficients of a 2D expression is still easily accomplished by creating a 1D linear view through `reshaped()`:
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr><td>
+\include Tutorial_range_for_loop_2d_cxx11.cpp
+</td>
+<td>
+\verbinclude Tutorial_range_for_loop_2d_cxx11.out
+</td></tr></table>
+
+\section TutorialSTLRowsColumns Iterating over rows or columns of 2D arrays and matrices
+
+It is also possible to get iterators over rows or columns of 2D expressions.
+Those are available through the `rowwise()` and `colwise()` proxies.
+Here is an example sorting each row of a matrix:
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr><td>
+\include Tutorial_std_sort_rows_cxx11.cpp
+</td>
+<td>
+\verbinclude Tutorial_std_sort_rows_cxx11.out
+</td></tr></table>
+
+*/
+
+}
diff --git a/doc/TutorialSlicingIndexing.dox b/doc/TutorialSlicingIndexing.dox
new file mode 100644
index 000000000..98ace43e4
--- /dev/null
+++ b/doc/TutorialSlicingIndexing.dox
@@ -0,0 +1,244 @@
+namespace Eigen {
+
+/** \eigenManualPage TutorialSlicingIndexing Slicing and Indexing
+
+This page presents the numerous possibilities offered by `operator()` to index a subset of rows and columns.
+This API was introduced in %Eigen 3.4.
+It supports all the features offered by the \link TutorialBlockOperations block API \endlink, and much more.
+In particular, it supports \b slicing, that is, taking a set of rows, columns, or elements that are uniformly spaced within a matrix or indexed from an array of indices.
+
+\eigenAutoToc
+
+\section TutorialSlicingOverview Overview
+
+All the aforementioned operations are handled through the generic DenseBase::operator()(const RowIndices&, const ColIndices&) method.
+Each argument can be:
+ - An integer indexing a single row or column, including symbolic indices.
+ - The symbol Eigen::all representing the whole set of respective rows or columns in increasing order.
+ - An ArithmeticSequence as constructed by the Eigen::seq, Eigen::seqN, or Eigen::lastN functions.
+ - Any 1D vector/array of integers including %Eigen's vector/array, expressions, std::vector, std::array, as well as plain C arrays: `int[N]`.
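+
+For illustration, here is a minimal sketch combining a few of these argument kinds, assuming a MatrixXd \c A:
+\code
+std::vector<int> rows{2, 0, 3};
+MatrixXd s1 = A(rows, all);         // rows 2, 0, 3 (in that order), all columns
+MatrixXd s2 = A(seqN(0, 2), last);  // first two rows of the last column
+\endcode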
+
+More generally, it can accept any object exposing the following two member functions:
+ \code
+ <integral type> operator[](<integral type>) const;
+ <integral type> size() const;
+ \endcode
+where `<integral type>` stands for any integer type compatible with Eigen::Index (i.e. `std::ptrdiff_t`).
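+
+For example, here is a minimal sketch of a hypothetical type satisfying this interface (the name \c every_other is ours, it is not part of %Eigen):
+\code
+struct every_other {
+  Index n;                                         // number of indices
+  Index size() const { return n; }
+  Index operator[](Index i) const { return 2*i; }  // 0, 2, 4, ...
+};
+// A(every_other{3}, all) then references rows 0, 2, and 4 of A.
+\endcode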
+
+\section TutorialSlicingBasic Basic slicing
+
+Taking a set of rows, columns, or elements, uniformly spaced within a matrix or vector is achieved through the Eigen::seq or Eigen::seqN functions where "seq" stands for arithmetic sequence. Their signatures are summarized below:
+
+<table class="manual">
+<tr>
+ <th>function</th>
+ <th>description</th>
+ <th>example</th>
+</tr>
+<tr>
+ <td>\code seq(firstIdx,lastIdx) \endcode</td>
+ <td>represents the sequence of integers ranging from \c firstIdx to \c lastIdx</td>
+ <td>\code seq(2,5) <=> {2,3,4,5} \endcode</td>
+</tr>
+<tr>
+ <td>\code seq(firstIdx,lastIdx,incr) \endcode</td>
+ <td>same but using the increment \c incr to advance from one index to the next</td>
+ <td>\code seq(2,8,2) <=> {2,4,6,8} \endcode</td>
+</tr>
+<tr>
+ <td>\code seqN(firstIdx,size) \endcode</td>
+ <td>represents the sequence of \c size integers starting from \c firstIdx</td>
+ <td>\code seqN(2,5) <=> {2,3,4,5,6} \endcode</td>
+</tr>
+<tr>
+ <td>\code seqN(firstIdx,size,incr) \endcode</td>
+ <td>same but using the increment \c incr to advance from one index to the next</td>
+ <td>\code seqN(2,3,3) <=> {2,5,8} \endcode</td>
+</tr>
+</table>
+
+The \c firstIdx and \c lastIdx parameters can also be defined with the help of the Eigen::last symbol representing the index of the last row, column or element of the underlying matrix/vector once the arithmetic sequence is passed to it through operator().
+Here are some examples for a 2D array/matrix \c A and a 1D array/vector \c v.
+<table class="manual">
+<tr>
+ <th>Intent</th>
+ <th>Code</th>
+ <th>Block-API equivalence</th>
+</tr>
+<tr>
+ <td>Bottom-left corner starting at row \c i with \c n columns</td>
+ <td>\code A(seq(i,last), seqN(0,n)) \endcode</td>
+ <td>\code A.bottomLeftCorner(A.rows()-i,n) \endcode</td>
+</tr>
+<tr>
+ <td>%Block starting at \c i,j having \c m rows, and \c n columns</td>
+  <td>\code A(seqN(i,m), seqN(j,n)) \endcode</td>
+ <td>\code A.block(i,j,m,n) \endcode</td>
+</tr>
+<tr>
+ <td>%Block starting at \c i0,j0 and ending at \c i1,j1</td>
+  <td>\code A(seq(i0,i1), seq(j0,j1)) \endcode</td>
+ <td>\code A.block(i0,j0,i1-i0+1,j1-j0+1) \endcode</td>
+</tr>
+<tr>
+ <td>Even columns of A</td>
+ <td>\code A(all, seq(0,last,2)) \endcode</td>
+ <td></td>
+</tr>
+<tr>
+  <td>First \c n odd rows of A</td>
+ <td>\code A(seqN(1,n,2), all) \endcode</td>
+ <td></td>
+</tr>
+<tr>
+  <td>The second to last column</td>
+ <td>\code A(all, last-1) \endcode</td>
+ <td>\code A.col(A.cols()-2) \endcode</td>
+</tr>
+<tr>
+ <td>The middle row</td>
+ <td>\code A(last/2,all) \endcode</td>
+ <td>\code A.row((A.rows()-1)/2) \endcode</td>
+</tr>
+<tr>
+ <td>Last elements of v starting at i</td>
+ <td>\code v(seq(i,last)) \endcode</td>
+ <td>\code v.tail(v.size()-i) \endcode</td>
+</tr>
+<tr>
+ <td>Last \c n elements of v</td>
+ <td>\code v(seq(last+1-n,last)) \endcode</td>
+ <td>\code v.tail(n) \endcode</td>
+</tr>
+</table>
+
+As seen in the last example, referencing the <i>last n</i> elements (or rows/columns) is a bit cumbersome to write.
+This becomes even more tricky and error-prone with a non-default increment.
+This is where \link Eigen::lastN(SizeType) Eigen::lastN(size) \endlink and \link Eigen::lastN(SizeType,IncrType) Eigen::lastN(size,incr) \endlink come in handy:
+
+<table class="manual">
+<tr>
+ <th>Intent</th>
+ <th>Code</th>
+ <th>Block-API equivalence</th>
+</tr>
+<tr>
+ <td>Last \c n elements of v</td>
+ <td>\code v(lastN(n)) \endcode</td>
+ <td>\code v.tail(n) \endcode</td>
+</tr>
+<tr>
+  <td>Bottom-right corner of A of size \c m times \c n</td>
+  <td>\code A(lastN(m), lastN(n)) \endcode</td>
+  <td>\code A.bottomRightCorner(m,n) \endcode</td>
+</tr>
+<tr>
+  <td>Last \c n columns, taking one column out of three</td>
+ <td>\code A(all, lastN(n,3)) \endcode</td>
+ <td></td>
+</tr>
+</table>
+
+\section TutorialSlicingFixed Compile time size and increment
+
+In terms of performance, %Eigen and the compiler can take advantage of compile-time size and increment.
+To this end, you can enforce compile-time parameters using Eigen::fix<val>.
+Such a compile-time value can be combined with the Eigen::last symbol:
+\code v(seq(last-fix<7>, last-fix<2>))
+\endcode
+In this example %Eigen knows at compile-time that the returned expression has 6 elements.
+It is equivalent to:
+\code v(seqN(last-7, fix<6>))
+\endcode
+
+We can revisit the <i>even columns of A</i> example as follows:
+\code A(all, seq(0,last,fix<2>))
+\endcode
+
+
+\section TutorialSlicingReverse Reverse order
+
+Row/column indices can also be enumerated in decreasing order using a negative increment.
+For instance, every other column of A, from column 20 down to column 10:
+\code A(all, seq(20, 10, fix<-2>))
+\endcode
+The last \c n rows starting from the last one:
+\code A(seqN(last, n, fix<-1>), all)
+\endcode
+You can also use the ArithmeticSequence::reverse() method to reverse its order.
+The previous example can thus also be written as:
+\code A(lastN(n).reverse(), all)
+\endcode
+
+
+\section TutorialSlicingArray Array of indices
+
+The generic `operator()` can also take as input an arbitrary list of row or column indices stored as an `ArrayXi`, a `std::vector<int>`, a `std::array<int,N>`, etc.
+
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr><td>
+\include Slicing_stdvector_cxx11.cpp
+</td>
+<td>
+\verbinclude Slicing_stdvector_cxx11.out
+</td></tr></table>
+
+You can also directly pass a static array:
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr><td>
+\include Slicing_rawarray_cxx11.cpp
+</td>
+<td>
+\verbinclude Slicing_rawarray_cxx11.out
+</td></tr></table>
+
+or expressions:
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr><td>
+\include Slicing_arrayexpr.cpp
+</td>
+<td>
+\verbinclude Slicing_arrayexpr.out
+</td></tr></table>
+
+When passing an object with a compile-time size such as `Array4i`, `std::array<int,N>`, or a static array, the returned expression also exhibits compile-time dimensions.
+
+\section TutorialSlicingCustomArray Custom index list
+
+More generally, `operator()` can accept as input any object \c ind of type \c T compatible with:
+\code
+Index s = ind.size(); or Index s = size(ind);
+Index i;
+i = ind[i];
+\endcode
+
+This means you can easily build your own fancy sequence generator and pass it to `operator()`.
+Here is an example enlarging a given matrix while padding the additional first rows and columns through repetition:
+
+<table class="example">
+<tr><th>Example:</th><th>Output:</th></tr>
+<tr><td>
+\include Slicing_custom_padding_cxx11.cpp
+</td>
+<td>
+\verbinclude Slicing_custom_padding_cxx11.out
+</td></tr></table>
+
+<br>
+
+*/
+
+/*
+TODO add:
+so_repeat_inner.cpp
+so_repeleme.cpp
+*/
+}
diff --git a/doc/TutorialSparse.dox b/doc/TutorialSparse.dox
index 352907408..c69171ec5 100644
--- a/doc/TutorialSparse.dox
+++ b/doc/TutorialSparse.dox
@@ -57,10 +57,10 @@ The \c "_" indicates available free space to quickly insert new elements.
Assuming no reallocation is needed, the insertion of a random element is therefore in O(nnz_j) where nnz_j is the number of nonzeros of the respective inner vector.
On the other hand, inserting elements with increasing inner indices in a given inner vector is much more efficient since this only requires to increase the respective \c InnerNNZs entry that is a O(1) operation.
-The case where no empty space is available is a special case, and is refered as the \em compressed mode.
+The case where no empty space is available is a special case, and is referred to as the \em compressed mode.
It corresponds to the widely used Compressed Column (or Row) Storage schemes (CCS or CRS).
Any SparseMatrix can be turned to this form by calling the SparseMatrix::makeCompressed() function.
-In this case, one can remark that the \c InnerNNZs array is redundant with \c OuterStarts because we the equality: \c InnerNNZs[j] = \c OuterStarts[j+1]-\c OuterStarts[j].
+In this case, one can remark that the \c InnerNNZs array is redundant with \c OuterStarts because we have the equality: \c InnerNNZs[j] = \c OuterStarts[j+1]-\c OuterStarts[j].
Therefore, in practice a call to SparseMatrix::makeCompressed() frees this buffer.
It is worth noting that most of our wrappers to external libraries requires compressed matrices as inputs.
@@ -212,7 +212,7 @@ See the SparseMatrix::setFromTriplets() function and class Triplet for more deta
In some cases, however, slightly higher performance, and lower memory consumption can be reached by directly inserting the non-zeros into the destination matrix.
-A typical scenario of this approach is illustrated bellow:
+A typical scenario of this approach is illustrated below:
\code
1: SparseMatrix<double> mat(rows,cols); // default is column major
2: mat.reserve(VectorXi::Constant(cols,6));
diff --git a/doc/UnalignedArrayAssert.dox b/doc/UnalignedArrayAssert.dox
index 95d95a2d5..410c8a58f 100644
--- a/doc/UnalignedArrayAssert.dox
+++ b/doc/UnalignedArrayAssert.dox
@@ -12,7 +12,9 @@ is explained here: http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArr
**** READ THIS WEB PAGE !!! ****"' failed.
</pre>
-There are 4 known causes for this issue. Please read on to understand them and learn how to fix them.
+There are 4 known causes for this issue.
+If you can exclusively target \cpp17 with a recent compiler (e.g., GCC>=7, clang>=5, MSVC>=19.12), then you're lucky: enabling C++17 should be enough (if not, please <a href="http://eigen.tuxfamily.org/bz/">report</a> it to us).
+Otherwise, please read on to understand those issues and learn how to fix them.
\eigenAutoToc
@@ -35,7 +37,7 @@ If you have code like this,
class Foo
{
//...
- Eigen::Vector2d v;
+ Eigen::Vector4d v;
//...
};
//...
@@ -44,27 +46,27 @@ Foo *foo = new Foo;
then you need to read this separate page: \ref TopicStructHavingEigenMembers "Structures Having Eigen Members".
-Note that here, Eigen::Vector2d is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types".
+Note that here, Eigen::Vector4d is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types".
\section c2 Cause 2: STL Containers or manual memory allocation
If you use STL Containers such as std::vector, std::map, ..., with %Eigen objects, or with classes containing %Eigen objects, like this,
\code
-std::vector<Eigen::Matrix2f> my_vector;
-struct my_class { ... Eigen::Matrix2f m; ... };
+std::vector<Eigen::Matrix2d> my_vector;
+struct my_class { ... Eigen::Matrix2d m; ... };
std::map<int, my_class> my_map;
\endcode
then you need to read this separate page: \ref TopicStlContainers "Using STL Containers with Eigen".
-Note that here, Eigen::Matrix2f is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member".
+Note that here, Eigen::Matrix2d is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types" and \ref TopicStructHavingEigenMembers "structures having such Eigen objects as member".
-The same issue will be exhibited by any classes/functions by-passing operator new to allocate memory, that is, by performing custom memory allocation followed by calls to the placement new operator. This is for instance typically the case of \c std::make_shared or \c std::allocate_shared for which is the solution is to use an \ref aligned_allocator "aligned allocator" as detailed in the \ref TopicStlContainers "solution for STL containers".
+The same issue will be exhibited by any class or function bypassing operator new to allocate memory, that is, by performing custom memory allocation followed by calls to the placement new operator. This is for instance typically the case of `std::make_shared` or `std::allocate_shared`, for which the solution is to use an \ref aligned_allocator "aligned allocator" as detailed in the \ref TopicStlContainers "solution for STL containers".
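+
+For instance, here is a minimal sketch of the `std::allocate_shared` workaround, assuming a struct \c Foo with a fixed-size vectorizable %Eigen member:
+\code
+// std::make_shared<Foo>() allocates through the global operator new and may under-align Foo;
+// allocate_shared with Eigen's aligned_allocator performs a properly aligned allocation instead.
+std::shared_ptr<Foo> foo = std::allocate_shared<Foo>(Eigen::aligned_allocator<Foo>());
+\endcode
+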
\section c3 Cause 3: Passing Eigen objects by value
-If some function in your code is getting an Eigen object passed by value, like this,
+If some function in your code is getting an %Eigen object passed by value, like this,
\code
void func(Eigen::Vector4d v);
@@ -90,11 +92,13 @@ then you need to read this separate page: \ref TopicWrongStackAlignment "Compile
Note that here, Eigen::Quaternionf is only used as an example, more generally the issue arises for all \ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen types".
+
\section explanation General explanation of this assertion
-\ref TopicFixedSizeVectorizable "fixed-size vectorizable Eigen objects" must absolutely be created at 16-byte-aligned locations, otherwise SIMD instructions addressing them will crash.
+\ref TopicFixedSizeVectorizable "Fixed-size vectorizable Eigen objects" must absolutely be created at properly aligned locations, otherwise SIMD instructions addressing them will crash.
+For instance, SSE/NEON/MSA/Altivec/VSX targets will require 16-byte alignment, whereas AVX and AVX512 targets may require up to 32- and 64-byte alignment, respectively.
-Eigen normally takes care of these alignment issues for you, by setting an alignment attribute on them and by overloading their "operator new".
+%Eigen normally takes care of these alignment issues for you, by setting an alignment attribute on them and by overloading their `operator new`.
However there are a few corner cases where these alignment settings get overridden: they are the possible causes for this assertion.
@@ -102,18 +106,27 @@ However there are a few corner cases where these alignment settings get overridd
Three possibilities:
<ul>
- <li>Use the \c DontAlign option to Matrix, Array, Quaternion, etc. objects that gives you trouble. This way Eigen won't try to align them, and thus won"t assume any special alignment. On the down side, you will pay the cost of unaligned loads/stores for them, but on modern CPUs, the overhead is either null or marginal. See \link StructHavingEigenMembers_othersolutions here \endlink for an example.</li>
- <li>Define \link TopicPreprocessorDirectivesPerformance EIGEN_DONT_ALIGN_STATICALLY \endlink. That disables all 16-byte (and above) static alignment code, while keeping 16-byte (or above) heap alignment. This has the effect of
+  <li>Use the \c DontAlign option to Matrix, Array, Quaternion, etc. objects that give you trouble. This way %Eigen won't try to over-align them, and thus won't assume any special alignment. On the downside, you will pay the cost of unaligned loads/stores for them, but on modern CPUs, the overhead is either null or marginal. See \link StructHavingEigenMembers_othersolutions here \endlink for an example.</li>
+ <li>Define \link TopicPreprocessorDirectivesPerformance EIGEN_MAX_STATIC_ALIGN_BYTES \endlink to 0. That disables all 16-byte (and above) static alignment code, while keeping 16-byte (or above) heap alignment. This has the effect of
vectorizing fixed-size objects (like Matrix4d) through unaligned stores (as controlled by \link TopicPreprocessorDirectivesPerformance EIGEN_UNALIGNED_VECTORIZE \endlink), while keeping unchanged the vectorization of dynamic-size objects
- (like MatrixXd). But do note that this breaks ABI compatibility with the default behavior of static alignment.</li>
- <li>Or define both \link TopicPreprocessorDirectivesPerformance EIGEN_DONT_VECTORIZE \endlink and EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT. This keeps the
- 16-byte alignment code and thus preserves ABI compatibility, but completely disables vectorization.</li>
+      (like MatrixXd). On 64-byte systems, you might also define it to 16 to disable only 32- and 64-byte over-alignment. But do note that this breaks ABI compatibility with the default behavior of static alignment.</li>
+ <li>Or define both \link TopicPreprocessorDirectivesPerformance EIGEN_DONT_VECTORIZE \endlink and `EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT`. This keeps the
+ 16-byte (or above) alignment code and thus preserves ABI compatibility, but completely disables vectorization.</li>
</ul>
-If you want to know why defining EIGEN_DONT_VECTORIZE does not by itself disable 16-byte alignment and the assertion, here's the explanation:
+If you want to know why defining `EIGEN_DONT_VECTORIZE` does not by itself disable 16-byte (or above) alignment and the assertion, here's the explanation:
It doesn't disable the assertion, because otherwise code that runs fine without vectorization would suddenly crash when enabling vectorization.
-It doesn't disable 16-byte alignment, because that would mean that vectorized and non-vectorized code are not mutually ABI-compatible. This ABI compatibility is very important, even for people who develop only an in-house application, as for instance one may want to have in the same application a vectorized path and a non-vectorized path.
+It doesn't disable 16-byte (or above) alignment, because that would mean that vectorized and non-vectorized code are not mutually ABI-compatible. This ABI compatibility is very important, even for people who develop only an in-house application, as for instance one may want to have in the same application a vectorized path and a non-vectorized path.
+
+\section checkmycode How can I check my code is safe regarding alignment issues?
+
+Unfortunately, there is no way in C++ to detect any of the aforementioned shortcomings at compile time (though static analyzers are becoming more and more powerful and could detect some of them).
+Even at runtime, all we can do is catch invalid unaligned allocations and trigger the explicit assertion mentioned at the beginning of this page.
+Therefore, if your program runs fine on a given system with some given compilation flags, then this does not guarantee that your code is safe. For instance, on most 64-bit systems buffers are aligned on 16-byte boundaries and so, if you do not enable the AVX instruction set, then your code will run fine. On the other hand, the same code may assert when moving to a more exotic platform, or when enabling AVX instructions that require 32-byte alignment by default.
+
+The situation is not hopeless though. Assuming your code is well covered by unit tests, you can check its alignment safety by linking it to a custom malloc library that returns only 8-byte-aligned buffers. This way all alignment shortcomings should pop up. To this end, you must also compile your program with \link TopicPreprocessorDirectivesPerformance EIGEN_MALLOC_ALREADY_ALIGNED=0 \endlink.
+
*/
diff --git a/doc/UsingIntelMKL.dox b/doc/UsingIntelMKL.dox
index a1a3a18f2..fc35c3cf0 100644
--- a/doc/UsingIntelMKL.dox
+++ b/doc/UsingIntelMKL.dox
@@ -63,6 +63,12 @@ In addition you can choose which parts will be substituted by defining one or mu
<tr><td>\c EIGEN_USE_MKL_ALL </td><td>Defines \c EIGEN_USE_BLAS, \c EIGEN_USE_LAPACKE, and \c EIGEN_USE_MKL_VML </td></tr>
</table>
+The \c EIGEN_USE_BLAS and \c EIGEN_USE_LAPACKE* macros can be combined with \c EIGEN_USE_MKL to explicitly tell Eigen that the underlying BLAS/Lapack implementation is Intel MKL.
+The main effect is to enable the MKL direct call feature (\c MKL_DIRECT_CALL).
+This may help to increase performance of some MKL BLAS (?GEMM, ?GEMV, ?TRSM, ?AXPY and ?DOT) and LAPACK (LU, Cholesky and QR) routines for very small matrices.
+MKL direct call can be disabled by defining \c EIGEN_MKL_NO_DIRECT_CALL.
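+
+For instance, here is a minimal sketch; the exact set of macros to define depends on which parts of MKL you want to use:
+\code
+#define EIGEN_USE_BLAS     // dispatch matrix products to BLAS
+#define EIGEN_USE_LAPACKE  // dispatch selected decompositions to LAPACKE
+#define EIGEN_USE_MKL      // tell Eigen the backend is MKL, enabling MKL_DIRECT_CALL
+#include <Eigen/Dense>
+\endcode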
+
+
Note that the BLAS and LAPACKE backends can be enabled for any F77 compatible BLAS and LAPACK libraries. See this \link TopicUsingBlasLapack page \endlink for the details.
Finally, the PARDISO sparse solver shipped with Intel MKL can be used through the \ref PardisoLU, \ref PardisoLLT and \ref PardisoLDLT classes of the \ref PardisoSupport_Module.
diff --git a/doc/UsingNVCC.dox b/doc/UsingNVCC.dox
index f8e755b79..36beb2ddd 100644
--- a/doc/UsingNVCC.dox
+++ b/doc/UsingNVCC.dox
@@ -3,18 +3,16 @@ namespace Eigen {
/** \page TopicCUDA Using Eigen in CUDA kernels
-\b Disclaimer: this page is about an \b experimental feature in %Eigen.
-
-Staring from CUDA 5.0, the CUDA compiler, \c nvcc, is able to properly parse %Eigen's code (almost).
-A few adaptations of the %Eigen's code already allows to use some parts of %Eigen in your own CUDA kernels.
-To this end you need the devel branch of %Eigen, CUDA 5.0 or greater with GCC.
+Starting from CUDA 5.5 and Eigen 3.3, it is possible to use fixed-size Eigen matrices, vectors, and arrays within CUDA kernels. This is especially useful when working on numerous but small problems. By default, when Eigen's headers are included within a .cu file compiled by nvcc, most of Eigen's functions and methods are prefixed by the \c __device__ \c __host__ keywords, making them callable from both host and device code.
+This support can be disabled by defining \c EIGEN_NO_CUDA before including any Eigen header.
+This might be useful to disable some warnings when a .cu file makes use of Eigen on the host side only.
+However, in both cases, the host's SIMD vectorization has to be disabled in .cu files.
+It is thus \b strongly \b recommended to properly move all costly host computation from your .cu files to regular .cpp files.
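+
+For instance, here is a minimal sketch of the latter workflow:
+\code
+// In a .cu file that uses Eigen on the host side only:
+#define EIGEN_NO_CUDA   // strip the __device__ __host__ qualifiers to silence nvcc warnings
+#include <Eigen/Dense>
+\endcode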
Known issues:
- \c nvcc with MS Visual Studio does not work (patch welcome)
- - \c nvcc with \c clang does not work (patch welcome)
-
- \c nvcc 5.5 with gcc-4.7 (or greater) has issues with the standard \c \<limits\> header file. To workaround this, you can add the following before including any other files:
\code
// workaround issue between gcc >= 4.7 and cuda 5.5
diff --git a/doc/eigen_navtree_hacks.js b/doc/eigen_navtree_hacks.js
index bd7e02b38..afb97edf5 100644
--- a/doc/eigen_navtree_hacks.js
+++ b/doc/eigen_navtree_hacks.js
@@ -5,6 +5,7 @@ function generate_autotoc() {
if(headers.length > 1) {
var toc = $("#side-nav").append('<div id="nav-toc" class="toc"><h3>Table of contents</h3></div>');
toc = $("#nav-toc");
+ var footer = $("#nav-path");
var footerHeight = footer.height();
toc = toc.append('<ul></ul>');
toc = toc.find('ul');
@@ -64,14 +65,20 @@ function getNode(o, po)
// Overloaded to adjust the size of the navtree wrt the toc
function resizeHeight()
{
- var toc = $("#nav-toc");
- var tocHeight = toc.height(); // <- we added this line
- var headerHeight = header.height();
- var footerHeight = footer.height();
+ var header = $("#top");
+ var sidenav = $("#side-nav");
+ var content = $("#doc-content");
+ var navtree = $("#nav-tree");
+ var footer = $("#nav-path");
+ var toc = $("#nav-toc");
+
+ var headerHeight = header.outerHeight();
+ var footerHeight = footer.outerHeight();
+ var tocHeight = toc.height();
var windowHeight = $(window).height() - headerHeight - footerHeight;
content.css({height:windowHeight + "px"});
- navtree.css({height:(windowHeight-tocHeight) + "px"}); // <- we modified this line
- sidenav.css({height:(windowHeight) + "px",top: headerHeight+"px"});
+ navtree.css({height:(windowHeight-tocHeight) + "px"});
+ sidenav.css({height:windowHeight + "px"});
}
// Overloaded to save the root node into global_navtree_object
@@ -131,7 +138,7 @@ function initNavTree(toroot,relpath)
}
})
- $(window).load(showRoot);
+ $(window).on("load", showRoot);
}
// return false if the node has no children at all, or has only section/subsection children
@@ -155,19 +162,18 @@ function createIndent(o,domNode,node,level)
var level=-2; // <- we replaced level=-1 by level=-2
var n = node;
while (n.parentNode) { level++; n=n.parentNode; }
- var imgNode = document.createElement("img");
- imgNode.style.paddingLeft=(16*(level)).toString()+'px';
- imgNode.width = 16;
- imgNode.height = 22;
- imgNode.border = 0;
if (checkChildrenData(node)) { // <- we modified this line to use checkChildrenData(node) instead of node.childrenData
+ var imgNode = document.createElement("span");
+ imgNode.className = 'arrow';
+ imgNode.style.paddingLeft=(16*level).toString()+'px';
+ imgNode.innerHTML=arrowRight;
node.plus_img = imgNode;
node.expandToggle = document.createElement("a");
node.expandToggle.href = "javascript:void(0)";
node.expandToggle.onclick = function() {
if (node.expanded) {
$(node.getChildrenUL()).slideUp("fast");
- node.plus_img.src = node.relpath+"ftv2pnode.png";
+ node.plus_img.innerHTML=arrowRight;
node.expanded = false;
} else {
expandNode(o, node, false, false);
@@ -175,11 +181,13 @@ function createIndent(o,domNode,node,level)
}
node.expandToggle.appendChild(imgNode);
domNode.appendChild(node.expandToggle);
- imgNode.src = node.relpath+"ftv2pnode.png";
} else {
- imgNode.src = node.relpath+"ftv2node.png";
- domNode.appendChild(imgNode);
- }
+ var span = document.createElement("span");
+ span.className = 'arrow';
+ span.style.width = 16*(level+1)+'px';
+ span.innerHTML = '&#160;';
+ domNode.appendChild(span);
+ }
}
// Overloaded to automatically expand the selected node
@@ -233,8 +241,7 @@ $(document).ready(function() {
setTimeout(arguments.callee, 10);
}
})();
-});
-$(window).load(function() {
- resizeHeight();
+ $(window).on("load", resizeHeight);
});
+
diff --git a/doc/eigendoxy.css b/doc/eigendoxy.css
index 6274e6c70..4e9d7d120 100644
--- a/doc/eigendoxy.css
+++ b/doc/eigendoxy.css
@@ -93,7 +93,7 @@ table th.inter {
border-color: #cccccc;
}
-/** class for exemple / output tables **/
+/** class for example / output tables **/
table.example {
}
@@ -165,6 +165,8 @@ div.toc {
bottom:0;
border-radius:0px;
border-style: solid none none none;
+ max-height:50%;
+ overflow-y: scroll;
}
div.toc h3 {
@@ -181,6 +183,18 @@ span.cpp11,span.cpp14,span.cpp17 {
font-weight: bold;
}
+.newin3x {
+ color: #a37c1a;
+ font-weight: bold;
+}
+
+div.warningbox {
+ max-width:60em;
+ border-style: solid solid solid solid;
+ border-color: red;
+ border-width: 3px;
+}
+
/**** old Eigen's styles ****/
@@ -214,3 +228,8 @@ h3.version {
td.width20em p.endtd {
width: 20em;
}
+
+/* needed for huge screens */
+.ui-resizable-e {
+ background-repeat: repeat-y;
+}
diff --git a/doc/eigendoxy_footer.html.in b/doc/eigendoxy_footer.html.in
index 878244a19..126653589 100644
--- a/doc/eigendoxy_footer.html.in
+++ b/doc/eigendoxy_footer.html.in
@@ -5,31 +5,18 @@
$navpath
<li class="footer">$generatedby
<a href="http://www.doxygen.org/index.html">
- <img class="footer" src="$relpath$doxygen.png" alt="doxygen"/></a> $doxygenversion </li>
+ <img class="footer" src="$relpath^doxygen.png" alt="doxygen"/></a> $doxygenversion </li>
</ul>
</div>
<!--END GENERATE_TREEVIEW-->
<!--BEGIN !GENERATE_TREEVIEW-->
<hr class="footer"/><address class="footer"><small>
$generatedby &#160;<a href="http://www.doxygen.org/index.html">
-<img class="footer" src="$relpath$doxygen.png" alt="doxygen"/>
+<img class="footer" src="$relpath^doxygen.png" alt="doxygen"/>
</a> $doxygenversion
</small></address>
<!--END !GENERATE_TREEVIEW-->
-<!-- Piwik -->
-<script type="text/javascript">
-var pkBaseURL = (("https:" == document.location.protocol) ? "https://stats.sylphide-consulting.com/piwik/" : "http://stats.sylphide-consulting.com/piwik/");
-document.write(unescape("%3Cscript src='" + pkBaseURL + "piwik.js' type='text/javascript'%3E%3C/script%3E"));
-</script><script type="text/javascript">
-try {
-var piwikTracker = Piwik.getTracker(pkBaseURL + "piwik.php", 20);
-piwikTracker.trackPageView();
-piwikTracker.enableLinkTracking();
-} catch( err ) {}
-</script><noscript><p><img src="http://stats.sylphide-consulting.com/piwik/piwik.php?idsite=20" style="border:0" alt="" /></p></noscript>
-<!-- End Piwik Tracking Code -->
-
</body>
</html>
diff --git a/doc/eigendoxy_header.html.in b/doc/eigendoxy_header.html.in
index 0f3859f40..a6b1c1d08 100644
--- a/doc/eigendoxy_header.html.in
+++ b/doc/eigendoxy_header.html.in
@@ -4,25 +4,26 @@
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen $doxygenversion"/>
+<meta name="viewport" content="width=device-width, initial-scale=1"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
-<link href="$relpath$tabs.css" rel="stylesheet" type="text/css"/>
-<script type="text/javascript" src="$relpath$jquery.js"></script>
-<script type="text/javascript" src="$relpath$dynsections.js"></script>
+<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="$relpath^jquery.js"></script>
+<script type="text/javascript" src="$relpath^dynsections.js"></script>
$treeview
$search
$mathjax
-<link href="$relpath$$stylesheet" rel="stylesheet" type="text/css" />
+<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
<link href="$relpath$eigendoxy.css" rel="stylesheet" type="text/css">
<!-- $extrastylesheet -->
<script type="text/javascript" src="$relpath$eigen_navtree_hacks.js"></script>
-<!-- <script type="text/javascript"> -->
-<!-- </script> -->
</head>
<body>
+
+<div style="background:#FFDDDD;font-size:120%;text-align:center;margin:0;padding:5px">Please help us get to know our user community better by answering the following short survey: <a href="https://forms.gle/wpyrxWi18ox9Z5ae9">https://forms.gle/wpyrxWi18ox9Z5ae9</a></div>
+
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
-<!-- <a name="top"></a> -->
<!--BEGIN TITLEAREA-->
<div id="titlearea">
@@ -30,10 +31,10 @@ $mathjax
<tbody>
<tr style="height: 56px;">
<!--BEGIN PROJECT_LOGO-->
- <td id="projectlogo"><img alt="Logo" src="$relpath$$projectlogo"/></td>
+ <td id="projectlogo"><img alt="Logo" src="$relpath^$projectlogo"/></td>
<!--END PROJECT_LOGO-->
<!--BEGIN PROJECT_NAME-->
- <td style="padding-left: 0.5em;">
+ <td id="projectalign" style="padding-left: 0.5em;">
<div id="projectname"><a href="http://eigen.tuxfamily.org">$projectname</a>
<!--BEGIN PROJECT_NUMBER-->&#160;<span id="projectnumber">$projectnumber</span><!--END PROJECT_NUMBER-->
</div>
@@ -42,7 +43,7 @@ $mathjax
<!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME-->
<!--BEGIN PROJECT_BRIEF-->
- <td style="padding-left: 0.5em;">
+ <td id="projectalign" style="padding-left: 0.5em;">
<div id="projectbrief">$projectbrief</div>
</td>
<!--END PROJECT_BRIEF-->
diff --git a/doc/examples/CMakeLists.txt b/doc/examples/CMakeLists.txt
index f7a19055f..a2c9d05a4 100644
--- a/doc/examples/CMakeLists.txt
+++ b/doc/examples/CMakeLists.txt
@@ -13,9 +13,8 @@ foreach(example_src ${examples_SRCS})
ARGS >${CMAKE_CURRENT_BINARY_DIR}/${example}.out
)
add_dependencies(all_examples ${example})
-endforeach(example_src)
+endforeach()
-check_cxx_compiler_flag("-std=c++11" EIGEN_COMPILER_SUPPORT_CPP11)
if(EIGEN_COMPILER_SUPPORT_CPP11)
ei_add_target_property(nullary_indexing COMPILE_FLAGS "-std=c++11")
endif() \ No newline at end of file
diff --git a/doc/examples/Cwise_lgamma.cpp b/doc/examples/Cwise_lgamma.cpp
index f1c4f503e..6bfaccbce 100644
--- a/doc/examples/Cwise_lgamma.cpp
+++ b/doc/examples/Cwise_lgamma.cpp
@@ -6,4 +6,4 @@ int main()
{
Array4d v(0.5,10,0,-1);
std::cout << v.lgamma() << std::endl;
-} \ No newline at end of file
+}
diff --git a/doc/examples/TutorialLinAlgSVDSolve.cpp b/doc/examples/TutorialLinAlgSVDSolve.cpp
index 9fbc031de..f109f04e5 100644
--- a/doc/examples/TutorialLinAlgSVDSolve.cpp
+++ b/doc/examples/TutorialLinAlgSVDSolve.cpp
@@ -11,5 +11,5 @@ int main()
VectorXf b = VectorXf::Random(3);
cout << "Here is the right hand side b:\n" << b << endl;
cout << "The least-squares solution is:\n"
- << A.jacobiSvd(ComputeThinU | ComputeThinV).solve(b) << endl;
+ << A.bdcSvd(ComputeThinU | ComputeThinV).solve(b) << endl;
}
diff --git a/doc/examples/Tutorial_BlockOperations_block_assignment.cpp b/doc/examples/Tutorial_BlockOperations_block_assignment.cpp
index 76f49f2fb..0b87313a1 100644
--- a/doc/examples/Tutorial_BlockOperations_block_assignment.cpp
+++ b/doc/examples/Tutorial_BlockOperations_block_assignment.cpp
@@ -14,5 +14,5 @@ int main()
a.block<2,2>(1,1) = m;
cout << "Here is now a with m copied into its central 2x2 block:" << endl << a << endl << endl;
a.block(0,0,2,3) = a.block(2,1,2,3);
- cout << "Here is now a with bottom-right 2x3 block copied into top-left 2x2 block:" << endl << a << endl << endl;
+ cout << "Here is now a with bottom-right 2x3 block copied into top-left 2x3 block:" << endl << a << endl << endl;
}
diff --git a/doc/examples/Tutorial_simple_example_dynamic_size.cpp b/doc/examples/Tutorial_simple_example_dynamic_size.cpp
index 0f0280e0e..defcb1ee4 100644
--- a/doc/examples/Tutorial_simple_example_dynamic_size.cpp
+++ b/doc/examples/Tutorial_simple_example_dynamic_size.cpp
@@ -10,7 +10,7 @@ int main()
MatrixXi m(size,size+1); // a (size)x(size+1)-matrix of int's
for (int j=0; j<m.cols(); ++j) // loop over columns
for (int i=0; i<m.rows(); ++i) // loop over rows
- m(i,j) = i+j*m.rows(); // to access matrix coefficients,
+ m(i,j) = i+j*size; // to access matrix coefficients,
// use operator()(int,int)
std::cout << m << "\n\n";
}
diff --git a/doc/examples/class_FixedReshaped.cpp b/doc/examples/class_FixedReshaped.cpp
new file mode 100644
index 000000000..b6d4085de
--- /dev/null
+++ b/doc/examples/class_FixedReshaped.cpp
@@ -0,0 +1,22 @@
+#include <Eigen/Core>
+#include <iostream>
+using namespace Eigen;
+using namespace std;
+
+template<typename Derived>
+Eigen::Reshaped<Derived, 4, 2>
+reshape_helper(MatrixBase<Derived>& m)
+{
+ return Eigen::Reshaped<Derived, 4, 2>(m.derived());
+}
+
+int main(int, char**)
+{
+ MatrixXd m(2, 4);
+ m << 1, 2, 3, 4,
+ 5, 6, 7, 8;
+ MatrixXd n = reshape_helper(m);
+ cout << "matrix m is:" << endl << m << endl;
+ cout << "matrix n is:" << endl << n << endl;
+ return 0;
+}
diff --git a/doc/examples/class_Reshaped.cpp b/doc/examples/class_Reshaped.cpp
new file mode 100644
index 000000000..18fb45454
--- /dev/null
+++ b/doc/examples/class_Reshaped.cpp
@@ -0,0 +1,23 @@
+#include <Eigen/Core>
+#include <iostream>
+using namespace std;
+using namespace Eigen;
+
+template<typename Derived>
+const Reshaped<const Derived>
+reshape_helper(const MatrixBase<Derived>& m, int rows, int cols)
+{
+ return Reshaped<const Derived>(m.derived(), rows, cols);
+}
+
+int main(int, char**)
+{
+ MatrixXd m(3, 4);
+ m << 1, 4, 7, 10,
+ 2, 5, 8, 11,
+ 3, 6, 9, 12;
+ cout << m << endl;
+ Ref<const MatrixXd> n = reshape_helper(m, 2, 6);
+ cout << "Matrix m is:" << endl << m << endl;
+ cout << "Matrix n is:" << endl << n << endl;
+}
diff --git a/doc/examples/matrixfree_cg.cpp b/doc/examples/matrixfree_cg.cpp
index 6a205aea3..74699381c 100644
--- a/doc/examples/matrixfree_cg.cpp
+++ b/doc/examples/matrixfree_cg.cpp
@@ -67,6 +67,7 @@ namespace internal {
// This method should implement "dst += alpha * lhs * rhs" inplace,
// however, for iterative solvers, alpha is always equal to 1, so let's not bother about it.
assert(alpha==Scalar(1) && "scaling is not implemented");
+ EIGEN_ONLY_USED_FOR_DEBUG(alpha);
// Here we could simply call dst.noalias() += lhs.my_matrix() * rhs,
// but let's do something fancier (and less efficient):
diff --git a/doc/examples/nullary_indexing.cpp b/doc/examples/nullary_indexing.cpp
index e27c3585a..b74db5fd1 100644
--- a/doc/examples/nullary_indexing.cpp
+++ b/doc/examples/nullary_indexing.cpp
@@ -30,7 +30,7 @@ public:
// [function]
template <class ArgType, class RowIndexType, class ColIndexType>
CwiseNullaryOp<indexing_functor<ArgType,RowIndexType,ColIndexType>, typename indexing_functor<ArgType,RowIndexType,ColIndexType>::MatrixType>
-indexing(const Eigen::MatrixBase<ArgType>& arg, const RowIndexType& row_indices, const ColIndexType& col_indices)
+mat_indexing(const Eigen::MatrixBase<ArgType>& arg, const RowIndexType& row_indices, const ColIndexType& col_indices)
{
typedef indexing_functor<ArgType,RowIndexType,ColIndexType> Func;
typedef typename Func::MatrixType MatrixType;
@@ -45,7 +45,7 @@ int main()
Eigen::MatrixXi A = Eigen::MatrixXi::Random(4,4);
Array3i ri(1,2,1);
ArrayXi ci(6); ci << 3,2,1,0,0,2;
- Eigen::MatrixXi B = indexing(A, ri, ci);
+ Eigen::MatrixXi B = mat_indexing(A, ri, ci);
std::cout << "A =" << std::endl;
std::cout << A << std::endl << std::endl;
std::cout << "A([" << ri.transpose() << "], [" << ci.transpose() << "]) =" << std::endl;
@@ -53,11 +53,11 @@ int main()
std::cout << "[main1]\n";
std::cout << "[main2]\n";
- B = indexing(A, ri+1, ci);
+ B = mat_indexing(A, ri+1, ci);
std::cout << "A(ri+1,ci) =" << std::endl;
std::cout << B << std::endl << std::endl;
-#if __cplusplus >= 201103L
- B = indexing(A, ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), ArrayXi::LinSpaced(4,0,3));
+#if EIGEN_COMP_CXXVER >= 11
+ B = mat_indexing(A, ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), ArrayXi::LinSpaced(4,0,3));
std::cout << "A(ArrayXi::LinSpaced(13,0,12).unaryExpr([](int x){return x%4;}), ArrayXi::LinSpaced(4,0,3)) =" << std::endl;
std::cout << B << std::endl << std::endl;
#endif
diff --git a/doc/snippets/Array_initializer_list_23_cxx11.cpp b/doc/snippets/Array_initializer_list_23_cxx11.cpp
new file mode 100644
index 000000000..2c2166eab
--- /dev/null
+++ b/doc/snippets/Array_initializer_list_23_cxx11.cpp
@@ -0,0 +1,5 @@
+ArrayXXi a {
+ {1, 2, 3},
+ {3, 4, 5}
+};
+cout << a << endl;
diff --git a/doc/snippets/Array_initializer_list_vector_cxx11.cpp b/doc/snippets/Array_initializer_list_vector_cxx11.cpp
new file mode 100644
index 000000000..a668d84ac
--- /dev/null
+++ b/doc/snippets/Array_initializer_list_vector_cxx11.cpp
@@ -0,0 +1,2 @@
+Array<int, Dynamic, 1> v {{1, 2, 3, 4, 5}};
+cout << v << endl;
diff --git a/doc/snippets/Array_variadic_ctor_cxx11.cpp b/doc/snippets/Array_variadic_ctor_cxx11.cpp
new file mode 100644
index 000000000..0e4ec4469
--- /dev/null
+++ b/doc/snippets/Array_variadic_ctor_cxx11.cpp
@@ -0,0 +1,3 @@
+Array<int, 1, 6> a(1, 2, 3, 4, 5, 6);
+Array<int, 3, 1> b {1, 2, 3};
+cout << a << "\n\n" << b << endl;
diff --git a/doc/snippets/BiCGSTAB_simple.cpp b/doc/snippets/BiCGSTAB_simple.cpp
index 5520f4f1f..8c8829fd3 100644
--- a/doc/snippets/BiCGSTAB_simple.cpp
+++ b/doc/snippets/BiCGSTAB_simple.cpp
@@ -8,4 +8,4 @@
std::cout << "#iterations: " << solver.iterations() << std::endl;
std::cout << "estimated error: " << solver.error() << std::endl;
/* ... update b ... */
- x = solver.solve(b); // solve again \ No newline at end of file
+ x = solver.solve(b); // solve again
diff --git a/doc/snippets/BiCGSTAB_step_by_step.cpp b/doc/snippets/BiCGSTAB_step_by_step.cpp
index 06147bb81..6c95d5a9c 100644
--- a/doc/snippets/BiCGSTAB_step_by_step.cpp
+++ b/doc/snippets/BiCGSTAB_step_by_step.cpp
@@ -11,4 +11,4 @@
x = solver.solveWithGuess(b,x);
std::cout << i << " : " << solver.error() << std::endl;
++i;
- } while (solver.info()!=Success && i<100); \ No newline at end of file
+ } while (solver.info()!=Success && i<100);
diff --git a/doc/snippets/CMakeLists.txt b/doc/snippets/CMakeLists.txt
index 1baf32fba..65f195a31 100644
--- a/doc/snippets/CMakeLists.txt
+++ b/doc/snippets/CMakeLists.txt
@@ -6,21 +6,31 @@ foreach(snippet_src ${snippets_SRCS})
get_filename_component(snippet ${snippet_src} NAME_WE)
set(compile_snippet_target compile_${snippet})
set(compile_snippet_src ${compile_snippet_target}.cpp)
- file(READ ${snippet_src} snippet_source_code)
- configure_file(${CMAKE_CURRENT_SOURCE_DIR}/compile_snippet.cpp.in
- ${CMAKE_CURRENT_BINARY_DIR}/${compile_snippet_src})
- add_executable(${compile_snippet_target}
- ${CMAKE_CURRENT_BINARY_DIR}/${compile_snippet_src})
- if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
- target_link_libraries(${compile_snippet_target} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
+ if((NOT ${snippet_src} MATCHES "cxx11") OR EIGEN_COMPILER_SUPPORT_CPP11)
+ file(READ ${snippet_src} snippet_source_code)
+ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/compile_snippet.cpp.in
+ ${CMAKE_CURRENT_BINARY_DIR}/${compile_snippet_src})
+ add_executable(${compile_snippet_target}
+ ${CMAKE_CURRENT_BINARY_DIR}/${compile_snippet_src})
+ if(EIGEN_STANDARD_LIBRARIES_TO_LINK_TO)
+ target_link_libraries(${compile_snippet_target} ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
+ endif()
+ if(${snippet_src} MATCHES "cxx11")
+ set_target_properties(${compile_snippet_target} PROPERTIES COMPILE_FLAGS "-std=c++11")
+ endif()
+ if(${snippet_src} MATCHES "deprecated")
+ set_target_properties(${compile_snippet_target} PROPERTIES COMPILE_FLAGS "-DEIGEN_NO_DEPRECATED_WARNING")
+ endif()
+ add_custom_command(
+ TARGET ${compile_snippet_target}
+ POST_BUILD
+ COMMAND ${compile_snippet_target}
+ ARGS >${CMAKE_CURRENT_BINARY_DIR}/${snippet}.out
+ )
+ add_dependencies(all_snippets ${compile_snippet_target})
+ set_source_files_properties(${CMAKE_CURRENT_BINARY_DIR}/${compile_snippet_src}
+ PROPERTIES OBJECT_DEPENDS ${snippet_src})
+ else()
+    message("Skipping snippet ${snippet_src} because the compiler does not support C++11")
endif()
- add_custom_command(
- TARGET ${compile_snippet_target}
- POST_BUILD
- COMMAND ${compile_snippet_target}
- ARGS >${CMAKE_CURRENT_BINARY_DIR}/${snippet}.out
- )
- add_dependencies(all_snippets ${compile_snippet_target})
- set_source_files_properties(${CMAKE_CURRENT_BINARY_DIR}/${compile_snippet_src}
- PROPERTIES OBJECT_DEPENDS ${snippet_src})
-endforeach(snippet_src)
+endforeach()
diff --git a/doc/snippets/ComplexEigenSolver_eigenvectors.cpp b/doc/snippets/ComplexEigenSolver_eigenvectors.cpp
index bb1c2ccf1..adeed9af6 100644
--- a/doc/snippets/ComplexEigenSolver_eigenvectors.cpp
+++ b/doc/snippets/ComplexEigenSolver_eigenvectors.cpp
@@ -1,4 +1,4 @@
MatrixXcf ones = MatrixXcf::Ones(3,3);
ComplexEigenSolver<MatrixXcf> ces(ones);
cout << "The first eigenvector of the 3x3 matrix of ones is:"
- << endl << ces.eigenvectors().col(1) << endl;
+ << endl << ces.eigenvectors().col(0) << endl;
diff --git a/doc/snippets/Cwise_rint.cpp b/doc/snippets/Cwise_rint.cpp
new file mode 100644
index 000000000..1dc7b2fd1
--- /dev/null
+++ b/doc/snippets/Cwise_rint.cpp
@@ -0,0 +1,3 @@
+ArrayXd v = ArrayXd::LinSpaced(7,-2,2);
+cout << v << endl << endl;
+cout << rint(v) << endl;
diff --git a/doc/snippets/DenseBase_LinSpaced_seq.cpp b/doc/snippets/DenseBase_LinSpaced_seq_deprecated.cpp
index f55c5085d..f55c5085d 100644
--- a/doc/snippets/DenseBase_LinSpaced_seq.cpp
+++ b/doc/snippets/DenseBase_LinSpaced_seq_deprecated.cpp
diff --git a/doc/snippets/DirectionWise_hnormalized.cpp b/doc/snippets/DirectionWise_hnormalized.cpp
index 3410790a8..2451f6e7b 100644
--- a/doc/snippets/DirectionWise_hnormalized.cpp
+++ b/doc/snippets/DirectionWise_hnormalized.cpp
@@ -1,7 +1,6 @@
-typedef Matrix<double,4,Dynamic> Matrix4Xd;
Matrix4Xd M = Matrix4Xd::Random(4,5);
Projective3d P(Matrix4d::Random());
cout << "The matrix M is:" << endl << M << endl << endl;
cout << "M.colwise().hnormalized():" << endl << M.colwise().hnormalized() << endl << endl;
cout << "P*M:" << endl << P*M << endl << endl;
-cout << "(P*M).colwise().hnormalized():" << endl << (P*M).colwise().hnormalized() << endl << endl; \ No newline at end of file
+cout << "(P*M).colwise().hnormalized():" << endl << (P*M).colwise().hnormalized() << endl << endl;
diff --git a/doc/snippets/Jacobi_makeGivens.cpp b/doc/snippets/Jacobi_makeGivens.cpp
index 4b733c306..6f8ec054a 100644
--- a/doc/snippets/Jacobi_makeGivens.cpp
+++ b/doc/snippets/Jacobi_makeGivens.cpp
@@ -3,4 +3,4 @@ JacobiRotation<float> G;
G.makeGivens(v.x(), v.y());
cout << "Here is the vector v:" << endl << v << endl;
v.applyOnTheLeft(0, 1, G.adjoint());
-cout << "Here is the vector J' * v:" << endl << v << endl; \ No newline at end of file
+cout << "Here is the vector J' * v:" << endl << v << endl;
diff --git a/doc/snippets/Jacobi_makeJacobi.cpp b/doc/snippets/Jacobi_makeJacobi.cpp
index 0cc331d9f..a86e80a62 100644
--- a/doc/snippets/Jacobi_makeJacobi.cpp
+++ b/doc/snippets/Jacobi_makeJacobi.cpp
@@ -5,4 +5,4 @@ J.makeJacobi(m, 0, 1);
cout << "Here is the matrix m:" << endl << m << endl;
m.applyOnTheLeft(0, 1, J.adjoint());
m.applyOnTheRight(0, 1, J);
-cout << "Here is the matrix J' * m * J:" << endl << m << endl; \ No newline at end of file
+cout << "Here is the matrix J' * m * J:" << endl << m << endl;
diff --git a/doc/snippets/Map_placement_new.cpp b/doc/snippets/Map_placement_new.cpp
index 2e40eca32..83b83a893 100644
--- a/doc/snippets/Map_placement_new.cpp
+++ b/doc/snippets/Map_placement_new.cpp
@@ -2,4 +2,4 @@ int data[] = {1,2,3,4,5,6,7,8,9};
Map<RowVectorXi> v(data,4);
cout << "The mapped vector v is: " << v << "\n";
new (&v) Map<RowVectorXi>(data+4,5);
-cout << "Now v is: " << v << "\n"; \ No newline at end of file
+cout << "Now v is: " << v << "\n";
diff --git a/doc/snippets/MatrixBase_colwise_iterator_cxx11.cpp b/doc/snippets/MatrixBase_colwise_iterator_cxx11.cpp
new file mode 100644
index 000000000..116063fb1
--- /dev/null
+++ b/doc/snippets/MatrixBase_colwise_iterator_cxx11.cpp
@@ -0,0 +1,12 @@
+Matrix3i m = Matrix3i::Random();
+cout << "Here is the initial matrix m:" << endl << m << endl;
+int i = -1;
+for(auto c: m.colwise()) {
+ c *= i;
+ ++i;
+}
+cout << "Here is the matrix m after the for-range-loop:" << endl << m << endl;
+auto cols = m.colwise();
+auto it = std::find_if(cols.cbegin(), cols.cend(),
+ [](Matrix3i::ConstColXpr x) { return x.squaredNorm() == 0; });
+cout << "The first empty column is: " << distance(cols.cbegin(),it) << endl;
diff --git a/doc/snippets/MatrixBase_cwiseArg.cpp b/doc/snippets/MatrixBase_cwiseArg.cpp
new file mode 100644
index 000000000..e0857cf97
--- /dev/null
+++ b/doc/snippets/MatrixBase_cwiseArg.cpp
@@ -0,0 +1,3 @@
+MatrixXcf v = MatrixXcf::Random(2, 3);
+cout << v << endl << endl;
+cout << v.cwiseArg() << endl;
\ No newline at end of file
diff --git a/doc/snippets/MatrixBase_cwiseEqual.cpp b/doc/snippets/MatrixBase_cwiseEqual.cpp
index eb3656f4c..469af642c 100644
--- a/doc/snippets/MatrixBase_cwiseEqual.cpp
+++ b/doc/snippets/MatrixBase_cwiseEqual.cpp
@@ -3,5 +3,5 @@ m << 1, 0,
1, 1;
cout << "Comparing m with identity matrix:" << endl;
cout << m.cwiseEqual(MatrixXi::Identity(2,2)) << endl;
-int count = m.cwiseEqual(MatrixXi::Identity(2,2)).count();
+Index count = m.cwiseEqual(MatrixXi::Identity(2,2)).count();
cout << "Number of coefficients that are equal: " << count << endl;
diff --git a/doc/snippets/MatrixBase_cwiseNotEqual.cpp b/doc/snippets/MatrixBase_cwiseNotEqual.cpp
index 6a2e4fb6c..7f0a105d6 100644
--- a/doc/snippets/MatrixBase_cwiseNotEqual.cpp
+++ b/doc/snippets/MatrixBase_cwiseNotEqual.cpp
@@ -3,5 +3,5 @@ m << 1, 0,
1, 1;
cout << "Comparing m with identity matrix:" << endl;
cout << m.cwiseNotEqual(MatrixXi::Identity(2,2)) << endl;
-int count = m.cwiseNotEqual(MatrixXi::Identity(2,2)).count();
+Index count = m.cwiseNotEqual(MatrixXi::Identity(2,2)).count();
cout << "Number of coefficients that are not equal: " << count << endl;
diff --git a/doc/snippets/MatrixBase_hnormalized.cpp b/doc/snippets/MatrixBase_hnormalized.cpp
index 652cd77c0..b714adcc3 100644
--- a/doc/snippets/MatrixBase_hnormalized.cpp
+++ b/doc/snippets/MatrixBase_hnormalized.cpp
@@ -3,4 +3,4 @@ Projective3d P(Matrix4d::Random());
cout << "v = " << v.transpose() << "]^T" << endl;
cout << "v.hnormalized() = " << v.hnormalized().transpose() << "]^T" << endl;
cout << "P*v = " << (P*v).transpose() << "]^T" << endl;
-cout << "(P*v).hnormalized() = " << (P*v).hnormalized().transpose() << "]^T" << endl; \ No newline at end of file
+cout << "(P*v).hnormalized() = " << (P*v).hnormalized().transpose() << "]^T" << endl;
diff --git a/doc/snippets/MatrixBase_homogeneous.cpp b/doc/snippets/MatrixBase_homogeneous.cpp
index 457c28f91..263196097 100644
--- a/doc/snippets/MatrixBase_homogeneous.cpp
+++ b/doc/snippets/MatrixBase_homogeneous.cpp
@@ -3,4 +3,4 @@ Projective3d P(Matrix4d::Random());
cout << "v = [" << v.transpose() << "]^T" << endl;
cout << "h.homogeneous() = [" << v.homogeneous().transpose() << "]^T" << endl;
cout << "(P * v.homogeneous()) = [" << (P * v.homogeneous()).transpose() << "]^T" << endl;
-cout << "(P * v.homogeneous()).hnormalized() = [" << (P * v.homogeneous()).eval().hnormalized().transpose() << "]^T" << endl; \ No newline at end of file
+cout << "(P * v.homogeneous()).hnormalized() = [" << (P * v.homogeneous()).eval().hnormalized().transpose() << "]^T" << endl;
diff --git a/doc/snippets/MatrixBase_reshaped_auto.cpp b/doc/snippets/MatrixBase_reshaped_auto.cpp
new file mode 100644
index 000000000..59f9d3f60
--- /dev/null
+++ b/doc/snippets/MatrixBase_reshaped_auto.cpp
@@ -0,0 +1,4 @@
+Matrix4i m = Matrix4i::Random();
+cout << "Here is the matrix m:" << endl << m << endl;
+cout << "Here is m.reshaped(2, AutoSize):" << endl << m.reshaped(2, AutoSize) << endl;
+cout << "Here is m.reshaped<RowMajor>(AutoSize, fix<8>):" << endl << m.reshaped<RowMajor>(AutoSize, fix<8>) << endl;
diff --git a/doc/snippets/MatrixBase_reshaped_fixed.cpp b/doc/snippets/MatrixBase_reshaped_fixed.cpp
new file mode 100644
index 000000000..3e9e2cfb6
--- /dev/null
+++ b/doc/snippets/MatrixBase_reshaped_fixed.cpp
@@ -0,0 +1,3 @@
+Matrix4i m = Matrix4i::Random();
+cout << "Here is the matrix m:" << endl << m << endl;
+cout << "Here is m.reshaped(fix<2>,fix<8>):" << endl << m.reshaped(fix<2>,fix<8>) << endl;
diff --git a/doc/snippets/MatrixBase_reshaped_int_int.cpp b/doc/snippets/MatrixBase_reshaped_int_int.cpp
new file mode 100644
index 000000000..af4ca592f
--- /dev/null
+++ b/doc/snippets/MatrixBase_reshaped_int_int.cpp
@@ -0,0 +1,3 @@
+Matrix4i m = Matrix4i::Random();
+cout << "Here is the matrix m:" << endl << m << endl;
+cout << "Here is m.reshaped(2, 8):" << endl << m.reshaped(2, 8) << endl;
diff --git a/doc/snippets/MatrixBase_reshaped_to_vector.cpp b/doc/snippets/MatrixBase_reshaped_to_vector.cpp
new file mode 100644
index 000000000..37f65f7c6
--- /dev/null
+++ b/doc/snippets/MatrixBase_reshaped_to_vector.cpp
@@ -0,0 +1,4 @@
+Matrix4i m = Matrix4i::Random();
+cout << "Here is the matrix m:" << endl << m << endl;
+cout << "Here is m.reshaped().transpose():" << endl << m.reshaped().transpose() << endl;
+cout << "Here is m.reshaped<RowMajor>().transpose(): " << endl << m.reshaped<RowMajor>().transpose() << endl;
diff --git a/doc/snippets/Matrix_Map_stride.cpp b/doc/snippets/Matrix_Map_stride.cpp
new file mode 100644
index 000000000..ae42a127a
--- /dev/null
+++ b/doc/snippets/Matrix_Map_stride.cpp
@@ -0,0 +1,7 @@
+Matrix4i A;
+A << 1, 2, 3, 4,
+ 5, 6, 7, 8,
+ 9, 10, 11, 12,
+ 13, 14, 15, 16;
+
+std::cout << Matrix2i::Map(&A(1,1),Stride<8,2>()) << std::endl;
diff --git a/doc/snippets/Matrix_initializer_list_23_cxx11.cpp b/doc/snippets/Matrix_initializer_list_23_cxx11.cpp
new file mode 100644
index 000000000..60280ab58
--- /dev/null
+++ b/doc/snippets/Matrix_initializer_list_23_cxx11.cpp
@@ -0,0 +1,5 @@
+MatrixXd m {
+ {1, 2, 3},
+ {4, 5, 6}
+};
+cout << m << endl;
diff --git a/doc/snippets/Matrix_initializer_list_vector_cxx11.cpp b/doc/snippets/Matrix_initializer_list_vector_cxx11.cpp
new file mode 100644
index 000000000..325257cb0
--- /dev/null
+++ b/doc/snippets/Matrix_initializer_list_vector_cxx11.cpp
@@ -0,0 +1,2 @@
+VectorXi v {{1, 2}};
+cout << v << endl;
diff --git a/doc/snippets/Matrix_variadic_ctor_cxx11.cpp b/doc/snippets/Matrix_variadic_ctor_cxx11.cpp
new file mode 100644
index 000000000..06d33f571
--- /dev/null
+++ b/doc/snippets/Matrix_variadic_ctor_cxx11.cpp
@@ -0,0 +1,3 @@
+Matrix<int, 1, 6> a(1, 2, 3, 4, 5, 6);
+Matrix<int, 3, 1> b {1, 2, 3};
+cout << a << "\n\n" << b << endl;
diff --git a/doc/snippets/SelfAdjointEigenSolver_eigenvectors.cpp b/doc/snippets/SelfAdjointEigenSolver_eigenvectors.cpp
index cfc8b0d54..94b0d6ebd 100644
--- a/doc/snippets/SelfAdjointEigenSolver_eigenvectors.cpp
+++ b/doc/snippets/SelfAdjointEigenSolver_eigenvectors.cpp
@@ -1,4 +1,4 @@
MatrixXd ones = MatrixXd::Ones(3,3);
SelfAdjointEigenSolver<MatrixXd> es(ones);
cout << "The first eigenvector of the 3x3 matrix of ones is:"
- << endl << es.eigenvectors().col(1) << endl;
+ << endl << es.eigenvectors().col(0) << endl;
diff --git a/doc/snippets/Slicing_arrayexpr.cpp b/doc/snippets/Slicing_arrayexpr.cpp
new file mode 100644
index 000000000..2df818098
--- /dev/null
+++ b/doc/snippets/Slicing_arrayexpr.cpp
@@ -0,0 +1,4 @@
+ArrayXi ind(5); ind<<4,2,5,5,3;
+MatrixXi A = MatrixXi::Random(4,6);
+cout << "Initial matrix A:\n" << A << "\n\n";
+cout << "A(all,ind-1):\n" << A(all,ind-1) << "\n\n";
diff --git a/doc/snippets/Slicing_custom_padding_cxx11.cpp b/doc/snippets/Slicing_custom_padding_cxx11.cpp
new file mode 100644
index 000000000..24db98b7d
--- /dev/null
+++ b/doc/snippets/Slicing_custom_padding_cxx11.cpp
@@ -0,0 +1,12 @@
+struct pad {
+ Index size() const { return out_size; }
+ Index operator[] (Index i) const { return std::max<Index>(0,i-(out_size-in_size)); }
+ Index in_size, out_size;
+};
+
+Matrix3i A;
+A.reshaped() = VectorXi::LinSpaced(9,1,9);
+cout << "Initial matrix A:\n" << A << "\n\n";
+MatrixXi B(5,5);
+B = A(pad{3,5}, pad{3,5});
+cout << "A(pad{3,N}, pad{3,N}):\n" << B << "\n\n";
diff --git a/doc/snippets/Slicing_rawarray_cxx11.cpp b/doc/snippets/Slicing_rawarray_cxx11.cpp
new file mode 100644
index 000000000..1087131ab
--- /dev/null
+++ b/doc/snippets/Slicing_rawarray_cxx11.cpp
@@ -0,0 +1,5 @@
+#if EIGEN_HAS_STATIC_ARRAY_TEMPLATE
+MatrixXi A = MatrixXi::Random(4,6);
+cout << "Initial matrix A:\n" << A << "\n\n";
+cout << "A(all,{4,2,5,5,3}):\n" << A(all,{4,2,5,5,3}) << "\n\n";
+#endif
diff --git a/doc/snippets/Slicing_stdvector_cxx11.cpp b/doc/snippets/Slicing_stdvector_cxx11.cpp
new file mode 100644
index 000000000..555f6625f
--- /dev/null
+++ b/doc/snippets/Slicing_stdvector_cxx11.cpp
@@ -0,0 +1,4 @@
+std::vector<int> ind{4,2,5,5,3};
+MatrixXi A = MatrixXi::Random(4,6);
+cout << "Initial matrix A:\n" << A << "\n\n";
+cout << "A(all,ind):\n" << A(all,ind) << "\n\n";
diff --git a/doc/snippets/TopicAliasing_mult4.cpp b/doc/snippets/TopicAliasing_mult4.cpp
index 8a8992f6c..01c1c6d77 100644
--- a/doc/snippets/TopicAliasing_mult4.cpp
+++ b/doc/snippets/TopicAliasing_mult4.cpp
@@ -2,4 +2,4 @@ MatrixXf A(2,2), B(3,2);
B << 2, 0, 0, 3, 1, 1;
A << 2, 0, 0, -2;
A = (B * A).cwiseAbs();
-cout << A;
\ No newline at end of file
+cout << A;
diff --git a/doc/snippets/Tridiagonalization_decomposeInPlace.cpp b/doc/snippets/Tridiagonalization_decomposeInPlace.cpp
index 93dcfca1d..3cdce679b 100644
--- a/doc/snippets/Tridiagonalization_decomposeInPlace.cpp
+++ b/doc/snippets/Tridiagonalization_decomposeInPlace.cpp
@@ -4,7 +4,8 @@ cout << "Here is a random symmetric 5x5 matrix:" << endl << A << endl << endl;
VectorXd diag(5);
VectorXd subdiag(4);
-internal::tridiagonalization_inplace(A, diag, subdiag, true);
+VectorXd hcoeffs(4); // Scratch space for the Householder reflectors.
+internal::tridiagonalization_inplace(A, diag, subdiag, hcoeffs, true);
cout << "The orthogonal matrix Q is:" << endl << A << endl;
cout << "The diagonal of the tridiagonal matrix T is:" << endl << diag << endl;
cout << "The subdiagonal of the tridiagonal matrix T is:" << endl << subdiag << endl;
diff --git a/doc/snippets/Tutorial_ReshapeMat2Mat.cpp b/doc/snippets/Tutorial_ReshapeMat2Mat.cpp
index f84d6e76d..737afecb8 100644
--- a/doc/snippets/Tutorial_ReshapeMat2Mat.cpp
+++ b/doc/snippets/Tutorial_ReshapeMat2Mat.cpp
@@ -3,4 +3,4 @@ M1 << 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12;
Map<MatrixXf> M2(M1.data(), 6,2);
-cout << "M2:" << endl << M2 << endl; \ No newline at end of file
+cout << "M2:" << endl << M2 << endl;
diff --git a/doc/snippets/Tutorial_ReshapeMat2Vec.cpp b/doc/snippets/Tutorial_ReshapeMat2Vec.cpp
index 95bd4e0e6..32980a790 100644
--- a/doc/snippets/Tutorial_ReshapeMat2Vec.cpp
+++ b/doc/snippets/Tutorial_ReshapeMat2Vec.cpp
@@ -8,4 +8,4 @@ cout << "v1:" << endl << v1 << endl;
Matrix<float,Dynamic,Dynamic,RowMajor> M2(M1);
Map<RowVectorXf> v2(M2.data(), M2.size());
-cout << "v2:" << endl << v2 << endl; \ No newline at end of file
+cout << "v2:" << endl << v2 << endl;
diff --git a/doc/snippets/Tutorial_SlicingCol.cpp b/doc/snippets/Tutorial_SlicingCol.cpp
index f667ff689..695d13014 100644
--- a/doc/snippets/Tutorial_SlicingCol.cpp
+++ b/doc/snippets/Tutorial_SlicingCol.cpp
@@ -8,4 +8,4 @@ RowMajorMatrixXf M3(M1);
cout << "Row major input:" << endl << M3 << "\n";
Map<RowMajorMatrixXf,0,Stride<Dynamic,3> > M4(M3.data(), M3.rows(), (M3.cols()+2)/3,
Stride<Dynamic,3>(M3.outerStride(),3));
-cout << "1 column over 3:" << endl << M4 << "\n"; \ No newline at end of file
+cout << "1 column over 3:" << endl << M4 << "\n";
diff --git a/doc/snippets/Tutorial_SlicingVec.cpp b/doc/snippets/Tutorial_SlicingVec.cpp
index 07e10bf69..9b822464d 100644
--- a/doc/snippets/Tutorial_SlicingVec.cpp
+++ b/doc/snippets/Tutorial_SlicingVec.cpp
@@ -1,4 +1,4 @@
RowVectorXf v = RowVectorXf::LinSpaced(20,0,19);
cout << "Input:" << endl << v << endl;
Map<RowVectorXf,0,InnerStride<2> > v2(v.data(), v.size()/2);
-cout << "Even:" << v2 << endl; \ No newline at end of file
+cout << "Even:" << v2 << endl;
diff --git a/doc/snippets/Tutorial_range_for_loop_1d_cxx11.cpp b/doc/snippets/Tutorial_range_for_loop_1d_cxx11.cpp
new file mode 100644
index 000000000..e72e715d8
--- /dev/null
+++ b/doc/snippets/Tutorial_range_for_loop_1d_cxx11.cpp
@@ -0,0 +1,4 @@
+VectorXi v = VectorXi::Random(4);
+cout << "Here is the vector v:\n";
+for(auto x : v) cout << x << " ";
+cout << "\n";
diff --git a/doc/snippets/Tutorial_range_for_loop_2d_cxx11.cpp b/doc/snippets/Tutorial_range_for_loop_2d_cxx11.cpp
new file mode 100644
index 000000000..4a12d26c7
--- /dev/null
+++ b/doc/snippets/Tutorial_range_for_loop_2d_cxx11.cpp
@@ -0,0 +1,5 @@
+Matrix2i A = Matrix2i::Random();
+cout << "Here are the coeffs of the 2x2 matrix A:\n";
+for(auto x : A.reshaped())
+ cout << x << " ";
+cout << "\n";
diff --git a/doc/snippets/Tutorial_reshaped_vs_resize_1.cpp b/doc/snippets/Tutorial_reshaped_vs_resize_1.cpp
new file mode 100644
index 000000000..e520e8e6b
--- /dev/null
+++ b/doc/snippets/Tutorial_reshaped_vs_resize_1.cpp
@@ -0,0 +1,5 @@
+MatrixXi m = Matrix4i::Random();
+cout << "Here is the matrix m:" << endl << m << endl;
+cout << "Here is m.reshaped(2, 8):" << endl << m.reshaped(2, 8) << endl;
+m.resize(2,8);
+cout << "Here is the matrix m after m.resize(2,8):" << endl << m << endl;
diff --git a/doc/snippets/Tutorial_reshaped_vs_resize_2.cpp b/doc/snippets/Tutorial_reshaped_vs_resize_2.cpp
new file mode 100644
index 000000000..50dc45488
--- /dev/null
+++ b/doc/snippets/Tutorial_reshaped_vs_resize_2.cpp
@@ -0,0 +1,6 @@
+Matrix<int,Dynamic,Dynamic,RowMajor> m = Matrix4i::Random();
+cout << "Here is the matrix m:" << endl << m << endl;
+cout << "Here is m.reshaped(2, 8):" << endl << m.reshaped(2, 8) << endl;
+cout << "Here is m.reshaped<AutoOrder>(2, 8):" << endl << m.reshaped<AutoOrder>(2, 8) << endl;
+m.resize(2,8);
+cout << "Here is the matrix m after m.resize(2,8):" << endl << m << endl;
diff --git a/doc/snippets/Tutorial_std_sort.cpp b/doc/snippets/Tutorial_std_sort.cpp
new file mode 100644
index 000000000..cde2a6f1b
--- /dev/null
+++ b/doc/snippets/Tutorial_std_sort.cpp
@@ -0,0 +1,4 @@
+Array4i v = Array4i::Random().abs();
+cout << "Here is the initial vector v:\n" << v.transpose() << "\n";
+std::sort(v.begin(), v.end());
+cout << "Here is the sorted vector v:\n" << v.transpose() << "\n";
diff --git a/doc/snippets/Tutorial_std_sort_rows_cxx11.cpp b/doc/snippets/Tutorial_std_sort_rows_cxx11.cpp
new file mode 100644
index 000000000..03641603d
--- /dev/null
+++ b/doc/snippets/Tutorial_std_sort_rows_cxx11.cpp
@@ -0,0 +1,5 @@
+ArrayXXi A = ArrayXXi::Random(4,4).abs();
+cout << "Here is the initial matrix A:\n" << A << "\n";
+for(auto row : A.rowwise())
+ std::sort(row.begin(), row.end());
+cout << "Here is the sorted matrix A:\n" << A << "\n";
diff --git a/doc/snippets/VectorwiseOp_homogeneous.cpp b/doc/snippets/VectorwiseOp_homogeneous.cpp
index aba4fed0e..67cf5737d 100644
--- a/doc/snippets/VectorwiseOp_homogeneous.cpp
+++ b/doc/snippets/VectorwiseOp_homogeneous.cpp
@@ -1,7 +1,6 @@
-typedef Matrix<double,3,Dynamic> Matrix3Xd;
Matrix3Xd M = Matrix3Xd::Random(3,5);
Projective3d P(Matrix4d::Random());
cout << "The matrix M is:" << endl << M << endl << endl;
cout << "M.colwise().homogeneous():" << endl << M.colwise().homogeneous() << endl << endl;
cout << "P * M.colwise().homogeneous():" << endl << P * M.colwise().homogeneous() << endl << endl;
-cout << "P * M.colwise().homogeneous().hnormalized(): " << endl << (P * M.colwise().homogeneous()).colwise().hnormalized() << endl << endl; \ No newline at end of file
+cout << "P * M.colwise().homogeneous().hnormalized(): " << endl << (P * M.colwise().homogeneous()).colwise().hnormalized() << endl << endl;
diff --git a/doc/snippets/compile_snippet.cpp.in b/doc/snippets/compile_snippet.cpp.in
index d63f371a3..c11457a3f 100644
--- a/doc/snippets/compile_snippet.cpp.in
+++ b/doc/snippets/compile_snippet.cpp.in
@@ -15,6 +15,9 @@ using namespace std;
int main(int, char**)
{
cout.precision(3);
- ${snippet_source_code}
+// intentionally remove indentation of snippet
+{
+${snippet_source_code}
+}
return 0;
}
diff --git a/doc/snippets/tut_arithmetic_transpose_aliasing.cpp b/doc/snippets/tut_arithmetic_transpose_aliasing.cpp
index c8e4746d0..f82e6f2ac 100644
--- a/doc/snippets/tut_arithmetic_transpose_aliasing.cpp
+++ b/doc/snippets/tut_arithmetic_transpose_aliasing.cpp
@@ -2,4 +2,4 @@ Matrix2i a; a << 1, 2, 3, 4;
cout << "Here is the matrix a:\n" << a << endl;
a = a.transpose(); // !!! do NOT do this !!!
-cout << "and the result of the aliasing effect:\n" << a << endl; \ No newline at end of file
+cout << "and the result of the aliasing effect:\n" << a << endl;
diff --git a/doc/snippets/tut_arithmetic_transpose_inplace.cpp b/doc/snippets/tut_arithmetic_transpose_inplace.cpp
index 7a069ff23..5c81c9e02 100644
--- a/doc/snippets/tut_arithmetic_transpose_inplace.cpp
+++ b/doc/snippets/tut_arithmetic_transpose_inplace.cpp
@@ -3,4 +3,4 @@ cout << "Here is the initial matrix a:\n" << a << endl;
a.transposeInPlace();
-cout << "and after being transposed:\n" << a << endl; \ No newline at end of file
+cout << "and after being transposed:\n" << a << endl;
diff --git a/doc/special_examples/CMakeLists.txt b/doc/special_examples/CMakeLists.txt
index 101fbc5f9..5b00e8b1a 100644
--- a/doc/special_examples/CMakeLists.txt
+++ b/doc/special_examples/CMakeLists.txt
@@ -3,7 +3,7 @@ if(NOT EIGEN_TEST_NOQT)
if(QT4_FOUND)
include(${QT_USE_FILE})
endif()
-endif(NOT EIGEN_TEST_NOQT)
+endif()
if(QT4_FOUND)
add_executable(Tutorial_sparse_example Tutorial_sparse_example.cpp Tutorial_sparse_example_details.cpp)
@@ -17,9 +17,8 @@ if(QT4_FOUND)
)
add_dependencies(all_examples Tutorial_sparse_example)
-endif(QT4_FOUND)
+endif()
-check_cxx_compiler_flag("-std=c++11" EIGEN_COMPILER_SUPPORT_CPP11)
if(EIGEN_COMPILER_SUPPORT_CPP11)
add_executable(random_cpp11 random_cpp11.cpp)
target_link_libraries(random_cpp11 ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO})
diff --git a/doc/special_examples/Tutorial_sparse_example.cpp b/doc/special_examples/Tutorial_sparse_example.cpp
index 830e196ea..8850db052 100644
--- a/doc/special_examples/Tutorial_sparse_example.cpp
+++ b/doc/special_examples/Tutorial_sparse_example.cpp
@@ -1,5 +1,6 @@
#include <Eigen/Sparse>
#include <vector>
+#include <iostream>
typedef Eigen::SparseMatrix<double> SpMat; // declares a column-major sparse matrix type of double
typedef Eigen::Triplet<double> T;
@@ -9,10 +10,13 @@ void saveAsBitmap(const Eigen::VectorXd& x, int n, const char* filename);
int main(int argc, char** argv)
{
- assert(argc==2);
+ if(argc!=2) {
+ std::cerr << "Error: expected one and only one argument.\n";
+ return -1;
+ }
int n = 300; // size of the image
- int m = n*n; // number of unknows (=number of pixels)
+ int m = n*n; // number of unknowns (=number of pixels)
// Assembly:
std::vector<T> coefficients; // list of non-zeros coefficients