Diffstat (limited to 'unsupported/Eigen')
-rw-r--r--  unsupported/Eigen/AdolcForward | 2
-rw-r--r--  unsupported/Eigen/AlignedVector3 | 5
-rw-r--r--  unsupported/Eigen/ArpackSupport | 31
-rw-r--r--  unsupported/Eigen/AutoDiff | 2
-rw-r--r--  unsupported/Eigen/BVH | 2
-rw-r--r--  unsupported/Eigen/CMakeLists.txt | 2
-rw-r--r--  unsupported/Eigen/FFT | 2
-rw-r--r--  unsupported/Eigen/IterativeSolvers | 7
-rw-r--r--  unsupported/Eigen/KroneckerProduct | 10
-rw-r--r--  unsupported/Eigen/LevenbergMarquardt | 45
-rw-r--r--  unsupported/Eigen/MPRealSupport | 179
-rw-r--r--  unsupported/Eigen/MatrixFunctions | 93
-rw-r--r--  unsupported/Eigen/MoreVectorization | 10
-rw-r--r--  unsupported/Eigen/NonLinearOptimization | 4
-rw-r--r--  unsupported/Eigen/NumericalDiff | 2
-rw-r--r--  unsupported/Eigen/OpenGLSupport | 9
-rw-r--r--  unsupported/Eigen/Polynomials | 13
-rw-r--r--  unsupported/Eigen/SVD | 39
-rw-r--r--  unsupported/Eigen/Skyline | 10
-rw-r--r--  unsupported/Eigen/SparseExtra | 11
-rw-r--r--  unsupported/Eigen/Splines | 2
-rw-r--r--  unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h | 38
-rw-r--r--  unsupported/Eigen/src/AutoDiff/AutoDiffVector.h | 4
-rw-r--r--  unsupported/Eigen/src/BVH/BVAlgorithms.h | 2
-rw-r--r--  unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h | 805
-rw-r--r--  unsupported/Eigen/src/FFT/ei_kissfft_impl.h | 2
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h | 14
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/DGMRES.h | 542
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/GMRES.h | 16
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h | 278
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/IterationController.h | 7
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/MINRES.h | 302
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/Scaling.h | 22
-rw-r--r--  unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h | 281
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/CopyrightMINPACK.txt | 52
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h | 85
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h | 202
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/LMpar.h | 160
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h | 189
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h | 377
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h | 47
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h | 19
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h | 111
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixPower.h | 509
-rw-r--r--  unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h | 58
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h | 29
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h | 52
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/chkder.h | 4
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/covar.h | 5
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/dogleg.h | 13
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/fdjac1.h | 5
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/lmpar.h | 18
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/r1updt.h | 8
-rw-r--r--  unsupported/Eigen/src/NonLinearOptimization/rwupdt.h | 2
-rw-r--r--  unsupported/Eigen/src/NumericalDiff/NumericalDiff.h | 10
-rw-r--r--  unsupported/Eigen/src/Polynomials/Companion.h | 1
-rw-r--r--  unsupported/Eigen/src/Polynomials/PolynomialSolver.h | 25
-rw-r--r--  unsupported/Eigen/src/Polynomials/PolynomialUtils.h | 10
-rw-r--r--  unsupported/Eigen/src/SVD/BDCSVD.h | 748
-rw-r--r--  unsupported/Eigen/src/SVD/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/SVD/JacobiSVD.h | 782
-rw-r--r--  unsupported/Eigen/src/SVD/SVDBase.h | 236
-rw-r--r--  unsupported/Eigen/src/SVD/TODOBdcsvd.txt | 29
-rw-r--r--  unsupported/Eigen/src/SVD/doneInBDCSVD.txt | 21
-rw-r--r--  unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h | 8
-rw-r--r--  unsupported/Eigen/src/SparseExtra/MarketIO.h | 6
-rw-r--r--  unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h | 13
-rw-r--r--  unsupported/Eigen/src/Splines/Spline.h | 22
-rw-r--r--  unsupported/Eigen/src/Splines/SplineFitting.h | 3
70 files changed, 6205 insertions(+), 459 deletions(-)
diff --git a/unsupported/Eigen/AdolcForward b/unsupported/Eigen/AdolcForward
index b96f9450e..2627decd0 100644
--- a/unsupported/Eigen/AdolcForward
+++ b/unsupported/Eigen/AdolcForward
@@ -44,7 +44,7 @@
namespace Eigen {
-/** \ingroup Unsupported_modules
+/**
* \defgroup AdolcForward_Module Adolc forward module
* This module provides support for adolc's adouble type in forward mode.
* ADOL-C is a C++ automatic differentiation library,
diff --git a/unsupported/Eigen/AlignedVector3 b/unsupported/Eigen/AlignedVector3
index 8ad0eb477..7b45e6cce 100644
--- a/unsupported/Eigen/AlignedVector3
+++ b/unsupported/Eigen/AlignedVector3
@@ -14,7 +14,7 @@
namespace Eigen {
-/** \ingroup Unsupported_modules
+/**
* \defgroup AlignedVector3_Module Aligned vector3 module
*
* \code
@@ -167,7 +167,8 @@ template<typename _Scalar> class AlignedVector3
inline Scalar norm() const
{
- return internal::sqrt(squaredNorm());
+ using std::sqrt;
+ return sqrt(squaredNorm());
}
inline AlignedVector3 cross(const AlignedVector3& other) const
diff --git a/unsupported/Eigen/ArpackSupport b/unsupported/Eigen/ArpackSupport
new file mode 100644
index 000000000..37a2799ef
--- /dev/null
+++ b/unsupported/Eigen/ArpackSupport
@@ -0,0 +1,31 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_ARPACKSUPPORT_MODULE_H
+#define EIGEN_ARPACKSUPPORT_MODULE_H
+
+#include <Eigen/Core>
+
+#include <Eigen/src/Core/util/DisableStupidWarnings.h>
+
+/** \defgroup ArpackSupport_Module Arpack support module
+ *
+ * This module provides a wrapper to Arpack, a library for sparse eigenvalue decomposition.
+ *
+ * \code
+ * #include <Eigen/ArpackSupport>
+ * \endcode
+ */
+
+#include <Eigen/SparseCholesky>
+#include "src/Eigenvalues/ArpackSelfAdjointEigenSolver.h"
+
+#include <Eigen/src/Core/util/ReenableStupidWarnings.h>
+
+#endif // EIGEN_ARPACKSUPPORT_MODULE_H
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
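For orientation, here is a minimal usage sketch of the solver this new header exposes. It assumes ARPACK itself is installed and linked; the 100x100 identity matrix and the request for the 5 largest-magnitude eigenvalues are placeholders, not part of the patch.

#include <iostream>
#include <Eigen/Sparse>
#include <unsupported/Eigen/ArpackSupport>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;

  // Placeholder self-adjoint matrix; a real application would assemble its own.
  SpMat A(100, 100);
  A.setIdentity();

  // Ask ARPACK for the 5 eigenvalues of largest magnitude ("LM") and their eigenvectors.
  Eigen::ArpackGeneralizedSelfAdjointEigenSolver<SpMat> eigs(A, 5, "LM");

  if (eigs.info() == Eigen::Success)
    std::cout << eigs.eigenvalues().transpose() << std::endl;
  return 0;
}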
diff --git a/unsupported/Eigen/AutoDiff b/unsupported/Eigen/AutoDiff
index 3c73b424e..abf5b7d67 100644
--- a/unsupported/Eigen/AutoDiff
+++ b/unsupported/Eigen/AutoDiff
@@ -12,7 +12,7 @@
namespace Eigen {
-/** \ingroup Unsupported_modules
+/**
* \defgroup AutoDiff_Module Auto Diff module
*
* This module features forward automatic differentiation via a simple
diff --git a/unsupported/Eigen/BVH b/unsupported/Eigen/BVH
index 860a7dd89..0161a5402 100644
--- a/unsupported/Eigen/BVH
+++ b/unsupported/Eigen/BVH
@@ -18,7 +18,7 @@
namespace Eigen {
-/** \ingroup Unsupported_modules
+/**
* \defgroup BVH_Module BVH module
* \brief This module provides generic bounding volume hierarchy algorithms
* and reference tree implementations.
diff --git a/unsupported/Eigen/CMakeLists.txt b/unsupported/Eigen/CMakeLists.txt
index e961e72c5..e06f1238b 100644
--- a/unsupported/Eigen/CMakeLists.txt
+++ b/unsupported/Eigen/CMakeLists.txt
@@ -1,6 +1,6 @@
set(Eigen_HEADERS AdolcForward BVH IterativeSolvers MatrixFunctions MoreVectorization AutoDiff AlignedVector3 Polynomials
FFT NonLinearOptimization SparseExtra IterativeSolvers
- NumericalDiff Skyline MPRealSupport OpenGLSupport KroneckerProduct Splines
+ NumericalDiff Skyline MPRealSupport OpenGLSupport KroneckerProduct Splines LevenbergMarquardt
)
install(FILES
diff --git a/unsupported/Eigen/FFT b/unsupported/Eigen/FFT
index d233c6d5f..2c45b3999 100644
--- a/unsupported/Eigen/FFT
+++ b/unsupported/Eigen/FFT
@@ -16,7 +16,7 @@
#include <Eigen/Core>
-/** \ingroup Unsupported_modules
+/**
* \defgroup FFT_Module Fast Fourier Transform module
*
* \code
diff --git a/unsupported/Eigen/IterativeSolvers b/unsupported/Eigen/IterativeSolvers
index 6c6946d91..aa15403db 100644
--- a/unsupported/Eigen/IterativeSolvers
+++ b/unsupported/Eigen/IterativeSolvers
@@ -12,7 +12,7 @@
#include <Eigen/Sparse>
-/** \ingroup Unsupported_modules
+/**
* \defgroup IterativeSolvers_Module Iterative solvers module
* This module aims to provide various iterative linear and non linear solver algorithms.
* It currently provides:
@@ -27,13 +27,18 @@
#include "../../Eigen/src/misc/Solve.h"
#include "../../Eigen/src/misc/SparseSolve.h"
+#ifndef EIGEN_MPL2_ONLY
#include "src/IterativeSolvers/IterationController.h"
#include "src/IterativeSolvers/ConstrainedConjGrad.h"
+#endif
+
#include "src/IterativeSolvers/IncompleteLU.h"
#include "../../Eigen/Jacobi"
#include "../../Eigen/Householder"
#include "src/IterativeSolvers/GMRES.h"
+#include "src/IterativeSolvers/IncompleteCholesky.h"
//#include "src/IterativeSolvers/SSORPreconditioner.h"
+#include "src/IterativeSolvers/MINRES.h"
//@}
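As a quick illustration of the newly wired-in MINRES solver, a small sketch follows; the identity system is a placeholder for a real symmetric (possibly indefinite) problem, and the solver's default preconditioner is used as-is.

#include <iostream>
#include <Eigen/Sparse>
#include <unsupported/Eigen/IterativeSolvers>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;

  const int n = 100;
  SpMat A(n, n);
  A.setIdentity();                        // placeholder symmetric matrix
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::MINRES<SpMat> minres(A);         // iterative solver for symmetric problems
  Eigen::VectorXd x = minres.solve(b);

  std::cout << "#iterations: " << minres.iterations()
            << "  estimated error: " << minres.error() << std::endl;
  return 0;
}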
diff --git a/unsupported/Eigen/KroneckerProduct b/unsupported/Eigen/KroneckerProduct
index 796e386ad..c932c06a6 100644
--- a/unsupported/Eigen/KroneckerProduct
+++ b/unsupported/Eigen/KroneckerProduct
@@ -1,3 +1,11 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
#ifndef EIGEN_KRONECKER_PRODUCT_MODULE_H
#define EIGEN_KRONECKER_PRODUCT_MODULE_H
@@ -7,7 +15,7 @@
namespace Eigen {
-/** \ingroup Unsupported_modules
+/**
* \defgroup KroneckerProduct_Module KroneckerProduct module
*
* This module contains an experimental Kronecker product implementation.
diff --git a/unsupported/Eigen/LevenbergMarquardt b/unsupported/Eigen/LevenbergMarquardt
new file mode 100644
index 000000000..0fe2680ba
--- /dev/null
+++ b/unsupported/Eigen/LevenbergMarquardt
@@ -0,0 +1,45 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_LEVENBERGMARQUARDT_MODULE
+#define EIGEN_LEVENBERGMARQUARDT_MODULE
+
+// #include <vector>
+
+#include <Eigen/Core>
+#include <Eigen/Jacobi>
+#include <Eigen/QR>
+#include <unsupported/Eigen/NumericalDiff>
+
+#include <Eigen/SparseQR>
+
+/**
+ * \defgroup LevenbergMarquardt_Module Levenberg-Marquardt module
+ *
+ * \code
+ * #include <Eigen/LevenbergMarquardt>
+ * \endcode
+ *
+ *
+ */
+
+#include "Eigen/SparseCore"
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+
+#include "src/LevenbergMarquardt/LMqrsolv.h"
+#include "src/LevenbergMarquardt/LMcovar.h"
+#include "src/LevenbergMarquardt/LMpar.h"
+
+#endif
+
+#include "src/LevenbergMarquardt/LevenbergMarquardt.h"
+#include "src/LevenbergMarquardt/LMonestep.h"
+
+
+#endif // EIGEN_LEVENBERGMARQUARDT_MODULE
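A rough sketch of how the new module is intended to be driven, assuming the DenseFunctor helper and the minimize() interface declared in src/LevenbergMarquardt/LevenbergMarquardt.h; the two-residual functor and the starting point below are purely illustrative.

#include <iostream>
#include <unsupported/Eigen/LevenbergMarquardt>

// Illustrative functor: residuals f0 = x0 - 1, f1 = 10*(x1 - x0^2).
struct my_functor : Eigen::DenseFunctor<double>
{
  my_functor() : Eigen::DenseFunctor<double>(2, 2) {}

  int operator()(const Eigen::VectorXd &x, Eigen::VectorXd &fvec) const
  {
    fvec(0) = x(0) - 1.0;
    fvec(1) = 10.0 * (x(1) - x(0) * x(0));
    return 0;
  }

  // Analytic Jacobian of the residuals.
  int df(const Eigen::VectorXd &x, Eigen::MatrixXd &fjac) const
  {
    fjac(0,0) = 1.0;           fjac(0,1) = 0.0;
    fjac(1,0) = -20.0 * x(0);  fjac(1,1) = 10.0;
    return 0;
  }
};

int main()
{
  Eigen::VectorXd x(2);
  x << -1.0, 1.0;                                  // arbitrary starting point

  my_functor functor;
  Eigen::LevenbergMarquardt<my_functor> lm(functor);
  lm.minimize(x);

  std::cout << "solution: " << x.transpose() << std::endl;  // should end up near (1, 1)
  return 0;
}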
diff --git a/unsupported/Eigen/MPRealSupport b/unsupported/Eigen/MPRealSupport
index 3895623fe..d4b03647d 100644
--- a/unsupported/Eigen/MPRealSupport
+++ b/unsupported/Eigen/MPRealSupport
@@ -1,41 +1,36 @@
// This file is part of a joint effort between Eigen, a lightweight C++ template library
// for linear algebra, and MPFR C++, a C++ interface to MPFR library (http://www.holoborodko.com/pavel/)
//
-// Copyright (C) 2010 Pavel Holoborodko <pavel@holoborodko.com>
+// Copyright (C) 2010-2012 Pavel Holoborodko <pavel@holoborodko.com>
// Copyright (C) 2010 Konstantin Holoborodko <konstantin@holoborodko.com>
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-//
-// Contributors:
-// Brian Gladman, Helmut Jarausch, Fokko Beekhof, Ulrich Mutze, Heinz van Saanen, Pere Constans
#ifndef EIGEN_MPREALSUPPORT_MODULE_H
#define EIGEN_MPREALSUPPORT_MODULE_H
-#include <ctime>
-#include <mpreal.h>
#include <Eigen/Core>
+#include <mpreal.h>
namespace Eigen {
- /** \ingroup Unsupported_modules
- * \defgroup MPRealSupport_Module MPFRC++ Support module
- *
- * \code
- * #include <Eigen/MPRealSupport>
- * \endcode
- *
- * This module provides support for multi precision floating point numbers
- * via the <a href="http://www.holoborodko.com/pavel/mpfr">MPFR C++</a>
- * library which itself is built upon <a href="http://www.mpfr.org/">MPFR</a>/<a href="http://gmplib.org/">GMP</a>.
- *
- * You can find a copy of MPFR C++ that is known to be compatible in the unsupported/test/mpreal folder.
- *
- * Here is an example:
- *
+/**
+ * \defgroup MPRealSupport_Module MPFRC++ Support module
+ * \code
+ * #include <Eigen/MPRealSupport>
+ * \endcode
+ *
+ * This module provides support for multi precision floating point numbers
+ * via the <a href="http://www.holoborodko.com/pavel/mpfr">MPFR C++</a>
+ * library which itself is built upon <a href="http://www.mpfr.org/">MPFR</a>/<a href="http://gmplib.org/">GMP</a>.
+ *
+ * You can find a copy of MPFR C++ that is known to be compatible in the unsupported/test/mpreal folder.
+ *
+ * Here is an example:
+ *
\code
#include <iostream>
#include <Eigen/MPRealSupport>
@@ -59,9 +54,9 @@ int main()
return 0;
}
\endcode
- *
- */
-
+ *
+ */
+
template<> struct NumTraits<mpfr::mpreal>
: GenericNumTraits<mpfr::mpreal>
{
@@ -77,72 +72,132 @@ int main()
typedef mpfr::mpreal Real;
typedef mpfr::mpreal NonInteger;
-
- inline static mpfr::mpreal highest() { return mpfr::mpreal_max(mpfr::mpreal::get_default_prec()); }
- inline static mpfr::mpreal lowest() { return -mpfr::mpreal_max(mpfr::mpreal::get_default_prec()); }
-
- inline static Real epsilon()
- {
- return mpfr::machine_epsilon(mpfr::mpreal::get_default_prec());
- }
- inline static Real dummy_precision()
- {
- unsigned int weak_prec = ((mpfr::mpreal::get_default_prec()-1)*90)/100;
- return mpfr::machine_epsilon(weak_prec);
+
+ inline static Real highest (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::maxval(Precision); }
+ inline static Real lowest (long Precision = mpfr::mpreal::get_default_prec()) { return -mpfr::maxval(Precision); }
+
+ // Constants
+ inline static Real Pi (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_pi(Precision); }
+ inline static Real Euler (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_euler(Precision); }
+ inline static Real Log2 (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_log2(Precision); }
+ inline static Real Catalan (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_catalan(Precision); }
+
+ inline static Real epsilon (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::machine_epsilon(Precision); }
+ inline static Real epsilon (const Real& x) { return mpfr::machine_epsilon(x); }
+
+ inline static Real dummy_precision()
+ {
+ unsigned int weak_prec = ((mpfr::mpreal::get_default_prec()-1) * 90) / 100;
+ return mpfr::machine_epsilon(weak_prec);
}
};
-namespace internal {
+ namespace internal {
- template<> mpfr::mpreal random<mpfr::mpreal>()
+ template<> inline mpfr::mpreal random<mpfr::mpreal>()
{
-#if (MPFR_VERSION >= MPFR_VERSION_NUM(3,0,0))
- static gmp_randstate_t state;
- static bool isFirstTime = true;
-
- if(isFirstTime)
- {
- gmp_randinit_default(state);
- gmp_randseed_ui(state,(unsigned)time(NULL));
- isFirstTime = false;
- }
-
- return mpfr::urandom(state)*2-1;
-#else
- return mpfr::mpreal(random<double>());
-#endif
+ return mpfr::random();
}
- template<> mpfr::mpreal random<mpfr::mpreal>(const mpfr::mpreal& a, const mpfr::mpreal& b)
+ template<> inline mpfr::mpreal random<mpfr::mpreal>(const mpfr::mpreal& a, const mpfr::mpreal& b)
{
return a + (b-a) * random<mpfr::mpreal>();
}
- bool isMuchSmallerThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& prec)
+ inline bool isMuchSmallerThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps)
{
- return mpfr::abs(a) <= mpfr::abs(b) * prec;
+ return mpfr::abs(a) <= mpfr::abs(b) * eps;
}
- inline bool isApprox(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& prec)
+ inline bool isApprox(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps)
{
- return mpfr::abs(a - b) <= (mpfr::min)(mpfr::abs(a), mpfr::abs(b)) * prec;
+ return mpfr::isEqualFuzzy(a,b,eps);
}
- inline bool isApproxOrLessThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& prec)
+ inline bool isApproxOrLessThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps)
{
- return a <= b || isApprox(a, b, prec);
+ return a <= b || mpfr::isEqualFuzzy(a,b,eps);
}
-
+
template<> inline long double cast<mpfr::mpreal,long double>(const mpfr::mpreal& x)
{ return x.toLDouble(); }
+
template<> inline double cast<mpfr::mpreal,double>(const mpfr::mpreal& x)
{ return x.toDouble(); }
+
template<> inline long cast<mpfr::mpreal,long>(const mpfr::mpreal& x)
{ return x.toLong(); }
+
template<> inline int cast<mpfr::mpreal,int>(const mpfr::mpreal& x)
{ return int(x.toLong()); }
-} // end namespace internal
+ // Specialize GEBP kernel and traits for mpreal (no need for peeling, nor complicated stuff)
+ // This also permits to directly call mpfr's routines and avoid many temporaries produced by mpreal
+ template<>
+ class gebp_traits<mpfr::mpreal, mpfr::mpreal, false, false>
+ {
+ public:
+ typedef mpfr::mpreal ResScalar;
+ enum {
+ nr = 2, // must be 2 for proper packing...
+ mr = 1,
+ WorkSpaceFactor = nr,
+ LhsProgress = 1,
+ RhsProgress = 1
+ };
+ };
+
+ template<typename Index, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
+ struct gebp_kernel<mpfr::mpreal,mpfr::mpreal,Index,mr,nr,ConjugateLhs,ConjugateRhs>
+ {
+ typedef mpfr::mpreal mpreal;
+
+ EIGEN_DONT_INLINE
+ void operator()(mpreal* res, Index resStride, const mpreal* blockA, const mpreal* blockB, Index rows, Index depth, Index cols, mpreal alpha,
+ Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, mpreal* /*unpackedB*/ = 0)
+ {
+ mpreal acc1, acc2, tmp;
+
+ if(strideA==-1) strideA = depth;
+ if(strideB==-1) strideB = depth;
+
+ for(Index j=0; j<cols; j+=nr)
+ {
+ Index actual_nr = (std::min<Index>)(nr,cols-j);
+ mpreal *C1 = res + j*resStride;
+ mpreal *C2 = res + (j+1)*resStride;
+ for(Index i=0; i<rows; i++)
+ {
+ mpreal *B = const_cast<mpreal*>(blockB) + j*strideB + offsetB*actual_nr;
+ mpreal *A = const_cast<mpreal*>(blockA) + i*strideA + offsetA;
+ acc1 = 0;
+ acc2 = 0;
+ for(Index k=0; k<depth; k++)
+ {
+ mpfr_mul(tmp.mpfr_ptr(), A[k].mpfr_ptr(), B[0].mpfr_ptr(), mpreal::get_default_rnd());
+ mpfr_add(acc1.mpfr_ptr(), acc1.mpfr_ptr(), tmp.mpfr_ptr(), mpreal::get_default_rnd());
+
+ if(actual_nr==2) {
+ mpfr_mul(tmp.mpfr_ptr(), A[k].mpfr_ptr(), B[1].mpfr_ptr(), mpreal::get_default_rnd());
+ mpfr_add(acc2.mpfr_ptr(), acc2.mpfr_ptr(), tmp.mpfr_ptr(), mpreal::get_default_rnd());
+ }
+
+ B+=actual_nr;
+ }
+
+ mpfr_mul(acc1.mpfr_ptr(), acc1.mpfr_ptr(), alpha.mpfr_ptr(), mpreal::get_default_rnd());
+ mpfr_add(C1[i].mpfr_ptr(), C1[i].mpfr_ptr(), acc1.mpfr_ptr(), mpreal::get_default_rnd());
+
+ if(actual_nr==2) {
+ mpfr_mul(acc2.mpfr_ptr(), acc2.mpfr_ptr(), alpha.mpfr_ptr(), mpreal::get_default_rnd());
+ mpfr_add(C2[i].mpfr_ptr(), C2[i].mpfr_ptr(), acc2.mpfr_ptr(), mpreal::get_default_rnd());
+ }
+ }
+ }
+ }
+ };
+
+ } // end namespace internal
}
#endif // EIGEN_MPREALSUPPORT_MODULE_H
diff --git a/unsupported/Eigen/MatrixFunctions b/unsupported/Eigen/MatrixFunctions
index 56ab71cd3..0991817d5 100644
--- a/unsupported/Eigen/MatrixFunctions
+++ b/unsupported/Eigen/MatrixFunctions
@@ -2,6 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2009 Jitse Niesen <jitse@maths.leeds.ac.uk>
+// Copyright (C) 2012 Chen-Pang He <jdh8@ms63.hinet.net>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -19,7 +20,7 @@
#include <Eigen/LU>
#include <Eigen/Eigenvalues>
-/** \ingroup Unsupported_modules
+/**
* \defgroup MatrixFunctions_Module Matrix functions module
* \brief This module aims to provide various methods for the computation of
* matrix functions.
@@ -35,6 +36,7 @@
* - \ref matrixbase_cosh "MatrixBase::cosh()", for computing the matrix hyperbolic cosine
* - \ref matrixbase_exp "MatrixBase::exp()", for computing the matrix exponential
* - \ref matrixbase_log "MatrixBase::log()", for computing the matrix logarithm
+ * - \ref matrixbase_pow "MatrixBase::pow()", for computing the matrix power
* - \ref matrixbase_matrixfunction "MatrixBase::matrixFunction()", for computing general matrix functions
* - \ref matrixbase_sin "MatrixBase::sin()", for computing the matrix sine
* - \ref matrixbase_sinh "MatrixBase::sinh()", for computing the matrix hyperbolic sine
@@ -57,19 +59,21 @@
#include "src/MatrixFunctions/MatrixFunction.h"
#include "src/MatrixFunctions/MatrixSquareRoot.h"
#include "src/MatrixFunctions/MatrixLogarithm.h"
-
+#include "src/MatrixFunctions/MatrixPower.h"
/**
-\page matrixbaseextra MatrixBase methods defined in the MatrixFunctions module
+\page matrixbaseextra_page
\ingroup MatrixFunctions_Module
+\section matrixbaseextra MatrixBase methods defined in the MatrixFunctions module
+
The remainder of the page documents the following MatrixBase methods
which are defined in the MatrixFunctions module.
-\section matrixbase_cos MatrixBase::cos()
+\subsection matrixbase_cos MatrixBase::cos()
Compute the matrix cosine.
@@ -86,7 +90,7 @@ This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdSt
-\section matrixbase_cosh MatrixBase::cosh()
+\subsection matrixbase_cosh MatrixBase::cosh()
Compute the matrix hyperbolic cosine.
@@ -103,7 +107,7 @@ This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdSt
-\section matrixbase_exp MatrixBase::exp()
+\subsection matrixbase_exp MatrixBase::exp()
Compute the matrix exponential.
@@ -158,7 +162,7 @@ Output: \verbinclude MatrixExponential.out
\c complex<float>, \c complex<double>, or \c complex<long double> .
-\section matrixbase_log MatrixBase::log()
+\subsection matrixbase_log MatrixBase::log()
Compute the matrix logarithm.
@@ -209,14 +213,77 @@ documentation of \ref matrixbase_exp "exp()".
\include MatrixLogarithm.cpp
Output: \verbinclude MatrixLogarithm.out
-\note \p M has to be a matrix of \c float, \c double, \c long double
-\c complex<float>, \c complex<double>, or \c complex<long double> .
+\note \p M has to be a matrix of \c float, \c double, <tt>long
+double</tt>, \c complex<float>, \c complex<double>, or \c complex<long
+double> .
\sa MatrixBase::exp(), MatrixBase::matrixFunction(),
class MatrixLogarithmAtomic, MatrixBase::sqrt().
-\section matrixbase_matrixfunction MatrixBase::matrixFunction()
+\subsection matrixbase_pow MatrixBase::pow()
+
+Compute the matrix raised to an arbitrary real power.
+
+\code
+const MatrixPowerReturnValue<Derived> MatrixBase<Derived>::pow(RealScalar p) const
+\endcode
+
+\param[in] M base of the matrix power, should be a square matrix.
+\param[in] p exponent of the matrix power, should be real.
+
+The matrix power \f$ M^p \f$ is defined as \f$ \exp(p \log(M)) \f$,
+where exp denotes the matrix exponential, and log denotes the matrix
+logarithm.
+
+The matrix \f$ M \f$ should meet the conditions to be an argument of
+matrix logarithm. If \p p is not of the real scalar type of \p M, it
+is cast to the real scalar type of \p M.
+
+This function computes the matrix power using the Schur-Pad&eacute;
+algorithm as implemented by class MatrixPower. The exponent is split
+into an integral part and a fractional part, where the fractional part
+lies in the interval \f$ (-1, 1) \f$. The main diagonal and the first
+super-diagonal are computed directly.
+
+Details of the algorithm can be found in: Nicholas J. Higham and
+Lijing Lin, "A Schur-Pad&eacute; algorithm for fractional powers of a
+matrix," <em>SIAM J. %Matrix Anal. Applic.</em>,
+<b>32(3)</b>:1056&ndash;1078, 2011.
+
+Example: The following program checks that
+\f[ \left[ \begin{array}{ccc}
+ \cos1 & -\sin1 & 0 \\
+ \sin1 & \cos1 & 0 \\
+ 0 & 0 & 1
+ \end{array} \right]^{\frac14\pi} = \left[ \begin{array}{ccc}
+ \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\
+ \frac12\sqrt2 & \frac12\sqrt2 & 0 \\
+ 0 & 0 & 1
+ \end{array} \right]. \f]
+This corresponds to \f$ \frac14\pi \f$ rotations of 1 radian around
+the z-axis.
+
+\include MatrixPower.cpp
+Output: \verbinclude MatrixPower.out
+
+MatrixBase::pow() is user-friendly. However, there are some
+circumstances under which you should use class MatrixPower directly.
+MatrixPower can save the result of Schur decomposition, so it's
+better for computing various powers for the same matrix.
+
+Example:
+\include MatrixPower_optimal.cpp
+Output: \verbinclude MatrixPower_optimal.out
+
+\note \p M has to be a matrix of \c float, \c double, <tt>long
+double</tt>, \c complex<float>, \c complex<double>, or \c complex<long
+double> .
+
+\sa MatrixBase::exp(), MatrixBase::log(), class MatrixPower.
+
+
+\subsection matrixbase_matrixfunction MatrixBase::matrixFunction()
Compute a matrix function.
@@ -272,7 +339,7 @@ A.matrixFunction(StdStemFunctions<std::complex<double> >::exp, &B);
-\section matrixbase_sin MatrixBase::sin()
+\subsection matrixbase_sin MatrixBase::sin()
Compute the matrix sine.
@@ -290,7 +357,7 @@ Output: \verbinclude MatrixSine.out
-\section matrixbase_sinh MatrixBase::sinh()
+\subsection matrixbase_sinh MatrixBase::sinh()
Compute the matrix hyperbolic sine.
@@ -307,7 +374,7 @@ Example: \include MatrixSinh.cpp
Output: \verbinclude MatrixSinh.out
-\section matrixbase_sqrt MatrixBase::sqrt()
+\subsection matrixbase_sqrt MatrixBase::sqrt()
Compute the matrix square root.
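To make the pow() documentation above concrete, here is a tiny sketch; the diagonal 2x2 matrix is just a case that is easy to verify by hand.

#include <iostream>
#include <unsupported/Eigen/MatrixFunctions>

int main()
{
  Eigen::Matrix2d A;
  A << 2, 0,
       0, 3;

  // A^(1/2) is computed as exp(0.5 * log(A)); for this diagonal matrix
  // the result is diag(sqrt(2), sqrt(3)).
  std::cout << A.pow(0.5) << std::endl;
  return 0;
}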
diff --git a/unsupported/Eigen/MoreVectorization b/unsupported/Eigen/MoreVectorization
index 9f0a39f75..470e72430 100644
--- a/unsupported/Eigen/MoreVectorization
+++ b/unsupported/Eigen/MoreVectorization
@@ -1,3 +1,11 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
#ifndef EIGEN_MOREVECTORIZATION_MODULE_H
#define EIGEN_MOREVECTORIZATION_MODULE_H
@@ -5,7 +13,7 @@
namespace Eigen {
-/** \ingroup Unsupported_modules
+/**
* \defgroup MoreVectorization More vectorization module
*/
diff --git a/unsupported/Eigen/NonLinearOptimization b/unsupported/Eigen/NonLinearOptimization
index cf6ca58f8..600ab4c12 100644
--- a/unsupported/Eigen/NonLinearOptimization
+++ b/unsupported/Eigen/NonLinearOptimization
@@ -1,4 +1,4 @@
-// This file is part of Eugenio, a lightweight C++ template library
+// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
@@ -17,7 +17,7 @@
#include <Eigen/QR>
#include <unsupported/Eigen/NumericalDiff>
-/** \ingroup Unsupported_modules
+/**
* \defgroup NonLinearOptimization_Module Non linear optimization module
*
* \code
diff --git a/unsupported/Eigen/NumericalDiff b/unsupported/Eigen/NumericalDiff
index b3480312d..433334ca8 100644
--- a/unsupported/Eigen/NumericalDiff
+++ b/unsupported/Eigen/NumericalDiff
@@ -14,7 +14,7 @@
namespace Eigen {
-/** \ingroup Unsupported_modules
+/**
* \defgroup NumericalDiff_Module Numerical differentiation module
*
* \code
diff --git a/unsupported/Eigen/OpenGLSupport b/unsupported/Eigen/OpenGLSupport
index e66a425f8..c4090ab11 100644
--- a/unsupported/Eigen/OpenGLSupport
+++ b/unsupported/Eigen/OpenGLSupport
@@ -11,11 +11,16 @@
#define EIGEN_OPENGL_MODULE
#include <Eigen/Geometry>
-#include <GL/gl.h>
+
+#if defined(__APPLE_CC__)
+ #include <OpenGL/gl.h>
+#else
+ #include <GL/gl.h>
+#endif
namespace Eigen {
-/** \ingroup Unsupported_modules
+/**
* \defgroup OpenGLSUpport_Module OpenGL Support module
*
* This module provides wrapper functions for a couple of OpenGL functions
diff --git a/unsupported/Eigen/Polynomials b/unsupported/Eigen/Polynomials
index fa58b006d..cece56337 100644
--- a/unsupported/Eigen/Polynomials
+++ b/unsupported/Eigen/Polynomials
@@ -1,3 +1,11 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
#ifndef EIGEN_POLYNOMIALS_MODULE_H
#define EIGEN_POLYNOMIALS_MODULE_H
@@ -16,11 +24,8 @@
#undef EIGEN_HIDE_HEAVY_CODE
#endif
-/** \ingroup Unsupported_modules
+/**
* \defgroup Polynomials_Module Polynomials module
- *
- *
- *
* \brief This module provides a QR based polynomial solver.
*
* To use this module, add
diff --git a/unsupported/Eigen/SVD b/unsupported/Eigen/SVD
new file mode 100644
index 000000000..7cc059280
--- /dev/null
+++ b/unsupported/Eigen/SVD
@@ -0,0 +1,39 @@
+#ifndef EIGEN_SVD_MODULE_H
+#define EIGEN_SVD_MODULE_H
+
+#include <Eigen/QR>
+#include <Eigen/Householder>
+#include <Eigen/Jacobi>
+
+#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"
+
+/** \defgroup SVD_Module SVD module
+ *
+ *
+ *
+ * This module provides SVD decomposition for matrices (both real and complex).
+ * This decomposition is accessible via the following MatrixBase method:
+ * - MatrixBase::jacobiSvd()
+ *
+ * \code
+ * #include <Eigen/SVD>
+ * \endcode
+ */
+
+#include "../../Eigen/src/misc/Solve.h"
+#include "../../Eigen/src/SVD/UpperBidiagonalization.h"
+#include "src/SVD/SVDBase.h"
+#include "src/SVD/JacobiSVD.h"
+#include "src/SVD/BDCSVD.h"
+#if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT)
+#include "../../Eigen/src/SVD/JacobiSVD_MKL.h"
+#endif
+
+#ifdef EIGEN2_SUPPORT
+#include "../../Eigen/src/Eigen2Support/SVD.h"
+#endif
+
+#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h"
+
+#endif // EIGEN_SVD_MODULE_H
+/* vim: set filetype=cpp et sw=2 ts=2 ai: */
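For orientation, a minimal sketch against the JacobiSVD shipped by this module (now layered on SVDBase). The random 4x3 matrix is arbitrary, and the unsupported header is included instead of the main Eigen/SVD one so that only one JacobiSVD definition is pulled in.

#include <iostream>
#include <Eigen/Core>
#include <unsupported/Eigen/SVD>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 3);

  // Thin U and V are enough to reconstruct A and to solve least-squares problems.
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);

  std::cout << "singular values: " << svd.singularValues().transpose() << std::endl;
  std::cout << "reconstruction error: "
            << (svd.matrixU() * svd.singularValues().asDiagonal()
                * svd.matrixV().transpose() - A).norm()
            << std::endl;
  return 0;
}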
diff --git a/unsupported/Eigen/Skyline b/unsupported/Eigen/Skyline
index c9823f358..71a68cb42 100644
--- a/unsupported/Eigen/Skyline
+++ b/unsupported/Eigen/Skyline
@@ -1,3 +1,11 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
#ifndef EIGEN_SKYLINE_MODULE_H
#define EIGEN_SKYLINE_MODULE_H
@@ -11,7 +19,7 @@
#include <cstring>
#include <algorithm>
-/** \ingroup Unsupported_modules
+/**
* \defgroup Skyline_Module Skyline module
*
*
diff --git a/unsupported/Eigen/SparseExtra b/unsupported/Eigen/SparseExtra
index 340c34736..b5597902a 100644
--- a/unsupported/Eigen/SparseExtra
+++ b/unsupported/Eigen/SparseExtra
@@ -1,3 +1,12 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <g.gael@free.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
#ifndef EIGEN_SPARSE_EXTRA_MODULE_H
#define EIGEN_SPARSE_EXTRA_MODULE_H
@@ -17,7 +26,7 @@
#include <google/dense_hash_map>
#endif
-/** \ingroup Unsupported_modules
+/**
* \defgroup SparseExtra_Module SparseExtra module
*
* This module contains some experimental features extending the sparse module.
diff --git a/unsupported/Eigen/Splines b/unsupported/Eigen/Splines
index 801cec1a1..322e6b9f5 100644
--- a/unsupported/Eigen/Splines
+++ b/unsupported/Eigen/Splines
@@ -12,7 +12,7 @@
namespace Eigen
{
-/** \ingroup Unsupported_modules
+/**
* \defgroup Splines_Module Spline and spline fitting module
*
* This module provides a simple multi-dimensional spline class while
diff --git a/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h b/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
index b833df3c0..8d42e69b9 100644
--- a/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
+++ b/unsupported/Eigen/src/AutoDiff/AutoDiffScalar.h
@@ -47,8 +47,8 @@ template<typename _DerType, bool Enable> struct auto_diff_special_op;
*
* It supports the following list of global math function:
* - std::abs, std::sqrt, std::pow, std::exp, std::log, std::sin, std::cos,
- * - internal::abs, internal::sqrt, internal::pow, internal::exp, internal::log, internal::sin, internal::cos,
- * - internal::conj, internal::real, internal::imag, internal::abs2.
+ * - internal::abs, internal::sqrt, numext::pow, internal::exp, internal::log, internal::sin, internal::cos,
+ * - internal::conj, internal::real, internal::imag, numext::abs2.
*
* AutoDiffScalar can be used as the scalar type of an Eigen::Matrix object. However,
* in that case, the expression template mechanism only occurs at the top Matrix level,
@@ -489,20 +489,32 @@ struct make_coherent_impl<Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows,
}
};
-template<typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols> struct scalar_product_traits<Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols>,A_Scalar>
+template<typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols>
+struct scalar_product_traits<Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols>,A_Scalar>
{
- typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> ReturnType;
+ enum { Defined = 1 };
+ typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> ReturnType;
};
-template<typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols> struct scalar_product_traits<A_Scalar, Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> >
+template<typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols>
+struct scalar_product_traits<A_Scalar, Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> >
{
- typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> ReturnType;
+ enum { Defined = 1 };
+ typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> ReturnType;
};
template<typename DerType>
struct scalar_product_traits<AutoDiffScalar<DerType>,typename DerType::Scalar>
{
- typedef AutoDiffScalar<DerType> ReturnType;
+ enum { Defined = 1 };
+ typedef AutoDiffScalar<DerType> ReturnType;
+};
+
+template<typename DerType>
+struct scalar_product_traits<typename DerType::Scalar,AutoDiffScalar<DerType> >
+{
+ enum { Defined = 1 };
+ typedef AutoDiffScalar<DerType> ReturnType;
};
} // end namespace internal
@@ -532,14 +544,12 @@ inline AutoDiffScalar<DerType> (min)(const T& x, const AutoDiffScalar<DerType>&
template<typename DerType, typename T>
inline AutoDiffScalar<DerType> (max)(const T& x, const AutoDiffScalar<DerType>& y) { return (x > y ? x : y); }
-#define sign(x) x >= 0 ? 1 : -1 // required for abs function below
-
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(abs,
using std::abs;
- return ReturnType(abs(x.value()), x.derivatives() * (sign(x.value())));)
+ return ReturnType(abs(x.value()), x.derivatives() * (x.value()<0 ? -1 : 1) );)
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(abs2,
- using internal::abs2;
+ using numext::abs2;
return ReturnType(abs2(x.value()), x.derivatives() * (Scalar(2)*x.value()));)
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sqrt,
@@ -602,17 +612,17 @@ atan2(const AutoDiffScalar<DerTypeA>& a, const AutoDiffScalar<DerTypeB>& b)
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(tan,
using std::tan;
using std::cos;
- return ReturnType(tan(x.value()),x.derivatives() * (Scalar(1)/internal::abs2(cos(x.value()))));)
+ return ReturnType(tan(x.value()),x.derivatives() * (Scalar(1)/numext::abs2(cos(x.value()))));)
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(asin,
using std::sqrt;
using std::asin;
- return ReturnType(asin(x.value()),x.derivatives() * (Scalar(1)/sqrt(1-internal::abs2(x.value()))));)
+ return ReturnType(asin(x.value()),x.derivatives() * (Scalar(1)/sqrt(1-numext::abs2(x.value()))));)
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(acos,
using std::sqrt;
using std::acos;
- return ReturnType(acos(x.value()),x.derivatives() * (Scalar(-1)/sqrt(1-internal::abs2(x.value()))));)
+ return ReturnType(acos(x.value()),x.derivatives() * (Scalar(-1)/sqrt(1-numext::abs2(x.value()))));)
#undef EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY
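A short sketch of how AutoDiffScalar propagates the derivative formulas touched above; the two-variable function f is arbitrary.

#include <iostream>
#include <unsupported/Eigen/AutoDiff>

int main()
{
  typedef Eigen::AutoDiffScalar<Eigen::VectorXd> ADScalar;

  // Two independent variables: x = 2 seeds derivative slot 0, y = 3 seeds slot 1.
  ADScalar x(2.0, 2, 0);
  ADScalar y(3.0, 2, 1);

  ADScalar f = x * y + sin(x);   // Eigen's sin overload is found via ADL

  std::cout << "f(x,y)        = " << f.value() << std::endl;
  std::cout << "(df/dx df/dy) = " << f.derivatives().transpose() << std::endl; // (y + cos(x), x)
  return 0;
}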
diff --git a/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h b/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h
index 0540add0a..8c2d04830 100644
--- a/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h
+++ b/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h
@@ -21,8 +21,8 @@ namespace Eigen {
*
* It supports the following list of global math function:
* - std::abs, std::sqrt, std::pow, std::exp, std::log, std::sin, std::cos,
- * - internal::abs, internal::sqrt, internal::pow, internal::exp, internal::log, internal::sin, internal::cos,
- * - internal::conj, internal::real, internal::imag, internal::abs2.
+ * - internal::abs, internal::sqrt, numext::pow, internal::exp, internal::log, internal::sin, internal::cos,
+ * - internal::conj, internal::real, internal::imag, numext::abs2.
*
* AutoDiffScalar can be used as the scalar type of an Eigen::Matrix object. However,
* in that case, the expression template mechanism only occurs at the top Matrix level,
diff --git a/unsupported/Eigen/src/BVH/BVAlgorithms.h b/unsupported/Eigen/src/BVH/BVAlgorithms.h
index e5b51decb..994c8af54 100644
--- a/unsupported/Eigen/src/BVH/BVAlgorithms.h
+++ b/unsupported/Eigen/src/BVH/BVAlgorithms.h
@@ -189,7 +189,7 @@ struct minimizer_helper1
Object2 stored;
Minimizer &minimizer;
private:
- minimizer_helper1& operator=(const minimizer_helper1&) {}
+ minimizer_helper1& operator=(const minimizer_helper1&);
};
template<typename Volume2, typename Object2, typename Object1, typename Minimizer>
diff --git a/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h b/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h
new file mode 100644
index 000000000..3b6a69aff
--- /dev/null
+++ b/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h
@@ -0,0 +1,805 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 David Harmon <dharmon@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_ARPACKGENERALIZEDSELFADJOINTEIGENSOLVER_H
+#define EIGEN_ARPACKGENERALIZEDSELFADJOINTEIGENSOLVER_H
+
+#include <Eigen/Dense>
+
+namespace Eigen {
+
+namespace internal {
+ template<typename Scalar, typename RealScalar> struct arpack_wrapper;
+ template<typename MatrixSolver, typename MatrixType, typename Scalar, bool BisSPD> struct OP;
+}
+
+
+
+template<typename MatrixType, typename MatrixSolver=SimplicialLLT<MatrixType>, bool BisSPD=false>
+class ArpackGeneralizedSelfAdjointEigenSolver
+{
+public:
+ //typedef typename MatrixSolver::MatrixType MatrixType;
+
+ /** \brief Scalar type for matrices of type \p MatrixType. */
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+
+ /** \brief Real scalar type for \p MatrixType.
+ *
+ * This is just \c Scalar if #Scalar is real (e.g., \c float or
+ * \c Scalar), and the type of the real part of \c Scalar if #Scalar is
+ * complex.
+ */
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ /** \brief Type for vector of eigenvalues as returned by eigenvalues().
+ *
+ * This is a column vector with entries of type #RealScalar.
+ * The length of the vector is \p nbrEigenvalues.
+ */
+ typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVectorType;
+
+ /** \brief Default constructor.
+ *
+ * The default constructor is for cases in which the user intends to
+ * perform decompositions via compute().
+ *
+ */
+ ArpackGeneralizedSelfAdjointEigenSolver()
+ : m_eivec(),
+ m_eivalues(),
+ m_isInitialized(false),
+ m_eigenvectorsOk(false),
+ m_nbrConverged(0),
+ m_nbrIterations(0)
+ { }
+
+ /** \brief Constructor; computes generalized eigenvalues of given matrix with respect to another matrix.
+ *
+ * \param[in] A Self-adjoint matrix whose eigenvalues / eigenvectors will be
+ * computed. By default, the upper triangular part is used, but can be changed
+ * through the template parameter.
+ * \param[in] B Self-adjoint matrix for the generalized eigenvalue problem.
+ * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute.
+ * Must be less than the size of the input matrix, or an error is returned.
+ * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with
+ * respective meanings to find the largest magnitude , smallest magnitude,
+ * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this
+ * value can contain floating point value in string form, in which case the
+ * eigenvalues closest to this value will be found.
+ * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
+ * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which
+ * means machine precision.
+ *
+ * This constructor calls compute(const MatrixType&, const MatrixType&, Index, string, int, RealScalar)
+ * to compute the eigenvalues of the matrix \p A with respect to \p B. The eigenvectors are computed if
+ * \p options equals #ComputeEigenvectors.
+ *
+ */
+ ArpackGeneralizedSelfAdjointEigenSolver(const MatrixType& A, const MatrixType& B,
+ Index nbrEigenvalues, std::string eigs_sigma="LM",
+ int options=ComputeEigenvectors, RealScalar tol=0.0)
+ : m_eivec(),
+ m_eivalues(),
+ m_isInitialized(false),
+ m_eigenvectorsOk(false),
+ m_nbrConverged(0),
+ m_nbrIterations(0)
+ {
+ compute(A, B, nbrEigenvalues, eigs_sigma, options, tol);
+ }
+
+ /** \brief Constructor; computes eigenvalues of given matrix.
+ *
+ * \param[in] A Self-adjoint matrix whose eigenvalues / eigenvectors will be
+ * computed. By default, the upper triangular part is used, but can be changed
+ * through the template parameter.
+ * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute.
+ * Must be less than the size of the input matrix, or an error is returned.
+ * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with
+ * respective meanings to find the largest magnitude , smallest magnitude,
+ * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this
+ * value can contain floating point value in string form, in which case the
+ * eigenvalues closest to this value will be found.
+ * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
+ * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which
+ * means machine precision.
+ *
+ * This constructor calls compute(const MatrixType&, Index, string, int, RealScalar)
+ * to compute the eigenvalues of the matrix \p A. The eigenvectors are computed if
+ * \p options equals #ComputeEigenvectors.
+ *
+ */
+
+ ArpackGeneralizedSelfAdjointEigenSolver(const MatrixType& A,
+ Index nbrEigenvalues, std::string eigs_sigma="LM",
+ int options=ComputeEigenvectors, RealScalar tol=0.0)
+ : m_eivec(),
+ m_eivalues(),
+ m_isInitialized(false),
+ m_eigenvectorsOk(false),
+ m_nbrConverged(0),
+ m_nbrIterations(0)
+ {
+ compute(A, nbrEigenvalues, eigs_sigma, options, tol);
+ }
+
+
+ /** \brief Computes generalized eigenvalues / eigenvectors of given matrix using the external ARPACK library.
+ *
+ * \param[in] A Selfadjoint matrix whose eigendecomposition is to be computed.
+ * \param[in] B Selfadjoint matrix for generalized eigenvalues.
+ * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute.
+ * Must be less than the size of the input matrix, or an error is returned.
+ * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with
+ * respective meanings to find the largest magnitude , smallest magnitude,
+ * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this
+ * value can contain floating point value in string form, in which case the
+ * eigenvalues closest to this value will be found.
+ * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
+ * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which
+ * means machine precision.
+ *
+ * \returns Reference to \c *this
+ *
+ * This function computes the generalized eigenvalues of \p A with respect to \p B using ARPACK. The eigenvalues()
+ * function can be used to retrieve them. If \p options equals #ComputeEigenvectors,
+ * then the eigenvectors are also computed and can be retrieved by
+ * calling eigenvectors().
+ *
+ */
+ ArpackGeneralizedSelfAdjointEigenSolver& compute(const MatrixType& A, const MatrixType& B,
+ Index nbrEigenvalues, std::string eigs_sigma="LM",
+ int options=ComputeEigenvectors, RealScalar tol=0.0);
+
+ /** \brief Computes eigenvalues / eigenvectors of given matrix using the external ARPACK library.
+ *
+ * \param[in] A Selfadjoint matrix whose eigendecomposition is to be computed.
+ * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute.
+ * Must be less than the size of the input matrix, or an error is returned.
+ * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with
+ * respective meanings to find the largest magnitude , smallest magnitude,
+ * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this
+ * value can contain floating point value in string form, in which case the
+ * eigenvalues closest to this value will be found.
+ * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
+ * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which
+ * means machine precision.
+ *
+ * \returns Reference to \c *this
+ *
+ * This function computes the eigenvalues of \p A using ARPACK. The eigenvalues()
+ * function can be used to retrieve them. If \p options equals #ComputeEigenvectors,
+ * then the eigenvectors are also computed and can be retrieved by
+ * calling eigenvectors().
+ *
+ */
+ ArpackGeneralizedSelfAdjointEigenSolver& compute(const MatrixType& A,
+ Index nbrEigenvalues, std::string eigs_sigma="LM",
+ int options=ComputeEigenvectors, RealScalar tol=0.0);
+
+
+ /** \brief Returns the eigenvectors of given matrix.
+ *
+ * \returns A const reference to the matrix whose columns are the eigenvectors.
+ *
+ * \pre The eigenvectors have been computed before.
+ *
+ * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
+ * to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The
+ * eigenvectors are normalized to have (Euclidean) norm equal to one. If
+ * this object was used to solve the eigenproblem for the selfadjoint
+ * matrix \f$ A \f$, then the matrix returned by this function is the
+ * matrix \f$ V \f$ in the eigendecomposition \f$ A V = D V \f$.
+ * For the generalized eigenproblem, the matrix returned is the solution \f$ A V = D B V \f$
+ *
+ * Example: \include SelfAdjointEigenSolver_eigenvectors.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out
+ *
+ * \sa eigenvalues()
+ */
+ const Matrix<Scalar, Dynamic, Dynamic>& eigenvectors() const
+ {
+ eigen_assert(m_isInitialized && "ArpackGeneralizedSelfAdjointEigenSolver is not initialized.");
+ eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+ return m_eivec;
+ }
+
+ /** \brief Returns the eigenvalues of given matrix.
+ *
+ * \returns A const reference to the column vector containing the eigenvalues.
+ *
+ * \pre The eigenvalues have been computed before.
+ *
+ * The eigenvalues are repeated according to their algebraic multiplicity,
+ * so there are as many eigenvalues as rows in the matrix. The eigenvalues
+ * are sorted in increasing order.
+ *
+ * Example: \include SelfAdjointEigenSolver_eigenvalues.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out
+ *
+ * \sa eigenvectors(), MatrixBase::eigenvalues()
+ */
+ const Matrix<Scalar, Dynamic, 1>& eigenvalues() const
+ {
+ eigen_assert(m_isInitialized && "ArpackGeneralizedSelfAdjointEigenSolver is not initialized.");
+ return m_eivalues;
+ }
+
+ /** \brief Computes the positive-definite square root of the matrix.
+ *
+ * \returns the positive-definite square root of the matrix
+ *
+ * \pre The eigenvalues and eigenvectors of a positive-definite matrix
+ * have been computed before.
+ *
+ * The square root of a positive-definite matrix \f$ A \f$ is the
+ * positive-definite matrix whose square equals \f$ A \f$. This function
+ * uses the eigendecomposition \f$ A = V D V^{-1} \f$ to compute the
+ * square root as \f$ A^{1/2} = V D^{1/2} V^{-1} \f$.
+ *
+ * Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out
+ *
+ * \sa operatorInverseSqrt(),
+ * \ref MatrixFunctions_Module "MatrixFunctions Module"
+ */
+ Matrix<Scalar, Dynamic, Dynamic> operatorSqrt() const
+ {
+ eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+ eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+ return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint();
+ }
+
+ /** \brief Computes the inverse square root of the matrix.
+ *
+ * \returns the inverse positive-definite square root of the matrix
+ *
+ * \pre The eigenvalues and eigenvectors of a positive-definite matrix
+ * have been computed before.
+ *
+ * This function uses the eigendecomposition \f$ A = V D V^{-1} \f$ to
+ * compute the inverse square root as \f$ V D^{-1/2} V^{-1} \f$. This is
+ * cheaper than first computing the square root with operatorSqrt() and
+ * then its inverse with MatrixBase::inverse().
+ *
+ * Example: \include SelfAdjointEigenSolver_operatorInverseSqrt.cpp
+ * Output: \verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out
+ *
+ * \sa operatorSqrt(), MatrixBase::inverse(),
+ * \ref MatrixFunctions_Module "MatrixFunctions Module"
+ */
+ Matrix<Scalar, Dynamic, Dynamic> operatorInverseSqrt() const
+ {
+ eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
+ eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
+ return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint();
+ }
+
+ /** \brief Reports whether previous computation was successful.
+ *
+ * \returns \c Success if computation was successful, \c NoConvergence otherwise.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "ArpackGeneralizedSelfAdjointEigenSolver is not initialized.");
+ return m_info;
+ }
+
+ size_t getNbrConvergedEigenValues() const
+ { return m_nbrConverged; }
+
+ size_t getNbrIterations() const
+ { return m_nbrIterations; }
+
+protected:
+ Matrix<Scalar, Dynamic, Dynamic> m_eivec;
+ Matrix<Scalar, Dynamic, 1> m_eivalues;
+ ComputationInfo m_info;
+ bool m_isInitialized;
+ bool m_eigenvectorsOk;
+
+ size_t m_nbrConverged;
+ size_t m_nbrIterations;
+};
+
+
+
+
+
+template<typename MatrixType, typename MatrixSolver, bool BisSPD>
+ArpackGeneralizedSelfAdjointEigenSolver<MatrixType, MatrixSolver, BisSPD>&
+ ArpackGeneralizedSelfAdjointEigenSolver<MatrixType, MatrixSolver, BisSPD>
+::compute(const MatrixType& A, Index nbrEigenvalues,
+ std::string eigs_sigma, int options, RealScalar tol)
+{
+ MatrixType B(0,0);
+ compute(A, B, nbrEigenvalues, eigs_sigma, options, tol);
+
+ return *this;
+}
+
+
+template<typename MatrixType, typename MatrixSolver, bool BisSPD>
+ArpackGeneralizedSelfAdjointEigenSolver<MatrixType, MatrixSolver, BisSPD>&
+ ArpackGeneralizedSelfAdjointEigenSolver<MatrixType, MatrixSolver, BisSPD>
+::compute(const MatrixType& A, const MatrixType& B, Index nbrEigenvalues,
+ std::string eigs_sigma, int options, RealScalar tol)
+{
+ eigen_assert(A.cols() == A.rows());
+ eigen_assert(B.cols() == B.rows());
+ eigen_assert(B.rows() == 0 || A.cols() == B.rows());
+ eigen_assert((options &~ (EigVecMask | GenEigMask)) == 0
+ && (options & EigVecMask) != EigVecMask
+ && "invalid option parameter");
+
+ bool isBempty = (B.rows() == 0) || (B.cols() == 0);
+
+ // For clarity, all parameters match their ARPACK name
+ //
+ // Always 0 on the first call
+ //
+ int ido = 0;
+
+ int n = (int)A.cols();
+
+ // User options: "LA", "SA", "SM", "LM", "BE"
+ //
+ char whch[3] = "LM";
+
+ // Specifies the shift if iparam[6] = { 3, 4, 5 }, not used if iparam[6] = { 1, 2 }
+ //
+ RealScalar sigma = 0.0;
+
+ if (eigs_sigma.length() >= 2 && isalpha(eigs_sigma[0]) && isalpha(eigs_sigma[1]))
+ {
+ eigs_sigma[0] = toupper(eigs_sigma[0]);
+ eigs_sigma[1] = toupper(eigs_sigma[1]);
+
+ // In the following special case we're going to invert the problem, since solving
+ // for larger magnitude is much much faster
+ // i.e., if 'SM' is specified, we're going to really use 'LM', the default
+ //
+ if (eigs_sigma.substr(0,2) != "SM")
+ {
+ whch[0] = eigs_sigma[0];
+ whch[1] = eigs_sigma[1];
+ }
+ }
+ else
+ {
+ eigen_assert(false && "Specifying clustered eigenvalues is not yet supported!");
+
+ // If it's not scalar values, then the user may be explicitly
+ // specifying the sigma value to cluster the evs around
+ //
+ sigma = atof(eigs_sigma.c_str());
+
+ // If atof fails, it returns 0.0, which is a fine default
+ //
+ }
+
+ // "I" means normal eigenvalue problem, "G" means generalized
+ //
+ char bmat[2] = "I";
+ if (eigs_sigma.substr(0,2) == "SM" || !(isalpha(eigs_sigma[0]) && isalpha(eigs_sigma[1])) || (!isBempty && !BisSPD))
+ bmat[0] = 'G';
+
+ // Now we determine the mode to use
+ //
+ int mode = (bmat[0] == 'G') + 1;
+ if (eigs_sigma.substr(0,2) == "SM" || !(isalpha(eigs_sigma[0]) && isalpha(eigs_sigma[1])))
+ {
+ // We're going to use shift-and-invert mode, and basically find
+ // the largest eigenvalues of the inverse operator
+ //
+ mode = 3;
+ }
+
+ // The user-specified number of eigenvalues/vectors to compute
+ //
+ int nev = (int)nbrEigenvalues;
+
+ // Allocate space for ARPACK to store the residual
+ //
+ Scalar *resid = new Scalar[n];
+
+ // Number of Lanczos vectors, must satisfy nev < ncv <= n
+ // Note that this indicates that nev != n, and we cannot compute
+  // all eigenvalues of a matrix
+ //
+ int ncv = std::min(std::max(2*nev, 20), n);
+
+ // The working n x ncv matrix, also store the final eigenvectors (if computed)
+ //
+ Scalar *v = new Scalar[n*ncv];
+ int ldv = n;
+
+ // Working space
+ //
+ Scalar *workd = new Scalar[3*n];
+ int lworkl = ncv*ncv+8*ncv; // Must be at least this length
+ Scalar *workl = new Scalar[lworkl];
+
+ int *iparam= new int[11];
+ iparam[0] = 1; // 1 means we let ARPACK perform the shifts, 0 means we'd have to do it
+ iparam[2] = std::max(300, (int)std::ceil(2*n/std::max(ncv,1)));
+ iparam[6] = mode; // The mode, 1 is standard ev problem, 2 for generalized ev, 3 for shift-and-invert
+
+ // Used during reverse communicate to notify where arrays start
+ //
+ int *ipntr = new int[11];
+
+ // Error codes are returned in here, initial value of 0 indicates a random initial
+ // residual vector is used, any other values means resid contains the initial residual
+ // vector, possibly from a previous run
+ //
+ int info = 0;
+
+ Scalar scale = 1.0;
+ //if (!isBempty)
+ //{
+ //Scalar scale = B.norm() / std::sqrt(n);
+ //scale = std::pow(2, std::floor(std::log(scale+1)));
+ ////M /= scale;
+ //for (size_t i=0; i<(size_t)B.outerSize(); i++)
+ // for (typename MatrixType::InnerIterator it(B, i); it; ++it)
+ // it.valueRef() /= scale;
+ //}
+
+ MatrixSolver OP;
+ if (mode == 1 || mode == 2)
+ {
+ if (!isBempty)
+ OP.compute(B);
+ }
+ else if (mode == 3)
+ {
+ if (sigma == 0.0)
+ {
+ OP.compute(A);
+ }
+ else
+ {
+ // Note: We will never enter here because sigma must be 0.0
+ //
+ if (isBempty)
+ {
+ MatrixType AminusSigmaB(A);
+ for (Index i=0; i<A.rows(); ++i)
+ AminusSigmaB.coeffRef(i,i) -= sigma;
+
+ OP.compute(AminusSigmaB);
+ }
+ else
+ {
+ MatrixType AminusSigmaB = A - sigma * B;
+ OP.compute(AminusSigmaB);
+ }
+ }
+ }
+
+ if (!(mode == 1 && isBempty) && !(mode == 2 && isBempty) && OP.info() != Success)
+ std::cout << "Error factoring matrix" << std::endl;
+
+ do
+ {
+ internal::arpack_wrapper<Scalar, RealScalar>::saupd(&ido, bmat, &n, whch, &nev, &tol, resid,
+ &ncv, v, &ldv, iparam, ipntr, workd, workl,
+ &lworkl, &info);
+
+ if (ido == -1 || ido == 1)
+ {
+ Scalar *in = workd + ipntr[0] - 1;
+ Scalar *out = workd + ipntr[1] - 1;
+
+ if (ido == 1 && mode != 2)
+ {
+ Scalar *out2 = workd + ipntr[2] - 1;
+ if (isBempty || mode == 1)
+ Matrix<Scalar, Dynamic, 1>::Map(out2, n) = Matrix<Scalar, Dynamic, 1>::Map(in, n);
+ else
+ Matrix<Scalar, Dynamic, 1>::Map(out2, n) = B * Matrix<Scalar, Dynamic, 1>::Map(in, n);
+
+ in = workd + ipntr[2] - 1;
+ }
+
+ if (mode == 1)
+ {
+ if (isBempty)
+ {
+ // OP = A
+ //
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = A * Matrix<Scalar, Dynamic, 1>::Map(in, n);
+ }
+ else
+ {
+ // OP = L^{-1}AL^{-T}
+ //
+ internal::OP<MatrixSolver, MatrixType, Scalar, BisSPD>::applyOP(OP, A, n, in, out);
+ }
+ }
+ else if (mode == 2)
+ {
+ if (ido == 1)
+ Matrix<Scalar, Dynamic, 1>::Map(in, n) = A * Matrix<Scalar, Dynamic, 1>::Map(in, n);
+
+ // OP = B^{-1} A
+ //
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = OP.solve(Matrix<Scalar, Dynamic, 1>::Map(in, n));
+ }
+ else if (mode == 3)
+ {
+        // OP = (A - \sigma B)^{-1} B  (\sigma could be 0, and B could be I)
+ // The B * in is already computed and stored at in if ido == 1
+ //
+ if (ido == 1 || isBempty)
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = OP.solve(Matrix<Scalar, Dynamic, 1>::Map(in, n));
+ else
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = OP.solve(B * Matrix<Scalar, Dynamic, 1>::Map(in, n));
+ }
+ }
+ else if (ido == 2)
+ {
+ Scalar *in = workd + ipntr[0] - 1;
+ Scalar *out = workd + ipntr[1] - 1;
+
+ if (isBempty || mode == 1)
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = Matrix<Scalar, Dynamic, 1>::Map(in, n);
+ else
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = B * Matrix<Scalar, Dynamic, 1>::Map(in, n);
+ }
+ } while (ido != 99);
+
+ if (info == 1)
+ m_info = NoConvergence;
+ else if (info == 3)
+ m_info = NumericalIssue;
+ else if (info < 0)
+ m_info = InvalidInput;
+ else if (info != 0)
+ eigen_assert(false && "Unknown ARPACK return value!");
+ else
+ {
+ // Do we compute eigenvectors or not?
+ //
+ int rvec = (options & ComputeEigenvectors) == ComputeEigenvectors;
+
+    // "A" means "All", use "S" to choose specific eigenvalues (not yet supported in ARPACK)
+ //
+ char howmny[2] = "A";
+
+ // if howmny == "S", specifies the eigenvalues to compute (not implemented in ARPACK)
+ //
+ int *select = new int[ncv];
+
+ // Final eigenvalues
+ //
+ m_eivalues.resize(nev, 1);
+
+ internal::arpack_wrapper<Scalar, RealScalar>::seupd(&rvec, howmny, select, m_eivalues.data(), v, &ldv,
+ &sigma, bmat, &n, whch, &nev, &tol, resid, &ncv,
+ v, &ldv, iparam, ipntr, workd, workl, &lworkl, &info);
+
+ if (info == -14)
+ m_info = NoConvergence;
+ else if (info != 0)
+ m_info = InvalidInput;
+ else
+ {
+ if (rvec)
+ {
+ m_eivec.resize(A.rows(), nev);
+ for (int i=0; i<nev; i++)
+ for (int j=0; j<n; j++)
+ m_eivec(j,i) = v[i*n+j] / scale;
+
+ if (mode == 1 && !isBempty && BisSPD)
+ internal::OP<MatrixSolver, MatrixType, Scalar, BisSPD>::project(OP, n, nev, m_eivec.data());
+
+ m_eigenvectorsOk = true;
+ }
+
+ m_nbrIterations = iparam[2];
+ m_nbrConverged = iparam[4];
+
+ m_info = Success;
+ }
+
+    delete[] select;
+ }
+
+  delete[] v;
+  delete[] iparam;
+  delete[] ipntr;
+  delete[] workd;
+  delete[] workl;
+  delete[] resid;
+
+ m_isInitialized = true;
+
+ return *this;
+}
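+
+// A minimal usage sketch of the solver defined in this header. The class name
+// ArpackGeneralizedSelfAdjointEigenSolver and the constructor signature shown here
+// are assumed from the declaration earlier in this file (not shown in this hunk);
+// adapt them to the actual declaration if it differs.
+//
+// \code
+// SparseMatrix<double> A;                  // real symmetric matrix, filled elsewhere
+// // Compute the 3 eigenvalues of largest magnitude ("LM") and their eigenvectors
+// ArpackGeneralizedSelfAdjointEigenSolver<SparseMatrix<double> > solver(A, 3, "LM", ComputeEigenvectors);
+// if (solver.info() == Success)
+//   std::cout << solver.eigenvalues().transpose() << std::endl;
+// \endcode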
+
+
+// Single precision
+//
+extern "C" void ssaupd_(int *ido, char *bmat, int *n, char *which,
+ int *nev, float *tol, float *resid, int *ncv,
+ float *v, int *ldv, int *iparam, int *ipntr,
+ float *workd, float *workl, int *lworkl,
+ int *info);
+
+extern "C" void sseupd_(int *rvec, char *All, int *select, float *d,
+ float *z, int *ldz, float *sigma,
+ char *bmat, int *n, char *which, int *nev,
+ float *tol, float *resid, int *ncv, float *v,
+ int *ldv, int *iparam, int *ipntr, float *workd,
+ float *workl, int *lworkl, int *ierr);
+
+// Double precision
+//
+extern "C" void dsaupd_(int *ido, char *bmat, int *n, char *which,
+ int *nev, double *tol, double *resid, int *ncv,
+ double *v, int *ldv, int *iparam, int *ipntr,
+ double *workd, double *workl, int *lworkl,
+ int *info);
+
+extern "C" void dseupd_(int *rvec, char *All, int *select, double *d,
+ double *z, int *ldz, double *sigma,
+ char *bmat, int *n, char *which, int *nev,
+ double *tol, double *resid, int *ncv, double *v,
+ int *ldv, int *iparam, int *ipntr, double *workd,
+ double *workl, int *lworkl, int *ierr);
+
+
+namespace internal {
+
+template<typename Scalar, typename RealScalar> struct arpack_wrapper
+{
+ static inline void saupd(int *ido, char *bmat, int *n, char *which,
+ int *nev, RealScalar *tol, Scalar *resid, int *ncv,
+ Scalar *v, int *ldv, int *iparam, int *ipntr,
+ Scalar *workd, Scalar *workl, int *lworkl, int *info)
+ {
+ EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL)
+ }
+
+ static inline void seupd(int *rvec, char *All, int *select, Scalar *d,
+ Scalar *z, int *ldz, RealScalar *sigma,
+ char *bmat, int *n, char *which, int *nev,
+ RealScalar *tol, Scalar *resid, int *ncv, Scalar *v,
+ int *ldv, int *iparam, int *ipntr, Scalar *workd,
+ Scalar *workl, int *lworkl, int *ierr)
+ {
+ EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL)
+ }
+};
+
+template <> struct arpack_wrapper<float, float>
+{
+ static inline void saupd(int *ido, char *bmat, int *n, char *which,
+ int *nev, float *tol, float *resid, int *ncv,
+ float *v, int *ldv, int *iparam, int *ipntr,
+ float *workd, float *workl, int *lworkl, int *info)
+ {
+ ssaupd_(ido, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, workd, workl, lworkl, info);
+ }
+
+ static inline void seupd(int *rvec, char *All, int *select, float *d,
+ float *z, int *ldz, float *sigma,
+ char *bmat, int *n, char *which, int *nev,
+ float *tol, float *resid, int *ncv, float *v,
+ int *ldv, int *iparam, int *ipntr, float *workd,
+ float *workl, int *lworkl, int *ierr)
+ {
+ sseupd_(rvec, All, select, d, z, ldz, sigma, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr,
+ workd, workl, lworkl, ierr);
+ }
+};
+
+template <> struct arpack_wrapper<double, double>
+{
+ static inline void saupd(int *ido, char *bmat, int *n, char *which,
+ int *nev, double *tol, double *resid, int *ncv,
+ double *v, int *ldv, int *iparam, int *ipntr,
+ double *workd, double *workl, int *lworkl, int *info)
+ {
+ dsaupd_(ido, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, workd, workl, lworkl, info);
+ }
+
+ static inline void seupd(int *rvec, char *All, int *select, double *d,
+ double *z, int *ldz, double *sigma,
+ char *bmat, int *n, char *which, int *nev,
+ double *tol, double *resid, int *ncv, double *v,
+ int *ldv, int *iparam, int *ipntr, double *workd,
+ double *workl, int *lworkl, int *ierr)
+ {
+    dseupd_(rvec, All, select, d, z, ldz, sigma, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr,
+ workd, workl, lworkl, ierr);
+ }
+};
+
+
+template<typename MatrixSolver, typename MatrixType, typename Scalar, bool BisSPD>
+struct OP
+{
+ static inline void applyOP(MatrixSolver &OP, const MatrixType &A, int n, Scalar *in, Scalar *out);
+ static inline void project(MatrixSolver &OP, int n, int k, Scalar *vecs);
+};
+
+template<typename MatrixSolver, typename MatrixType, typename Scalar>
+struct OP<MatrixSolver, MatrixType, Scalar, true>
+{
+ static inline void applyOP(MatrixSolver &OP, const MatrixType &A, int n, Scalar *in, Scalar *out)
+{
+ // OP = L^{-1} A L^{-T} (B = LL^T)
+ //
+ // First solve L^T out = in
+ //
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = OP.matrixU().solve(Matrix<Scalar, Dynamic, 1>::Map(in, n));
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = OP.permutationPinv() * Matrix<Scalar, Dynamic, 1>::Map(out, n);
+
+ // Then compute out = A out
+ //
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = A * Matrix<Scalar, Dynamic, 1>::Map(out, n);
+
+ // Then solve L out = out
+ //
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = OP.permutationP() * Matrix<Scalar, Dynamic, 1>::Map(out, n);
+ Matrix<Scalar, Dynamic, 1>::Map(out, n) = OP.matrixL().solve(Matrix<Scalar, Dynamic, 1>::Map(out, n));
+}
+
+ static inline void project(MatrixSolver &OP, int n, int k, Scalar *vecs)
+{
+ // Solve L^T out = in
+ //
+ Matrix<Scalar, Dynamic, Dynamic>::Map(vecs, n, k) = OP.matrixU().solve(Matrix<Scalar, Dynamic, Dynamic>::Map(vecs, n, k));
+ Matrix<Scalar, Dynamic, Dynamic>::Map(vecs, n, k) = OP.permutationPinv() * Matrix<Scalar, Dynamic, Dynamic>::Map(vecs, n, k);
+}
+
+};
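+
+// Up to the fill-reducing permutation P of the Cholesky solver (B = P^{-1} L L^T P),
+// applyOP() above applies OP = L^{-1} P A P^{-1} L^{-T}: the generalized problem
+// A x = lambda B x is equivalent to the standard problem OP y = lambda y with
+// y = L^T P x, and project() recovers x = P^{-1} L^{-T} y from the converged
+// Ritz vectors.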
+
+template<typename MatrixSolver, typename MatrixType, typename Scalar>
+struct OP<MatrixSolver, MatrixType, Scalar, false>
+{
+ static inline void applyOP(MatrixSolver &OP, const MatrixType &A, int n, Scalar *in, Scalar *out)
+{
+ eigen_assert(false && "Should never be in here...");
+}
+
+ static inline void project(MatrixSolver &OP, int n, int k, Scalar *vecs)
+{
+ eigen_assert(false && "Should never be in here...");
+}
+
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_ARPACKSELFADJOINTEIGENSOLVER_H
+
diff --git a/unsupported/Eigen/src/FFT/ei_kissfft_impl.h b/unsupported/Eigen/src/FFT/ei_kissfft_impl.h
index 37f5af4c1..be51b4e6f 100644
--- a/unsupported/Eigen/src/FFT/ei_kissfft_impl.h
+++ b/unsupported/Eigen/src/FFT/ei_kissfft_impl.h
@@ -28,6 +28,7 @@ struct kiss_cpx_fft
inline
void make_twiddles(int nfft,bool inverse)
{
+ using std::acos;
m_inverse = inverse;
m_twiddles.resize(nfft);
Scalar phinc = (inverse?2:-2)* acos( (Scalar) -1) / nfft;
@@ -399,6 +400,7 @@ struct kissfft_impl
inline
Complex * real_twiddles(int ncfft2)
{
+ using std::acos;
std::vector<Complex> & twidref = m_realTwiddles[ncfft2];// creates new if not there
if ( (int)twidref.size() != ncfft2 ) {
twidref.resize(ncfft2);
diff --git a/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
index b83bf7aef..dc0093eb9 100644
--- a/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
+++ b/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h
@@ -2,10 +2,6 @@
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/* NOTE The functions of this file have been adapted from the GMM++ library */
@@ -62,7 +58,9 @@ void pseudo_inverse(const CMatrix &C, CINVMatrix &CINV)
Scalar rho, rho_1, alpha;
d.setZero();
- CINV.startFill(); // FIXME estimate the number of non-zeros
+ typedef Triplet<double> T;
+ std::vector<T> tripletList;
+
for (Index i = 0; i < rows; ++i)
{
d[i] = 1.0;
@@ -88,11 +86,12 @@ void pseudo_inverse(const CMatrix &C, CINVMatrix &CINV)
// FIXME add a generic "prune/filter" expression for both dense and sparse object to sparse
for (Index j=0; j<l.size(); ++j)
if (l[j]<1e-15)
- CINV.fill(i,j) = l[j];
+ tripletList.push_back(T(i,j,l(j)));
+
d[i] = 0.0;
}
- CINV.endFill();
+ CINV.setFromTriplets(tripletList.begin(), tripletList.end());
}
@@ -107,6 +106,7 @@ template<typename TMatrix, typename CMatrix,
void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x,
const VectorB& b, const VectorF& f, IterationController &iter)
{
+ using std::sqrt;
typedef typename TMatrix::Scalar Scalar;
typedef typename TMatrix::Index Index;
typedef Matrix<Scalar,Dynamic,1> TmpVec;
diff --git a/unsupported/Eigen/src/IterativeSolvers/DGMRES.h b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
new file mode 100644
index 000000000..9fcc8a8d9
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/DGMRES.h
@@ -0,0 +1,542 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_DGMRES_H
+#define EIGEN_DGMRES_H
+
+#include <Eigen/Eigenvalues>
+
+namespace Eigen {
+
+template< typename _MatrixType,
+ typename _Preconditioner = DiagonalPreconditioner<typename _MatrixType::Scalar> >
+class DGMRES;
+
+namespace internal {
+
+template< typename _MatrixType, typename _Preconditioner>
+struct traits<DGMRES<_MatrixType,_Preconditioner> >
+{
+ typedef _MatrixType MatrixType;
+ typedef _Preconditioner Preconditioner;
+};
+
+/** \brief Computes a permutation vector to have a sorted sequence
+ * \param vec The vector to reorder.
+ * \param perm gives the sorted sequence on output. Must be initialized with 0..n-1
+ * \param ncut Put the ncut smallest elements at the end of the vector
+ * WARNING This is an expensive sort and should only be used
+ * for small vectors
+ * TODO Use modified QuickSplit or std::nth_element to get the smallest values
+ */
+template <typename VectorType, typename IndexType>
+void sortWithPermutation (VectorType& vec, IndexType& perm, typename IndexType::Scalar& ncut)
+{
+ eigen_assert(vec.size() == perm.size());
+ typedef typename IndexType::Scalar Index;
+ typedef typename VectorType::Scalar Scalar;
+ bool flag;
+ for (Index k = 0; k < ncut; k++)
+ {
+ flag = false;
+ for (Index j = 0; j < vec.size()-1; j++)
+ {
+ if ( vec(perm(j)) < vec(perm(j+1)) )
+ {
+ std::swap(perm(j),perm(j+1));
+ flag = true;
+ }
+ if (!flag) break; // The vector is in sorted order
+ }
+ }
+}
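+
+// A minimal sketch of the intended call pattern (it mirrors the call in
+// dgmresComputeDeflationData() further below); perm must be pre-initialized to the
+// identity permutation 0..n-1:
+//
+// \code
+// int n = 10;
+// VectorXd values = VectorXd::Random(n);   // values to reorder
+// Matrix<int,Dynamic,1> perm(n);
+// perm.setLinSpaced(n, 0, n-1);
+// int ncut = 3;                            // move the 3 smallest values to the end
+// internal::sortWithPermutation(values, perm, ncut);
+// \endcode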
+
+}
+/**
+ * \ingroup IterativeLinearSolvers_Module
+ * \brief A Restarted GMRES with deflation.
+ * This class implements a modification of the GMRES solver for
+ * sparse linear systems. The basis is built with modified
+ * Gram-Schmidt. At each restart, a few approximated eigenvectors
+ * corresponding to the smallest eigenvalues are used to build a
+ * preconditioner for the next cycle. This preconditioner
+ * for deflation can be combined with any other preconditioner,
+ * the IncompleteLUT for instance. The preconditioner is applied
+ * at right of the matrix and the combination is multiplicative.
+ * to the right of the matrix and the combination is multiplicative.
+ * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
+ * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
+ * Typical usage :
+ * \code
+ * SparseMatrix<double> A;
+ * VectorXd x, b;
+ * //Fill A and b ...
+ * DGMRES<SparseMatrix<double> > solver;
+ * solver.set_restart(30); // Set restarting value
+ * solver.setEigenv(1); // Set the number of eigenvalues to deflate
+ * solver.compute(A);
+ * x = solver.solve(b);
+ * \endcode
+ *
+ * References :
+ * [1] D. NUENTSA WAKAM and F. PACULL, Memory Efficient Hybrid
+ * Algebraic Solvers for Linear Systems Arising from Compressible
+ * Flows, Computers and Fluids, In Press,
+ * http://dx.doi.org/10.1016/j.compfluid.2012.03.023
+ * [2] K. Burrage and J. Erhel, On the performance of various
+ * adaptive preconditioned GMRES strategies, 5(1998), 101-121.
+ * [3] J. Erhel, K. Burrage and B. Pohl, Restarted GMRES
+ * preconditioned by deflation,J. Computational and Applied
+ * preconditioned by deflation, J. Computational and Applied
+
+ *
+ */
+template< typename _MatrixType, typename _Preconditioner>
+class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
+{
+ typedef IterativeSolverBase<DGMRES> Base;
+ using Base::mp_matrix;
+ using Base::m_error;
+ using Base::m_iterations;
+ using Base::m_info;
+ using Base::m_isInitialized;
+ using Base::m_tolerance;
+ public:
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef _Preconditioner Preconditioner;
+ typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
+ typedef Matrix<RealScalar,Dynamic,Dynamic> DenseRealMatrix;
+ typedef Matrix<Scalar,Dynamic,1> DenseVector;
+ typedef Matrix<RealScalar,Dynamic,1> DenseRealVector;
+ typedef Matrix<std::complex<RealScalar>, Dynamic, 1> ComplexVector;
+
+
+ /** Default constructor. */
+ DGMRES() : Base(),m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false) {}
+
+ /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+ *
+ * This constructor is a shortcut for the default constructor followed
+ * by a call to compute().
+ *
+ * \warning this class stores a reference to the matrix A as well as some
+ * precomputed values that depend on it. Therefore, if \a A is changed
+ * this class becomes invalid. Call compute() to update it with the new
+ * matrix A, or modify a copy of A.
+ */
+ DGMRES(const MatrixType& A) : Base(A),m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false)
+ {}
+
+ ~DGMRES() {}
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
+ * \a x0 as an initial solution.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs,typename Guess>
+ inline const internal::solve_retval_with_guess<DGMRES, Rhs, Guess>
+ solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
+ {
+ eigen_assert(m_isInitialized && "DGMRES is not initialized.");
+ eigen_assert(Base::rows()==b.rows()
+ && "DGMRES::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval_with_guess
+ <DGMRES, Rhs, Guess>(*this, b.derived(), x0);
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solveWithGuess(const Rhs& b, Dest& x) const
+ {
+ bool failed = false;
+ for(int j=0; j<b.cols(); ++j)
+ {
+ m_iterations = Base::maxIterations();
+ m_error = Base::m_tolerance;
+
+ typename Dest::ColXpr xj(x,j);
+ dgmres(*mp_matrix, b.col(j), xj, Base::m_preconditioner);
+ }
+ m_info = failed ? NumericalIssue
+ : m_error <= Base::m_tolerance ? Success
+ : NoConvergence;
+ m_isInitialized = true;
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const Rhs& b, Dest& x) const
+ {
+ x = b;
+ _solveWithGuess(b,x);
+ }
+ /**
+ * Get the restart value
+ */
+ int restart() { return m_restart; }
+
+ /**
+ * Set the restart value (default is 30)
+ */
+ void set_restart(const int restart) { m_restart=restart; }
+
+ /**
+ * Set the number of eigenvalues to deflate at each restart
+ */
+ void setEigenv(const int neig)
+ {
+ m_neig = neig;
+ if (neig+1 > m_maxNeig) m_maxNeig = neig+1; // To allow for complex conjugates
+ }
+
+ /**
+     * Get the size of the deflation subspace
+ */
+ int deflSize() {return m_r; }
+
+ /**
+ * Set the maximum size of the deflation subspace
+ */
+ void setMaxEigenv(const int maxNeig) { m_maxNeig = maxNeig; }
+
+ protected:
+ // DGMRES algorithm
+ template<typename Rhs, typename Dest>
+ void dgmres(const MatrixType& mat,const Rhs& rhs, Dest& x, const Preconditioner& precond) const;
+ // Perform one cycle of GMRES
+ template<typename Dest>
+ int dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, int& nbIts) const;
+ // Compute data to use for deflation
+ int dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, Index& neig) const;
+ // Apply deflation to a vector
+ template<typename RhsType, typename DestType>
+ int dgmresApplyDeflation(const RhsType& In, DestType& Out) const;
+ ComplexVector schurValues(const ComplexSchur<DenseMatrix>& schurofH) const;
+ ComplexVector schurValues(const RealSchur<DenseMatrix>& schurofH) const;
+ // Init data for deflation
+ void dgmresInitDeflation(Index& rows) const;
+ mutable DenseMatrix m_V; // Krylov basis vectors
+ mutable DenseMatrix m_H; // Hessenberg matrix
+    mutable DenseMatrix m_Hes;  // Initial Hessenberg matrix without Givens rotations applied
+ mutable Index m_restart; // Maximum size of the Krylov subspace
+ mutable DenseMatrix m_U; // Vectors that form the basis of the invariant subspace
+ mutable DenseMatrix m_MU; // matrix operator applied to m_U (for next cycles)
+ mutable DenseMatrix m_T; /* T=U^T*M^{-1}*A*U */
+ mutable PartialPivLU<DenseMatrix> m_luT; // LU factorization of m_T
+ mutable int m_neig; //Number of eigenvalues to extract at each restart
+ mutable int m_r; // Current number of deflated eigenvalues, size of m_U
+ mutable int m_maxNeig; // Maximum number of eigenvalues to deflate
+ mutable RealScalar m_lambdaN; //Modulus of the largest eigenvalue of A
+ mutable bool m_isDeflAllocated;
+ mutable bool m_isDeflInitialized;
+
+ //Adaptive strategy
+ mutable RealScalar m_smv; // Smaller multiple of the remaining number of steps allowed
+ mutable bool m_force; // Force the use of deflation at each restart
+
+};
+/**
+ * \brief Perform several cycles of restarted GMRES with modified Gram-Schmidt.
+ *
+ * A right preconditioner is used combined with deflation.
+ *
+ */
+template< typename _MatrixType, typename _Preconditioner>
+template<typename Rhs, typename Dest>
+void DGMRES<_MatrixType, _Preconditioner>::dgmres(const MatrixType& mat,const Rhs& rhs, Dest& x,
+ const Preconditioner& precond) const
+{
+ //Initialization
+ int n = mat.rows();
+ DenseVector r0(n);
+ int nbIts = 0;
+ m_H.resize(m_restart+1, m_restart);
+ m_Hes.resize(m_restart, m_restart);
+ m_V.resize(n,m_restart+1);
+  // Initial residual vector and initial norm
+ x = precond.solve(x);
+ r0 = rhs - mat * x;
+ RealScalar beta = r0.norm();
+ RealScalar normRhs = rhs.norm();
+ m_error = beta/normRhs;
+ if(m_error < m_tolerance)
+ m_info = Success;
+ else
+ m_info = NoConvergence;
+
+ // Iterative process
+ while (nbIts < m_iterations && m_info == NoConvergence)
+ {
+ dgmresCycle(mat, precond, x, r0, beta, normRhs, nbIts);
+
+ // Compute the new residual vector for the restart
+ if (nbIts < m_iterations && m_info == NoConvergence)
+ r0 = rhs - mat * x;
+ }
+}
+
+/**
+ * \brief Perform one restart cycle of DGMRES
+ * \param mat The coefficient matrix
+ * \param precond The preconditioner
+ * \param x the new approximated solution
+ * \param r0 The initial residual vector
+ * \param beta The norm of the residual computed so far
+ * \param normRhs The norm of the right hand side vector
+ * \param nbIts The number of iterations
+ */
+template< typename _MatrixType, typename _Preconditioner>
+template<typename Dest>
+int DGMRES<_MatrixType, _Preconditioner>::dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, int& nbIts) const
+{
+ //Initialization
+ DenseVector g(m_restart+1); // Right hand side of the least square problem
+ g.setZero();
+ g(0) = Scalar(beta);
+ m_V.col(0) = r0/beta;
+ m_info = NoConvergence;
+ std::vector<JacobiRotation<Scalar> >gr(m_restart); // Givens rotations
+ int it = 0; // Number of inner iterations
+ int n = mat.rows();
+ DenseVector tv1(n), tv2(n); //Temporary vectors
+ while (m_info == NoConvergence && it < m_restart && nbIts < m_iterations)
+ {
+ // Apply preconditioner(s) at right
+ if (m_isDeflInitialized )
+ {
+ dgmresApplyDeflation(m_V.col(it), tv1); // Deflation
+ tv2 = precond.solve(tv1);
+ }
+ else
+ {
+ tv2 = precond.solve(m_V.col(it)); // User's selected preconditioner
+ }
+ tv1 = mat * tv2;
+
+    // Orthogonalize it against the previous basis vectors using modified Gram-Schmidt
+ Scalar coef;
+ for (int i = 0; i <= it; ++i)
+ {
+ coef = tv1.dot(m_V.col(i));
+ tv1 = tv1 - coef * m_V.col(i);
+ m_H(i,it) = coef;
+ m_Hes(i,it) = coef;
+ }
+ // Normalize the vector
+ coef = tv1.norm();
+ m_V.col(it+1) = tv1/coef;
+ m_H(it+1, it) = coef;
+// m_Hes(it+1,it) = coef;
+
+ // FIXME Check for happy breakdown
+
+ // Update Hessenberg matrix with Givens rotations
+ for (int i = 1; i <= it; ++i)
+ {
+ m_H.col(it).applyOnTheLeft(i-1,i,gr[i-1].adjoint());
+ }
+ // Compute the new plane rotation
+ gr[it].makeGivens(m_H(it, it), m_H(it+1,it));
+ // Apply the new rotation
+ m_H.col(it).applyOnTheLeft(it,it+1,gr[it].adjoint());
+ g.applyOnTheLeft(it,it+1, gr[it].adjoint());
+
+ beta = std::abs(g(it+1));
+ m_error = beta/normRhs;
+ std::cerr << nbIts << " Relative Residual Norm " << m_error << std::endl;
+ it++; nbIts++;
+
+ if (m_error < m_tolerance)
+ {
+ // The method has converged
+ m_info = Success;
+ break;
+ }
+ }
+
+ // Compute the new coefficients by solving the least square problem
+// it++;
+ //FIXME Check first if the matrix is singular ... zero diagonal
+ DenseVector nrs(m_restart);
+ nrs = m_H.topLeftCorner(it,it).template triangularView<Upper>().solve(g.head(it));
+
+ // Form the new solution
+ if (m_isDeflInitialized)
+ {
+ tv1 = m_V.leftCols(it) * nrs;
+ dgmresApplyDeflation(tv1, tv2);
+ x = x + precond.solve(tv2);
+ }
+ else
+ x = x + precond.solve(m_V.leftCols(it) * nrs);
+
+ // Go for a new cycle and compute data for deflation
+ if(nbIts < m_iterations && m_info == NoConvergence && m_neig > 0 && (m_r+m_neig) < m_maxNeig)
+ dgmresComputeDeflationData(mat, precond, it, m_neig);
+ return 0;
+
+}
+
+
+template< typename _MatrixType, typename _Preconditioner>
+void DGMRES<_MatrixType, _Preconditioner>::dgmresInitDeflation(Index& rows) const
+{
+ m_U.resize(rows, m_maxNeig);
+ m_MU.resize(rows, m_maxNeig);
+ m_T.resize(m_maxNeig, m_maxNeig);
+ m_lambdaN = 0.0;
+ m_isDeflAllocated = true;
+}
+
+template< typename _MatrixType, typename _Preconditioner>
+inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_MatrixType, _Preconditioner>::schurValues(const ComplexSchur<DenseMatrix>& schurofH) const
+{
+ return schurofH.matrixT().diagonal();
+}
+
+template< typename _MatrixType, typename _Preconditioner>
+inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_MatrixType, _Preconditioner>::schurValues(const RealSchur<DenseMatrix>& schurofH) const
+{
+ typedef typename MatrixType::Index Index;
+ const DenseMatrix& T = schurofH.matrixT();
+ Index it = T.rows();
+ ComplexVector eig(it);
+ Index j = 0;
+ while (j < it-1)
+ {
+ if (T(j+1,j) ==Scalar(0))
+ {
+ eig(j) = std::complex<RealScalar>(T(j,j),RealScalar(0));
+ j++;
+ }
+    else
+    {
+      eig(j) = std::complex<RealScalar>(T(j,j),T(j+1,j));
+      eig(j+1) = std::complex<RealScalar>(T(j,j+1),T(j+1,j+1));
+      j += 2;
+    }
+  }
+  if (j < it) eig(j) = std::complex<RealScalar>(T(j,j),RealScalar(0));
+ return eig;
+}
+
+template< typename _MatrixType, typename _Preconditioner>
+int DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, Index& neig) const
+{
+ // First, find the Schur form of the Hessenberg matrix H
+ typename internal::conditional<NumTraits<Scalar>::IsComplex, ComplexSchur<DenseMatrix>, RealSchur<DenseMatrix> >::type schurofH;
+ bool computeU = true;
+ DenseMatrix matrixQ(it,it);
+ matrixQ.setIdentity();
+ schurofH.computeFromHessenberg(m_Hes.topLeftCorner(it,it), matrixQ, computeU);
+
+ ComplexVector eig(it);
+ Matrix<Index,Dynamic,1>perm(it);
+ eig = this->schurValues(schurofH);
+
+ // Reorder the absolute values of Schur values
+ DenseRealVector modulEig(it);
+ for (int j=0; j<it; ++j) modulEig(j) = std::abs(eig(j));
+ perm.setLinSpaced(it,0,it-1);
+ internal::sortWithPermutation(modulEig, perm, neig);
+
+ if (!m_lambdaN)
+ {
+ m_lambdaN = (std::max)(modulEig.maxCoeff(), m_lambdaN);
+ }
+ //Count the real number of extracted eigenvalues (with complex conjugates)
+ int nbrEig = 0;
+ while (nbrEig < neig)
+ {
+ if(eig(perm(it-nbrEig-1)).imag() == RealScalar(0)) nbrEig++;
+ else nbrEig += 2;
+ }
+ // Extract the Schur vectors corresponding to the smallest Ritz values
+ DenseMatrix Sr(it, nbrEig);
+ Sr.setZero();
+ for (int j = 0; j < nbrEig; j++)
+ {
+ Sr.col(j) = schurofH.matrixU().col(perm(it-j-1));
+ }
+
+ // Form the Schur vectors of the initial matrix using the Krylov basis
+ DenseMatrix X;
+ X = m_V.leftCols(it) * Sr;
+ if (m_r)
+ {
+ // Orthogonalize X against m_U using modified Gram-Schmidt
+ for (int j = 0; j < nbrEig; j++)
+ for (int k =0; k < m_r; k++)
+ X.col(j) = X.col(j) - (m_U.col(k).dot(X.col(j)))*m_U.col(k);
+ }
+
+ // Compute m_MX = A * M^-1 * X
+ Index m = m_V.rows();
+ if (!m_isDeflAllocated)
+ dgmresInitDeflation(m);
+ DenseMatrix MX(m, nbrEig);
+ DenseVector tv1(m);
+ for (int j = 0; j < nbrEig; j++)
+ {
+ tv1 = mat * X.col(j);
+ MX.col(j) = precond.solve(tv1);
+ }
+
+ //Update m_T = [U'MU U'MX; X'MU X'MX]
+ m_T.block(m_r, m_r, nbrEig, nbrEig) = X.transpose() * MX;
+ if(m_r)
+ {
+ m_T.block(0, m_r, m_r, nbrEig) = m_U.leftCols(m_r).transpose() * MX;
+ m_T.block(m_r, 0, nbrEig, m_r) = X.transpose() * m_MU.leftCols(m_r);
+ }
+
+ // Save X into m_U and m_MX in m_MU
+ for (int j = 0; j < nbrEig; j++) m_U.col(m_r+j) = X.col(j);
+ for (int j = 0; j < nbrEig; j++) m_MU.col(m_r+j) = MX.col(j);
+ // Increase the size of the invariant subspace
+ m_r += nbrEig;
+
+ // Factorize m_T into m_luT
+ m_luT.compute(m_T.topLeftCorner(m_r, m_r));
+
+  //FIXME Check if the factorization was correctly done (nonsingular matrix)
+ m_isDeflInitialized = true;
+ return 0;
+}
+template<typename _MatrixType, typename _Preconditioner>
+template<typename RhsType, typename DestType>
+int DGMRES<_MatrixType, _Preconditioner>::dgmresApplyDeflation(const RhsType &x, DestType &y) const
+{
+ DenseVector x1 = m_U.leftCols(m_r).transpose() * x;
+ y = x + m_U.leftCols(m_r) * ( m_lambdaN * m_luT.solve(x1) - x1);
+ return 0;
+}
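+
+// dgmresApplyDeflation() implements the deflation preconditioner
+//   M_D^{-1} x = x + U ( |lambda_N| T^{-1} - I ) U^T x,
+// where U holds the approximate Schur vectors accumulated at the previous restarts,
+// T = U^T M^{-1} A U (stored in m_T) and |lambda_N| (m_lambdaN) is the estimate of
+// the largest eigenvalue modulus; see references [2] and [3] in the class
+// documentation above.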
+
+namespace internal {
+
+ template<typename _MatrixType, typename _Preconditioner, typename Rhs>
+struct solve_retval<DGMRES<_MatrixType, _Preconditioner>, Rhs>
+ : solve_retval_base<DGMRES<_MatrixType, _Preconditioner>, Rhs>
+{
+ typedef DGMRES<_MatrixType, _Preconditioner> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+} // end namespace internal
+
+} // end namespace Eigen
+#endif
diff --git a/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
index 34e67db82..c8c84069e 100644
--- a/unsupported/Eigen/src/IterativeSolvers/GMRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
@@ -2,7 +2,7 @@
// for linear algebra.
//
// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
-// Copyright (C) 2012 Kolja Brix <brix@igpm.rwth-aaachen.de>
+// Copyright (C) 2012, 2014 Kolja Brix <brix@igpm.rwth-aaachen.de>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -61,7 +61,6 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
typedef typename Dest::RealScalar RealScalar;
typedef typename Dest::Scalar Scalar;
- typedef Matrix < RealScalar, Dynamic, 1 > RealVectorType;
typedef Matrix < Scalar, Dynamic, 1 > VectorType;
typedef Matrix < Scalar, Dynamic, Dynamic > FMatrixType;
@@ -73,16 +72,20 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
VectorType p0 = rhs - mat*x;
VectorType r0 = precond.solve(p0);
-// RealScalar r0_sqnorm = r0.squaredNorm();
+
+ // is initial guess already good enough?
+ if(abs(r0.norm()) < tol) {
+ return true;
+ }
VectorType w = VectorType::Zero(restart + 1);
- FMatrixType H = FMatrixType::Zero(m, restart + 1);
+ FMatrixType H = FMatrixType::Zero(m, restart + 1); // Hessenberg matrix
VectorType tau = VectorType::Zero(restart + 1);
std::vector < JacobiRotation < Scalar > > G(restart);
// generate first Householder vector
- VectorType e;
+ VectorType e(m-1);
RealScalar beta;
r0.makeHouseholder(e, tau.coeffRef(0), beta);
w(0)=(Scalar) beta;
@@ -348,7 +351,8 @@ public:
template<typename Rhs,typename Dest>
void _solve(const Rhs& b, Dest& x) const
{
- x.setZero();
+ x = b;
+ if(x.squaredNorm() == 0) return; // Check Zero right hand side
_solveWithGuess(b,x);
}
diff --git a/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
new file mode 100644
index 000000000..661c1f2e0
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/IncompleteCholesky.h
@@ -0,0 +1,278 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_INCOMPLETE_CHOLESKY_H
+#define EIGEN_INCOMPLETE_CHOLESKY_H
+#include "Eigen/src/IterativeLinearSolvers/IncompleteLUT.h"
+#include <Eigen/OrderingMethods>
+#include <list>
+
+namespace Eigen {
+/**
+ * \brief Modified Incomplete Cholesky with dual threshold
+ *
+ * References : C-J. Lin and J. J. Moré, Incomplete Cholesky Factorizations with
+ * Limited memory, SIAM J. Sci. Comput. 21(1), pp. 24-45, 1999
+ *
+ * \tparam Scalar The scalar type of the matrix coefficients. The matrix should be
+ * symmetric; it is advised to give a row-oriented sparse matrix.
+ * \tparam _UpLo The triangular part of the matrix to reference.
+ * \tparam _OrderingType The fill-reducing ordering to use; the default is NaturalOrdering<int>.
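+ *
+ * A minimal usage sketch as a preconditioner (the ConjugateGradient solver and the
+ * matrix here are illustrative placeholders, not defined in this file):
+ * \code
+ * SparseMatrix<double> A;    // symmetric matrix, fill it ...
+ * VectorXd b(A.rows()), x;
+ * ConjugateGradient<SparseMatrix<double>, Lower, IncompleteCholesky<double> > cg;
+ * cg.compute(A);
+ * x = cg.solve(b);
+ * \endcode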
+ */
+
+template <typename Scalar, int _UpLo = Lower, typename _OrderingType = NaturalOrdering<int> >
+class IncompleteCholesky : internal::noncopyable
+{
+ public:
+ typedef SparseMatrix<Scalar,ColMajor> MatrixType;
+ typedef _OrderingType OrderingType;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+ typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+ typedef Matrix<Scalar,Dynamic,1> ScalarType;
+ typedef Matrix<Index,Dynamic, 1> IndexType;
+ typedef std::vector<std::list<Index> > VectorList;
+ enum { UpLo = _UpLo };
+ public:
+ IncompleteCholesky() : m_shift(1),m_factorizationIsOk(false) {}
+ IncompleteCholesky(const MatrixType& matrix) : m_shift(1),m_factorizationIsOk(false)
+ {
+ compute(matrix);
+ }
+
+ Index rows() const { return m_L.rows(); }
+
+ Index cols() const { return m_L.cols(); }
+
+
+ /** \brief Reports whether previous computation was successful.
+ *
+     * \returns \c Success if computation was successful,
+ * \c NumericalIssue if the matrix appears to be negative.
+ */
+ ComputationInfo info() const
+ {
+ eigen_assert(m_isInitialized && "IncompleteLLT is not initialized.");
+ return m_info;
+ }
+
+ /**
+ * \brief Set the initial shift parameter
+ */
+ void setShift( Scalar shift) { m_shift = shift; }
+
+ /**
+ * \brief Computes the fill reducing permutation vector.
+ */
+ template<typename MatrixType>
+ void analyzePattern(const MatrixType& mat)
+ {
+ OrderingType ord;
+ ord(mat.template selfadjointView<UpLo>(), m_perm);
+ m_analysisIsOk = true;
+ }
+
+ template<typename MatrixType>
+ void factorize(const MatrixType& amat);
+
+ template<typename MatrixType>
+ void compute (const MatrixType& matrix)
+ {
+ analyzePattern(matrix);
+ factorize(matrix);
+ }
+
+ template<typename Rhs, typename Dest>
+ void _solve(const Rhs& b, Dest& x) const
+ {
+ eigen_assert(m_factorizationIsOk && "factorize() should be called first");
+ if (m_perm.rows() == b.rows())
+ x = m_perm.inverse() * b;
+ else
+ x = b;
+ x = m_scal.asDiagonal() * x;
+ x = m_L.template triangularView<UnitLower>().solve(x);
+ x = m_L.adjoint().template triangularView<Upper>().solve(x);
+ if (m_perm.rows() == b.rows())
+ x = m_perm * x;
+ x = m_scal.asDiagonal() * x;
+ }
+ template<typename Rhs> inline const internal::solve_retval<IncompleteCholesky, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(m_factorizationIsOk && "IncompleteLLT did not succeed");
+ eigen_assert(m_isInitialized && "IncompleteLLT is not initialized.");
+ eigen_assert(cols()==b.rows()
+ && "IncompleteLLT::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval<IncompleteCholesky, Rhs>(*this, b.derived());
+ }
+ protected:
+ SparseMatrix<Scalar,ColMajor> m_L; // The lower part stored in CSC
+ ScalarType m_scal; // The vector for scaling the matrix
+ Scalar m_shift; //The initial shift parameter
+ bool m_analysisIsOk;
+ bool m_factorizationIsOk;
+ bool m_isInitialized;
+ ComputationInfo m_info;
+ PermutationType m_perm;
+
+ private:
+ template <typename IdxType, typename SclType>
+ inline void updateList(const IdxType& colPtr, IdxType& rowIdx, SclType& vals, const Index& col, const Index& jk, IndexType& firstElt, VectorList& listCol);
+};
+
+template<typename Scalar, int _UpLo, typename OrderingType>
+template<typename _MatrixType>
+void IncompleteCholesky<Scalar,_UpLo, OrderingType>::factorize(const _MatrixType& mat)
+{
+ using std::sqrt;
+ using std::min;
+ eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
+
+ // Dropping strategies : Keep only the p largest elements per column, where p is the number of elements in the column of the original matrix. Other strategies will be added
+
+ // Apply the fill-reducing permutation computed in analyzePattern()
+ if (m_perm.rows() == mat.rows() ) // To detect the null permutation
+ m_L.template selfadjointView<Lower>() = mat.template selfadjointView<_UpLo>().twistedBy(m_perm);
+ else
+ m_L.template selfadjointView<Lower>() = mat.template selfadjointView<_UpLo>();
+
+ Index n = m_L.cols();
+ Index nnz = m_L.nonZeros();
+ Map<ScalarType> vals(m_L.valuePtr(), nnz); //values
+ Map<IndexType> rowIdx(m_L.innerIndexPtr(), nnz); //Row indices
+  Map<IndexType> colPtr( m_L.outerIndexPtr(), n+1);  // Pointer to the beginning of each column
+ IndexType firstElt(n-1); // for each j, points to the next entry in vals that will be used in the factorization
+ VectorList listCol(n); // listCol(j) is a linked list of columns to update column j
+  ScalarType curCol(n);  // Stores the nonzero values of the current column
+ IndexType irow(n); // Row indices of nonzero elements in each column
+
+
+ // Computes the scaling factors
+ m_scal.resize(n);
+ for (int j = 0; j < n; j++)
+ {
+ m_scal(j) = m_L.col(j).norm();
+ m_scal(j) = sqrt(m_scal(j));
+ }
+ // Scale and compute the shift for the matrix
+ Scalar mindiag = vals[0];
+ for (int j = 0; j < n; j++){
+ for (int k = colPtr[j]; k < colPtr[j+1]; k++)
+ vals[k] /= (m_scal(j) * m_scal(rowIdx[k]));
+ mindiag = (min)(vals[colPtr[j]], mindiag);
+ }
+
+ if(mindiag < Scalar(0.)) m_shift = m_shift - mindiag;
+ // Apply the shift to the diagonal elements of the matrix
+ for (int j = 0; j < n; j++)
+ vals[colPtr[j]] += m_shift;
+ // jki version of the Cholesky factorization
+ for (int j=0; j < n; ++j)
+ {
+ //Left-looking factorize the column j
+ // First, load the jth column into curCol
+ Scalar diag = vals[colPtr[j]]; // It is assumed that only the lower part is stored
+ curCol.setZero();
+ irow.setLinSpaced(n,0,n-1);
+ for (int i = colPtr[j] + 1; i < colPtr[j+1]; i++)
+ {
+ curCol(rowIdx[i]) = vals[i];
+ irow(rowIdx[i]) = rowIdx[i];
+ }
+ std::list<int>::iterator k;
+ // Browse all previous columns that will update column j
+ for(k = listCol[j].begin(); k != listCol[j].end(); k++)
+ {
+ int jk = firstElt(*k); // First element to use in the column
+ jk += 1;
+ for (int i = jk; i < colPtr[*k+1]; i++)
+ {
+ curCol(rowIdx[i]) -= vals[i] * vals[jk] ;
+ }
+ updateList(colPtr,rowIdx,vals, *k, jk, firstElt, listCol);
+ }
+
+ // Scale the current column
+ if(RealScalar(diag) <= 0)
+ {
+ std::cerr << "\nNegative diagonal during Incomplete factorization... "<< j << "\n";
+ m_info = NumericalIssue;
+ return;
+ }
+ RealScalar rdiag = sqrt(RealScalar(diag));
+ vals[colPtr[j]] = rdiag;
+ for (int i = j+1; i < n; i++)
+ {
+ //Scale
+ curCol(i) /= rdiag;
+ //Update the remaining diagonals with curCol
+ vals[colPtr[i]] -= curCol(i) * curCol(i);
+ }
+ // Select the largest p elements
+ // p is the original number of elements in the column (without the diagonal)
+ int p = colPtr[j+1] - colPtr[j] - 1 ;
+ internal::QuickSplit(curCol, irow, p);
+ // Insert the largest p elements in the matrix
+ int cpt = 0;
+ for (int i = colPtr[j]+1; i < colPtr[j+1]; i++)
+ {
+ vals[i] = curCol(cpt);
+ rowIdx[i] = irow(cpt);
+ cpt ++;
+ }
+ // Get the first smallest row index and put it after the diagonal element
+ Index jk = colPtr(j)+1;
+ updateList(colPtr,rowIdx,vals,j,jk,firstElt,listCol);
+ }
+ m_factorizationIsOk = true;
+ m_isInitialized = true;
+ m_info = Success;
+}
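+
+// Note on the scaling and shift used above: the incomplete factor is computed for
+// the scaled and shifted matrix D^{-1} A D^{-1} + sigma * I, where D is the diagonal
+// matrix of the square roots of the stored column norms (m_scal) and sigma (m_shift)
+// is increased whenever a negative entry appears on the scaled diagonal, so that the
+// factorization proceeds on an (approximately) positive definite matrix.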
+
+template<typename Scalar, int _UpLo, typename OrderingType>
+template <typename IdxType, typename SclType>
+inline void IncompleteCholesky<Scalar,_UpLo, OrderingType>::updateList(const IdxType& colPtr, IdxType& rowIdx, SclType& vals, const Index& col, const Index& jk, IndexType& firstElt, VectorList& listCol)
+{
+ if (jk < colPtr(col+1) )
+ {
+ Index p = colPtr(col+1) - jk;
+ Index minpos;
+ rowIdx.segment(jk,p).minCoeff(&minpos);
+ minpos += jk;
+ if (rowIdx(minpos) != rowIdx(jk))
+ {
+ //Swap
+ std::swap(rowIdx(jk),rowIdx(minpos));
+ std::swap(vals(jk),vals(minpos));
+ }
+ firstElt(col) = jk;
+ listCol[rowIdx(jk)].push_back(col);
+ }
+}
+namespace internal {
+
+template<typename _Scalar, int _UpLo, typename OrderingType, typename Rhs>
+struct solve_retval<IncompleteCholesky<_Scalar, _UpLo, OrderingType>, Rhs>
+ : solve_retval_base<IncompleteCholesky<_Scalar, _UpLo, OrderingType>, Rhs>
+{
+ typedef IncompleteCholesky<_Scalar, _UpLo, OrderingType> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif
diff --git a/unsupported/Eigen/src/IterativeSolvers/IterationController.h b/unsupported/Eigen/src/IterativeSolvers/IterationController.h
index aaf46d544..c9c1a4be2 100644
--- a/unsupported/Eigen/src/IterativeSolvers/IterationController.h
+++ b/unsupported/Eigen/src/IterativeSolvers/IterationController.h
@@ -2,10 +2,6 @@
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
-//
-// This Source Code Form is subject to the terms of the Mozilla
-// Public License v. 2.0. If a copy of the MPL was not distributed
-// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
/* NOTE The class IterationController has been adapted from the iteration
* class of the GMM++ and ITL libraries.
@@ -129,7 +125,8 @@ class IterationController
bool converged() const { return m_res <= m_rhsn * m_resmax; }
bool converged(double nr)
{
- m_res = internal::abs(nr);
+ using std::abs;
+ m_res = abs(nr);
m_resminreach = (std::min)(m_resminreach, m_res);
return converged();
}
diff --git a/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
new file mode 100644
index 000000000..0e56342a8
--- /dev/null
+++ b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
@@ -0,0 +1,302 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Giacomo Po <gpo@ucla.edu>
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#ifndef EIGEN_MINRES_H_
+#define EIGEN_MINRES_H_
+
+
+namespace Eigen {
+
+ namespace internal {
+
+ /** \internal Low-level MINRES algorithm
+ * \param mat The matrix A
+ * \param rhs The right hand side vector b
+ * \param x On input and initial solution, on output the computed solution.
+ * \param precond A right preconditioner being able to efficiently solve for an
+ * approximation of Ax=b (regardless of b)
+ * \param iters On input the max number of iteration, on output the number of performed iterations.
+ * \param tol_error On input the tolerance error, on output an estimation of the relative error.
+ */
+ template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
+ EIGEN_DONT_INLINE
+ void minres(const MatrixType& mat, const Rhs& rhs, Dest& x,
+ const Preconditioner& precond, int& iters,
+ typename Dest::RealScalar& tol_error)
+ {
+ using std::sqrt;
+ typedef typename Dest::RealScalar RealScalar;
+ typedef typename Dest::Scalar Scalar;
+ typedef Matrix<Scalar,Dynamic,1> VectorType;
+
+ // initialize
+ const int maxIters(iters); // initialize maxIters to iters
+ const int N(mat.cols()); // the size of the matrix
+ const RealScalar rhsNorm2(rhs.squaredNorm());
+ const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2)
+
+ // Initialize preconditioned Lanczos
+// VectorType v_old(N); // will be initialized inside loop
+ VectorType v( VectorType::Zero(N) ); //initialize v
+ VectorType v_new(rhs-mat*x); //initialize v_new
+ RealScalar residualNorm2(v_new.squaredNorm());
+// VectorType w(N); // will be initialized inside loop
+ VectorType w_new(precond.solve(v_new)); // initialize w_new
+// RealScalar beta; // will be initialized inside loop
+ RealScalar beta_new2(v_new.dot(w_new));
+ eigen_assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
+ RealScalar beta_new(sqrt(beta_new2));
+ const RealScalar beta_one(beta_new);
+ v_new /= beta_new;
+ w_new /= beta_new;
+ // Initialize other variables
+ RealScalar c(1.0); // the cosine of the Givens rotation
+ RealScalar c_old(1.0);
+ RealScalar s(0.0); // the sine of the Givens rotation
+ RealScalar s_old(0.0); // the sine of the Givens rotation
+// VectorType p_oold(N); // will be initialized in loop
+ VectorType p_old(VectorType::Zero(N)); // initialize p_old=0
+ VectorType p(p_old); // initialize p=0
+ RealScalar eta(1.0);
+
+ iters = 0; // reset iters
+ while ( iters < maxIters ){
+
+ // Preconditioned Lanczos
+ /* Note that there are 4 variants on the Lanczos algorithm. These are
+ * described in Paige, C. C. (1972). Computational variants of
+ * the Lanczos method for the eigenproblem. IMA Journal of Applied
+ * Mathematics, 10(3), 373–381. The current implementation corresponds
+ * to the case A(2,7) in the paper. It also corresponds to
+ * algorithm 6.14 in Y. Saad, Iterative Methods for Sparse Linear
+ * Systems, 2003 p.173. For the preconditioned version see
+ * A. Greenbaum, Iterative Methods for Solving Linear Systems, SIAM (1987).
+ */
+ const RealScalar beta(beta_new);
+// v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter
+ const VectorType v_old(v); // NOT SURE IF CREATING v_old EVERY ITERATION IS EFFICIENT
+ v = v_new; // update
+// w = w_new; // update
+ const VectorType w(w_new); // NOT SURE IF CREATING w EVERY ITERATION IS EFFICIENT
+ v_new.noalias() = mat*w - beta*v_old; // compute v_new
+ const RealScalar alpha = v_new.dot(w);
+ v_new -= alpha*v; // overwrite v_new
+ w_new = precond.solve(v_new); // overwrite w_new
+ beta_new2 = v_new.dot(w_new); // compute beta_new
+ eigen_assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
+ beta_new = sqrt(beta_new2); // compute beta_new
+ v_new /= beta_new; // overwrite v_new for next iteration
+ w_new /= beta_new; // overwrite w_new for next iteration
+
+ // Givens rotation
+ const RealScalar r2 =s*alpha+c*c_old*beta; // s, s_old, c and c_old are still from previous iteration
+ const RealScalar r3 =s_old*beta; // s, s_old, c and c_old are still from previous iteration
+ const RealScalar r1_hat=c*alpha-c_old*s*beta;
+ const RealScalar r1 =sqrt( std::pow(r1_hat,2) + std::pow(beta_new,2) );
+ c_old = c; // store for next iteration
+ s_old = s; // store for next iteration
+ c=r1_hat/r1; // new cosine
+ s=beta_new/r1; // new sine
+
+ // Update solution
+// p_oold = p_old;
+ const VectorType p_oold(p_old); // NOT SURE IF CREATING p_oold EVERY ITERATION IS EFFICIENT
+ p_old = p;
+ p.noalias()=(w-r2*p_old-r3*p_oold) /r1; // IS NOALIAS REQUIRED?
+ x += beta_one*c*eta*p;
+ residualNorm2 *= s*s;
+
+ if ( residualNorm2 < threshold2){
+ break;
+ }
+
+ eta=-s*eta; // update eta
+ iters++; // increment iteration number (for output purposes)
+ }
+ tol_error = std::sqrt(residualNorm2 / rhsNorm2); // return error. Note that this is the estimated error. The real error |Ax-b|/|b| may be slightly larger
+ }
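+
+        // A minimal sketch of calling the low-level routine directly; the MINRES
+        // class below is the intended entry point, and the matrix and vectors here
+        // are illustrative placeholders:
+        //
+        // \code
+        // SparseMatrix<double> A;                  // symmetric matrix, fill it ...
+        // VectorXd b(A.rows());                    // right hand side, fill it ...
+        // VectorXd x(VectorXd::Zero(A.rows()));    // initial guess
+        // IdentityPreconditioner precond;
+        // int iters = 100;                         // in: max iterations, out: iterations used
+        // double tol_error = 1e-10;                // in: tolerance, out: estimated error
+        // internal::minres(A.selfadjointView<Lower>(), b, x, precond, iters, tol_error);
+        // \endcode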
+
+ }
+
+ template< typename _MatrixType, int _UpLo=Lower,
+ typename _Preconditioner = IdentityPreconditioner>
+// typename _Preconditioner = IdentityPreconditioner<typename _MatrixType::Scalar> > // preconditioner must be positive definite
+ class MINRES;
+
+ namespace internal {
+
+ template< typename _MatrixType, int _UpLo, typename _Preconditioner>
+ struct traits<MINRES<_MatrixType,_UpLo,_Preconditioner> >
+ {
+ typedef _MatrixType MatrixType;
+ typedef _Preconditioner Preconditioner;
+ };
+
+ }
+
+ /** \ingroup IterativeLinearSolvers_Module
+ * \brief A minimal residual solver for sparse symmetric problems
+ *
+     * This class allows solving sparse linear problems A.x = b using the MINRES algorithm
+ * of Paige and Saunders (1975). The sparse matrix A must be symmetric (possibly indefinite).
+ * The vectors x and b can be either dense or sparse.
+ *
+ * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
+ * or Upper. Default is Lower.
+ * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
+ *
+ * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
+ * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations
+ * and NumTraits<Scalar>::epsilon() for the tolerance.
+ *
+     * This class can be used in the same way as the direct solver classes. Here is a typical usage example:
+ * \code
+ * int n = 10000;
+ * VectorXd x(n), b(n);
+ * SparseMatrix<double> A(n,n);
+ * // fill A and b
+ * MINRES<SparseMatrix<double> > mr;
+ * mr.compute(A);
+ * x = mr.solve(b);
+ * std::cout << "#iterations: " << mr.iterations() << std::endl;
+ * std::cout << "estimated error: " << mr.error() << std::endl;
+ * // update b, and solve again
+ * x = mr.solve(b);
+ * \endcode
+ *
+ * By default the iterations start with x=0 as an initial guess of the solution.
+ * One can control the start using the solveWithGuess() method. Here is a step by
+ * step execution example starting with a random guess and printing the evolution
+ * of the estimated error:
+     * \code
+ * x = VectorXd::Random(n);
+ * mr.setMaxIterations(1);
+ * int i = 0;
+ * do {
+ * x = mr.solveWithGuess(b,x);
+ * std::cout << i << " : " << mr.error() << std::endl;
+ * ++i;
+ * } while (mr.info()!=Success && i<100);
+ * \endcode
+     * Note that such a step-by-step execution is slightly slower.
+ *
+ * \sa class ConjugateGradient, BiCGSTAB, SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
+ */
+ template< typename _MatrixType, int _UpLo, typename _Preconditioner>
+ class MINRES : public IterativeSolverBase<MINRES<_MatrixType,_UpLo,_Preconditioner> >
+ {
+
+ typedef IterativeSolverBase<MINRES> Base;
+ using Base::mp_matrix;
+ using Base::m_error;
+ using Base::m_iterations;
+ using Base::m_info;
+ using Base::m_isInitialized;
+ public:
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef _Preconditioner Preconditioner;
+
+ enum {UpLo = _UpLo};
+
+ public:
+
+ /** Default constructor. */
+ MINRES() : Base() {}
+
+ /** Initialize the solver with matrix \a A for further \c Ax=b solving.
+ *
+ * This constructor is a shortcut for the default constructor followed
+ * by a call to compute().
+ *
+ * \warning this class stores a reference to the matrix A as well as some
+ * precomputed values that depend on it. Therefore, if \a A is changed
+ * this class becomes invalid. Call compute() to update it with the new
+ * matrix A, or modify a copy of A.
+ */
+ MINRES(const MatrixType& A) : Base(A) {}
+
+ /** Destructor. */
+ ~MINRES(){}
+
+ /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A
+ * \a x0 as an initial solution.
+ *
+ * \sa compute()
+ */
+ template<typename Rhs,typename Guess>
+ inline const internal::solve_retval_with_guess<MINRES, Rhs, Guess>
+ solveWithGuess(const MatrixBase<Rhs>& b, const Guess& x0) const
+ {
+ eigen_assert(m_isInitialized && "MINRES is not initialized.");
+ eigen_assert(Base::rows()==b.rows()
+ && "MINRES::solve(): invalid number of rows of the right hand side matrix b");
+ return internal::solve_retval_with_guess
+ <MINRES, Rhs, Guess>(*this, b.derived(), x0);
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solveWithGuess(const Rhs& b, Dest& x) const
+ {
+ m_iterations = Base::maxIterations();
+ m_error = Base::m_tolerance;
+
+ for(int j=0; j<b.cols(); ++j)
+ {
+ m_iterations = Base::maxIterations();
+ m_error = Base::m_tolerance;
+
+ typename Dest::ColXpr xj(x,j);
+ internal::minres(mp_matrix->template selfadjointView<UpLo>(), b.col(j), xj,
+ Base::m_preconditioner, m_iterations, m_error);
+ }
+
+ m_isInitialized = true;
+ m_info = m_error <= Base::m_tolerance ? Success : NoConvergence;
+ }
+
+ /** \internal */
+ template<typename Rhs,typename Dest>
+ void _solve(const Rhs& b, Dest& x) const
+ {
+ x.setZero();
+ _solveWithGuess(b,x);
+ }
+
+ protected:
+
+ };
+
+ namespace internal {
+
+ template<typename _MatrixType, int _UpLo, typename _Preconditioner, typename Rhs>
+ struct solve_retval<MINRES<_MatrixType,_UpLo,_Preconditioner>, Rhs>
+ : solve_retval_base<MINRES<_MatrixType,_UpLo,_Preconditioner>, Rhs>
+ {
+ typedef MINRES<_MatrixType,_UpLo,_Preconditioner> Dec;
+ EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ dec()._solve(rhs(),dst);
+ }
+ };
+
+ } // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_MINRES_H_
+
diff --git a/unsupported/Eigen/src/IterativeSolvers/Scaling.h b/unsupported/Eigen/src/IterativeSolvers/Scaling.h
index fdef0aca3..4fd439202 100644
--- a/unsupported/Eigen/src/IterativeSolvers/Scaling.h
+++ b/unsupported/Eigen/src/IterativeSolvers/Scaling.h
@@ -7,8 +7,8 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-#ifndef EIGEN_SCALING_H
-#define EIGEN_SCALING_H
+#ifndef EIGEN_ITERSCALING_H
+#define EIGEN_ITERSCALING_H
/**
* \ingroup IterativeSolvers_Module
* \brief iterative scaling algorithm to equilibrate rows and column norms in matrices
@@ -24,7 +24,7 @@
* VectorXd x(n), b(n);
* SparseMatrix<double> A;
* // fill A and b;
- * Scaling<SparseMatrix<double> > scal;
+ * IterScaling<SparseMatrix<double> > scal;
* // Compute the left and right scaling vectors. The matrix is equilibrated at output
* scal.computeRef(A);
* // Scale the right hand side
@@ -41,10 +41,10 @@
*
* \sa \ref IncompleteLUT
*/
+namespace Eigen {
using std::abs;
-using namespace Eigen;
template<typename _MatrixType>
-class Scaling
+class IterScaling
{
public:
typedef _MatrixType MatrixType;
@@ -52,15 +52,15 @@ class Scaling
typedef typename MatrixType::Index Index;
public:
- Scaling() { init(); }
+ IterScaling() { init(); }
- Scaling(const MatrixType& matrix)
+ IterScaling(const MatrixType& matrix)
{
init();
compute(matrix);
}
- ~Scaling() { }
+ ~IterScaling() { }
/**
* Compute the left and right diagonal matrices to scale the input matrix @p mat
@@ -73,7 +73,7 @@ class Scaling
{
int m = mat.rows();
int n = mat.cols();
- assert((m>0 && m == n) && "Please give a non - empty matrix");
+ eigen_assert((m>0 && m == n) && "Please give a non - empty matrix");
m_left.resize(m);
m_right.resize(n);
m_left.setOnes();
@@ -181,5 +181,5 @@ class Scaling
double m_tol;
int m_maxits; // Maximum number of iterations allowed
};
-
-#endif \ No newline at end of file
+}
+#endif
diff --git a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h
index 84fd72fc6..532896c3b 100644
--- a/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h
+++ b/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h
@@ -3,153 +3,240 @@
//
// Copyright (C) 2011 Kolja Brix <brix@igpm.rwth-aachen.de>
// Copyright (C) 2011 Andreas Platen <andiplaten@gmx.de>
+// Copyright (C) 2012 Chen-Pang He <jdh8@ms63.hinet.net>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
#ifndef KRONECKER_TENSOR_PRODUCT_H
#define KRONECKER_TENSOR_PRODUCT_H
-
namespace Eigen {
-namespace internal {
+template<typename Scalar, int Options, typename Index> class SparseMatrix;
/*!
- * Kronecker tensor product helper function for dense matrices
+ * \brief Kronecker tensor product helper class for dense matrices
*
- * \param A Dense matrix A
- * \param B Dense matrix B
- * \param AB_ Kronecker tensor product of A and B
+ * This class is the return value of kroneckerProduct(MatrixBase,
+ * MatrixBase). Use the function rather than constructing this class
+ * directly to avoid specifying template parameters.
+ *
+ * \tparam Lhs Type of the left-hand side, a matrix expression.
+ * \tparam Rhs Type of the right-hand side, a matrix expression.
*/
-template<typename Derived_A, typename Derived_B, typename Derived_AB>
-void kroneckerProduct_full(const Derived_A& A, const Derived_B& B, Derived_AB & AB)
+template<typename Lhs, typename Rhs>
+class KroneckerProduct : public ReturnByValue<KroneckerProduct<Lhs,Rhs> >
{
- const unsigned int Ar = A.rows(),
- Ac = A.cols(),
- Br = B.rows(),
- Bc = B.cols();
- for (unsigned int i=0; i<Ar; ++i)
- for (unsigned int j=0; j<Ac; ++j)
- AB.block(i*Br,j*Bc,Br,Bc) = A(i,j)*B;
-}
+ private:
+ typedef ReturnByValue<KroneckerProduct> Base;
+ typedef typename Base::Scalar Scalar;
+ typedef typename Base::Index Index;
+
+ public:
+ /*! \brief Constructor. */
+ KroneckerProduct(const Lhs& A, const Rhs& B)
+ : m_A(A), m_B(B)
+ {}
+
+ /*! \brief Evaluate the Kronecker tensor product. */
+ template<typename Dest> void evalTo(Dest& dst) const;
+
+ inline Index rows() const { return m_A.rows() * m_B.rows(); }
+ inline Index cols() const { return m_A.cols() * m_B.cols(); }
+
+ Scalar coeff(Index row, Index col) const
+ {
+ return m_A.coeff(row / m_B.rows(), col / m_B.cols()) *
+ m_B.coeff(row % m_B.rows(), col % m_B.cols());
+ }
+
+ Scalar coeff(Index i) const
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(KroneckerProduct);
+ return m_A.coeff(i / m_B.size()) * m_B.coeff(i % m_B.size());
+ }
+ private:
+ typename Lhs::Nested m_A;
+ typename Rhs::Nested m_B;
+};
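To see how the coeff() index arithmetic above realizes the Kronecker structure: with B of size 2x2, entry (3,2) of the product of A and B is A(3/2, 2/2) * B(3%2, 2%2) = A(1,1) * B(1,0); the integer division by B's dimensions selects the A-block, and the remainder selects the position inside that block (the vector overload uses the same mapping with B's size as divisor).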
/*!
- * Kronecker tensor product helper function for matrices, where at least one is sparse
+ * \brief Kronecker tensor product helper class for sparse matrices
+ *
+ * If at least one of the operands is a sparse matrix expression,
+ * then this class is returned and evaluates into a sparse matrix.
+ *
+ * This class is the return value of kroneckerProduct(EigenBase,
+ * EigenBase). Use the function rather than constructing this class
+ * directly to avoid specifying template parameters.
*
- * \param A Matrix A
- * \param B Matrix B
- * \param AB_ Kronecker tensor product of A and B
+ * \tparam Lhs Type of the left-hand side, a matrix expression.
+ * \tparam Rhs Type of the right-hand side, a matrix expression.
*/
-template<typename Derived_A, typename Derived_B, typename Derived_AB>
-void kroneckerProduct_sparse(const Derived_A &A, const Derived_B &B, Derived_AB &AB)
+template<typename Lhs, typename Rhs>
+class KroneckerProductSparse : public EigenBase<KroneckerProductSparse<Lhs,Rhs> >
{
- const unsigned int Ar = A.rows(),
- Ac = A.cols(),
- Br = B.rows(),
- Bc = B.cols();
- AB.resize(Ar*Br,Ac*Bc);
- AB.resizeNonZeros(0);
- AB.reserve(A.nonZeros()*B.nonZeros());
-
- for (int kA=0; kA<A.outerSize(); ++kA)
+ private:
+ typedef typename internal::traits<KroneckerProductSparse>::Index Index;
+
+ public:
+ /*! \brief Constructor. */
+ KroneckerProductSparse(const Lhs& A, const Rhs& B)
+ : m_A(A), m_B(B)
+ {}
+
+ /*! \brief Evaluate the Kronecker tensor product. */
+ template<typename Dest> void evalTo(Dest& dst) const;
+
+ inline Index rows() const { return m_A.rows() * m_B.rows(); }
+ inline Index cols() const { return m_A.cols() * m_B.cols(); }
+
+ template<typename Scalar, int Options, typename Index>
+ operator SparseMatrix<Scalar, Options, Index>()
+ {
+ SparseMatrix<Scalar, Options, Index> result;
+ evalTo(result.derived());
+ return result;
+ }
+
+ private:
+ typename Lhs::Nested m_A;
+ typename Rhs::Nested m_B;
+};
+
+template<typename Lhs, typename Rhs>
+template<typename Dest>
+void KroneckerProduct<Lhs,Rhs>::evalTo(Dest& dst) const
+{
+ const int BlockRows = Rhs::RowsAtCompileTime,
+ BlockCols = Rhs::ColsAtCompileTime;
+ const Index Br = m_B.rows(),
+ Bc = m_B.cols();
+ for (Index i=0; i < m_A.rows(); ++i)
+ for (Index j=0; j < m_A.cols(); ++j)
+ Block<Dest,BlockRows,BlockCols>(dst,i*Br,j*Bc,Br,Bc) = m_A.coeff(i,j) * m_B;
+}
+
+template<typename Lhs, typename Rhs>
+template<typename Dest>
+void KroneckerProductSparse<Lhs,Rhs>::evalTo(Dest& dst) const
+{
+ const Index Br = m_B.rows(),
+ Bc = m_B.cols();
+ dst.resize(rows(),cols());
+ dst.resizeNonZeros(0);
+ dst.reserve(m_A.nonZeros() * m_B.nonZeros());
+
+ for (Index kA=0; kA < m_A.outerSize(); ++kA)
{
- for (int kB=0; kB<B.outerSize(); ++kB)
+ for (Index kB=0; kB < m_B.outerSize(); ++kB)
{
- for (typename Derived_A::InnerIterator itA(A,kA); itA; ++itA)
+ for (typename Lhs::InnerIterator itA(m_A,kA); itA; ++itA)
{
- for (typename Derived_B::InnerIterator itB(B,kB); itB; ++itB)
+ for (typename Rhs::InnerIterator itB(m_B,kB); itB; ++itB)
{
- const unsigned int iA = itA.row(),
- jA = itA.col(),
- iB = itB.row(),
- jB = itB.col(),
- i = iA*Br + iB,
- j = jA*Bc + jB;
- AB.insert(i,j) = itA.value() * itB.value();
+ const Index i = itA.row() * Br + itB.row(),
+ j = itA.col() * Bc + itB.col();
+ dst.insert(i,j) = itA.value() * itB.value();
}
}
}
}
}
-} // end namespace internal
+namespace internal {
+template<typename _Lhs, typename _Rhs>
+struct traits<KroneckerProduct<_Lhs,_Rhs> >
+{
+ typedef typename remove_all<_Lhs>::type Lhs;
+ typedef typename remove_all<_Rhs>::type Rhs;
+ typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
+
+ enum {
+ Rows = size_at_compile_time<traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime>::ret,
+ Cols = size_at_compile_time<traits<Lhs>::ColsAtCompileTime, traits<Rhs>::ColsAtCompileTime>::ret,
+ MaxRows = size_at_compile_time<traits<Lhs>::MaxRowsAtCompileTime, traits<Rhs>::MaxRowsAtCompileTime>::ret,
+ MaxCols = size_at_compile_time<traits<Lhs>::MaxColsAtCompileTime, traits<Rhs>::MaxColsAtCompileTime>::ret,
+ CoeffReadCost = Lhs::CoeffReadCost + Rhs::CoeffReadCost + NumTraits<Scalar>::MulCost
+ };
+
+ typedef Matrix<Scalar,Rows,Cols> ReturnType;
+};
+
+template<typename _Lhs, typename _Rhs>
+struct traits<KroneckerProductSparse<_Lhs,_Rhs> >
+{
+ typedef MatrixXpr XprKind;
+ typedef typename remove_all<_Lhs>::type Lhs;
+ typedef typename remove_all<_Rhs>::type Rhs;
+ typedef typename scalar_product_traits<typename Lhs::Scalar, typename Rhs::Scalar>::ReturnType Scalar;
+ typedef typename promote_storage_type<typename traits<Lhs>::StorageKind, typename traits<Rhs>::StorageKind>::ret StorageKind;
+ typedef typename promote_index_type<typename Lhs::Index, typename Rhs::Index>::type Index;
+
+ enum {
+ LhsFlags = Lhs::Flags,
+ RhsFlags = Rhs::Flags,
+
+ RowsAtCompileTime = size_at_compile_time<traits<Lhs>::RowsAtCompileTime, traits<Rhs>::RowsAtCompileTime>::ret,
+ ColsAtCompileTime = size_at_compile_time<traits<Lhs>::ColsAtCompileTime, traits<Rhs>::ColsAtCompileTime>::ret,
+ MaxRowsAtCompileTime = size_at_compile_time<traits<Lhs>::MaxRowsAtCompileTime, traits<Rhs>::MaxRowsAtCompileTime>::ret,
+ MaxColsAtCompileTime = size_at_compile_time<traits<Lhs>::MaxColsAtCompileTime, traits<Rhs>::MaxColsAtCompileTime>::ret,
+
+ EvalToRowMajor = (LhsFlags & RhsFlags & RowMajorBit),
+ RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit),
+
+ Flags = ((LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
+ | EvalBeforeNestingBit | EvalBeforeAssigningBit,
+ CoeffReadCost = Dynamic
+ };
+};
+} // end namespace internal
/*!
- * Computes Kronecker tensor product of two dense matrices
+ * \ingroup KroneckerProduct_Module
*
- * \param a Dense matrix a
- * \param b Dense matrix b
- * \param c Kronecker tensor product of a and b
- */
-template<typename A,typename B,typename CScalar,int CRows,int CCols, int COptions, int CMaxRows, int CMaxCols>
-void kroneckerProduct(const MatrixBase<A>& a, const MatrixBase<B>& b, Matrix<CScalar,CRows,CCols,COptions,CMaxRows,CMaxCols>& c)
-{
- c.resize(a.rows()*b.rows(),a.cols()*b.cols());
- internal::kroneckerProduct_full(a.derived(), b.derived(), c);
-}
-
-/*!
* Computes Kronecker tensor product of two dense matrices
*
- * Remark: this function uses the const cast hack and has been
- * implemented to make the function call possible, where the
- * output matrix is a submatrix, e.g.
- * kroneckerProduct(A,B,AB.block(2,5,6,6));
+ * \warning If you want to replace a matrix by its Kronecker product
+ * with some matrix, do \b NOT do this:
+ * \code
+ * A = kroneckerProduct(A,B); // bug!!! caused by aliasing effect
+ * \endcode
+ * instead, use eval() to work around this:
+ * \code
+ * A = kroneckerProduct(A,B).eval();
+ * \endcode
*
* \param a Dense matrix a
* \param b Dense matrix b
- * \param c Kronecker tensor product of a and b
+ * \return Kronecker tensor product of a and b
*/
-template<typename A,typename B,typename C>
-void kroneckerProduct(const MatrixBase<A>& a, const MatrixBase<B>& b, MatrixBase<C> const & c_)
+template<typename A, typename B>
+KroneckerProduct<A,B> kroneckerProduct(const MatrixBase<A>& a, const MatrixBase<B>& b)
{
- MatrixBase<C>& c = const_cast<MatrixBase<C>& >(c_);
- internal::kroneckerProduct_full(a.derived(), b.derived(), c.derived());
+ return KroneckerProduct<A, B>(a.derived(), b.derived());
}
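A minimal sketch of the new expression-based call site (illustrative only; it assumes the product is pulled in through the unsupported/Eigen/KroneckerProduct module header):

#include <Eigen/Dense>
#include <unsupported/Eigen/KroneckerProduct>

int main()
{
  Eigen::MatrixXd A(2, 2), B(2, 2);
  A << 1, 2,
       3, 4;
  B << 0, 1,
       1, 0;
  Eigen::MatrixXd C = Eigen::kroneckerProduct(A, B);  // 4 x 4, evaluated on assignment
  A = Eigen::kroneckerProduct(A, B).eval();           // eval() avoids aliasing, as warned above
  return 0;
}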
/*!
- * Computes Kronecker tensor product of a dense and a sparse matrix
+ * \ingroup KroneckerProduct_Module
*
- * \param a Dense matrix a
- * \param b Sparse matrix b
- * \param c Kronecker tensor product of a and b
- */
-template<typename A,typename B,typename C>
-void kroneckerProduct(const MatrixBase<A>& a, const SparseMatrixBase<B>& b, SparseMatrixBase<C>& c)
-{
- internal::kroneckerProduct_sparse(a.derived(), b.derived(), c.derived());
-}
-
-/*!
- * Computes Kronecker tensor product of a sparse and a dense matrix
- *
- * \param a Sparse matrix a
- * \param b Dense matrix b
- * \param c Kronecker tensor product of a and b
- */
-template<typename A,typename B,typename C>
-void kroneckerProduct(const SparseMatrixBase<A>& a, const MatrixBase<B>& b, SparseMatrixBase<C>& c)
-{
- internal::kroneckerProduct_sparse(a.derived(), b.derived(), c.derived());
-}
-
-/*!
- * Computes Kronecker tensor product of two sparse matrices
+ * Computes Kronecker tensor product of two matrices, at least one of
+ * which is sparse
*
- * \param a Sparse matrix a
- * \param b Sparse matrix b
- * \param c Kronecker tensor product of a and b
+ * \param a Dense/sparse matrix a
+ * \param b Dense/sparse matrix b
+ * \return Kronecker tensor product of a and b, stored in a sparse
+ * matrix
*/
-template<typename A,typename B,typename C>
-void kroneckerProduct(const SparseMatrixBase<A>& a, const SparseMatrixBase<B>& b, SparseMatrixBase<C>& c)
+template<typename A, typename B>
+KroneckerProductSparse<A,B> kroneckerProduct(const EigenBase<A>& a, const EigenBase<B>& b)
{
- internal::kroneckerProduct_sparse(a.derived(), b.derived(), c.derived());
+ return KroneckerProductSparse<A,B>(a.derived(), b.derived());
}
} // end namespace Eigen
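And the corresponding sparse call site, a sketch relying on the operator SparseMatrix<>() conversion defined in KroneckerProductSparse above; mixing one dense and one sparse operand resolves to this EigenBase overload as well.

#include <Eigen/Sparse>
#include <unsupported/Eigen/KroneckerProduct>

void sparseKron(const Eigen::SparseMatrix<double>& A,
                const Eigen::SparseMatrix<double>& B)
{
  // The result reserves A.nonZeros() * B.nonZeros() entries, as in evalTo() above.
  Eigen::SparseMatrix<double> C = Eigen::kroneckerProduct(A, B);
  (void)C;
}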
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt b/unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt
new file mode 100644
index 000000000..8513803ce
--- /dev/null
+++ b/unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_LevenbergMarquardt_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_LevenbergMarquardt_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/LevenbergMarquardt COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/CopyrightMINPACK.txt b/unsupported/Eigen/src/LevenbergMarquardt/CopyrightMINPACK.txt
new file mode 100644
index 000000000..ae7984dae
--- /dev/null
+++ b/unsupported/Eigen/src/LevenbergMarquardt/CopyrightMINPACK.txt
@@ -0,0 +1,52 @@
+Minpack Copyright Notice (1999) University of Chicago. All rights reserved
+
+Redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the
+following conditions are met:
+
+1. Redistributions of source code must retain the above
+copyright notice, this list of conditions and the following
+disclaimer.
+
+2. Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following
+disclaimer in the documentation and/or other materials
+provided with the distribution.
+
+3. The end-user documentation included with the
+redistribution, if any, must include the following
+acknowledgment:
+
+ "This product includes software developed by the
+ University of Chicago, as Operator of Argonne National
+ Laboratory.
+
+Alternately, this acknowledgment may appear in the software
+itself, if and wherever such third-party acknowledgments
+normally appear.
+
+4. WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
+WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
+UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
+THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
+OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
+OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
+USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
+THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
+DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
+UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
+BE CORRECTED.
+
+5. LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
+HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
+ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
+INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
+ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
+PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
+SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
+(INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
+EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
+POSSIBILITY OF SUCH LOSS OR DAMAGES.
+
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h b/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h
new file mode 100644
index 000000000..32d3ad518
--- /dev/null
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h
@@ -0,0 +1,85 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This code initially comes from MINPACK whose original authors are:
+// Copyright Jorge More - Argonne National Laboratory
+// Copyright Burt Garbow - Argonne National Laboratory
+// Copyright Ken Hillstrom - Argonne National Laboratory
+//
+// This Source Code Form is subject to the terms of the Minpack license
+// (a BSD-like license) described in the accompanying CopyrightMINPACK.txt file.
+
+#ifndef EIGEN_LMCOVAR_H
+#define EIGEN_LMCOVAR_H
+
+namespace Eigen {
+
+namespace internal {
+
+template <typename Scalar>
+void covar(
+ Matrix< Scalar, Dynamic, Dynamic > &r,
+ const VectorXi& ipvt,
+ Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon()) )
+{
+ using std::abs;
+ typedef DenseIndex Index;
+ /* Local variables */
+ Index i, j, k, l, ii, jj;
+ bool sing;
+ Scalar temp;
+
+ /* Function Body */
+ const Index n = r.cols();
+ const Scalar tolr = tol * abs(r(0,0));
+ Matrix< Scalar, Dynamic, 1 > wa(n);
+ eigen_assert(ipvt.size()==n);
+
+ /* form the inverse of r in the full upper triangle of r. */
+ l = -1;
+ for (k = 0; k < n; ++k)
+ if (abs(r(k,k)) > tolr) {
+ r(k,k) = 1. / r(k,k);
+ for (j = 0; j <= k-1; ++j) {
+ temp = r(k,k) * r(j,k);
+ r(j,k) = 0.;
+ r.col(k).head(j+1) -= r.col(j).head(j+1) * temp;
+ }
+ l = k;
+ }
+
+ /* form the full upper triangle of the inverse of (r transpose)*r */
+ /* in the full upper triangle of r. */
+ for (k = 0; k <= l; ++k) {
+ for (j = 0; j <= k-1; ++j)
+ r.col(j).head(j+1) += r.col(k).head(j+1) * r(j,k);
+ r.col(k).head(k+1) *= r(k,k);
+ }
+
+ /* form the full lower triangle of the covariance matrix */
+ /* in the strict lower triangle of r and in wa. */
+ for (j = 0; j < n; ++j) {
+ jj = ipvt[j];
+ sing = j > l;
+ for (i = 0; i <= j; ++i) {
+ if (sing)
+ r(i,j) = 0.;
+ ii = ipvt[i];
+ if (ii > jj)
+ r(ii,jj) = r(i,j);
+ if (ii < jj)
+ r(jj,ii) = r(i,j);
+ }
+ wa[jj] = r(j,j);
+ }
+
+ /* symmetrize the covariance matrix in r. */
+ r.topLeftCorner(n,n).template triangularView<StrictlyUpper>() = r.topLeftCorner(n,n).transpose();
+ r.diagonal() = wa;
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_LMCOVAR_H
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h b/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h
new file mode 100644
index 000000000..25b32ec5b
--- /dev/null
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h
@@ -0,0 +1,202 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
+//
+// This code initially comes from MINPACK whose original authors are:
+// Copyright Jorge More - Argonne National Laboratory
+// Copyright Burt Garbow - Argonne National Laboratory
+// Copyright Ken Hillstrom - Argonne National Laboratory
+//
+// This Source Code Form is subject to the terms of the Minpack license
+// (a BSD-like license) described in the accompanying CopyrightMINPACK.txt file.
+
+#ifndef EIGEN_LMONESTEP_H
+#define EIGEN_LMONESTEP_H
+
+namespace Eigen {
+
+template<typename FunctorType>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType>::minimizeOneStep(FVectorType &x)
+{
+ using std::abs;
+ using std::sqrt;
+ RealScalar temp, temp1,temp2;
+ RealScalar ratio;
+ RealScalar pnorm, xnorm, fnorm1, actred, dirder, prered;
+ eigen_assert(x.size()==n); // check the caller is not cheating us
+
+ temp = 0.0; xnorm = 0.0;
+ /* calculate the jacobian matrix. */
+ Index df_ret = m_functor.df(x, m_fjac);
+ if (df_ret<0)
+ return LevenbergMarquardtSpace::UserAsked;
+ if (df_ret>0)
+ // numerical diff, we evaluated the function df_ret times
+ m_nfev += df_ret;
+ else m_njev++;
+
+ /* compute the qr factorization of the jacobian. */
+ for (int j = 0; j < x.size(); ++j)
+ m_wa2(j) = m_fjac.col(j).blueNorm();
+ QRSolver qrfac(m_fjac);
+ if(qrfac.info() != Success) {
+ m_info = NumericalIssue;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
+ }
+ // Make a copy of the first factor with the associated permutation
+ m_rfactor = qrfac.matrixR();
+ m_permutation = (qrfac.colsPermutation());
+
+ /* on the first iteration and if external scaling is not used, scale according */
+ /* to the norms of the columns of the initial jacobian. */
+ if (m_iter == 1) {
+ if (!m_useExternalScaling)
+ for (Index j = 0; j < n; ++j)
+ m_diag[j] = (m_wa2[j]==0.)? 1. : m_wa2[j];
+
+ /* on the first iteration, calculate the norm of the scaled x */
+ /* and initialize the step bound m_delta. */
+ xnorm = m_diag.cwiseProduct(x).stableNorm();
+ m_delta = m_factor * xnorm;
+ if (m_delta == 0.)
+ m_delta = m_factor;
+ }
+
+ /* form (q transpose)*m_fvec and store the first n components in */
+ /* m_qtf. */
+ m_wa4 = m_fvec;
+ m_wa4 = qrfac.matrixQ().adjoint() * m_fvec;
+ m_qtf = m_wa4.head(n);
+
+ /* compute the norm of the scaled gradient. */
+ m_gnorm = 0.;
+ if (m_fnorm != 0.)
+ for (Index j = 0; j < n; ++j)
+ if (m_wa2[m_permutation.indices()[j]] != 0.)
+ m_gnorm = (std::max)(m_gnorm, abs( m_rfactor.col(j).head(j+1).dot(m_qtf.head(j+1)/m_fnorm) / m_wa2[m_permutation.indices()[j]]));
+
+ /* test for convergence of the gradient norm. */
+ if (m_gnorm <= m_gtol) {
+ m_info = Success;
+ return LevenbergMarquardtSpace::CosinusTooSmall;
+ }
+
+ /* rescale if necessary. */
+ if (!m_useExternalScaling)
+ m_diag = m_diag.cwiseMax(m_wa2);
+
+ do {
+ /* determine the levenberg-marquardt parameter. */
+ internal::lmpar2(qrfac, m_diag, m_qtf, m_delta, m_par, m_wa1);
+
+ /* store the direction p and x + p. calculate the norm of p. */
+ m_wa1 = -m_wa1;
+ m_wa2 = x + m_wa1;
+ pnorm = m_diag.cwiseProduct(m_wa1).stableNorm();
+
+ /* on the first iteration, adjust the initial step bound. */
+ if (m_iter == 1)
+ m_delta = (std::min)(m_delta,pnorm);
+
+ /* evaluate the function at x + p and calculate its norm. */
+ if ( m_functor(m_wa2, m_wa4) < 0)
+ return LevenbergMarquardtSpace::UserAsked;
+ ++m_nfev;
+ fnorm1 = m_wa4.stableNorm();
+
+ /* compute the scaled actual reduction. */
+ actred = -1.;
+ if (Scalar(.1) * fnorm1 < m_fnorm)
+ actred = 1. - numext::abs2(fnorm1 / m_fnorm);
+
+ /* compute the scaled predicted reduction and */
+ /* the scaled directional derivative. */
+ m_wa3 = m_rfactor.template triangularView<Upper>() * (m_permutation.inverse() *m_wa1);
+ temp1 = numext::abs2(m_wa3.stableNorm() / m_fnorm);
+ temp2 = numext::abs2(sqrt(m_par) * pnorm / m_fnorm);
+ prered = temp1 + temp2 / Scalar(.5);
+ dirder = -(temp1 + temp2);
+
+ /* compute the ratio of the actual to the predicted */
+ /* reduction. */
+ ratio = 0.;
+ if (prered != 0.)
+ ratio = actred / prered;
+
+ /* update the step bound. */
+ if (ratio <= Scalar(.25)) {
+ if (actred >= 0.)
+ temp = RealScalar(.5);
+ if (actred < 0.)
+ temp = RealScalar(.5) * dirder / (dirder + RealScalar(.5) * actred);
+ if (RealScalar(.1) * fnorm1 >= m_fnorm || temp < RealScalar(.1))
+ temp = Scalar(.1);
+ /* Computing MIN */
+ m_delta = temp * (std::min)(m_delta, pnorm / RealScalar(.1));
+ m_par /= temp;
+ } else if (!(m_par != 0. && ratio < RealScalar(.75))) {
+ m_delta = pnorm / RealScalar(.5);
+ m_par = RealScalar(.5) * m_par;
+ }
+
+ /* test for successful iteration. */
+ if (ratio >= RealScalar(1e-4)) {
+ /* successful iteration. update x, m_fvec, and their norms. */
+ x = m_wa2;
+ m_wa2 = m_diag.cwiseProduct(x);
+ m_fvec = m_wa4;
+ xnorm = m_wa2.stableNorm();
+ m_fnorm = fnorm1;
+ ++m_iter;
+ }
+
+ /* tests for convergence. */
+ if (abs(actred) <= m_ftol && prered <= m_ftol && Scalar(.5) * ratio <= 1. && m_delta <= m_xtol * xnorm)
+ {
+ m_info = Success;
+ return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall;
+ }
+ if (abs(actred) <= m_ftol && prered <= m_ftol && Scalar(.5) * ratio <= 1.)
+ {
+ m_info = Success;
+ return LevenbergMarquardtSpace::RelativeReductionTooSmall;
+ }
+ if (m_delta <= m_xtol * xnorm)
+ {
+ m_info = Success;
+ return LevenbergMarquardtSpace::RelativeErrorTooSmall;
+ }
+
+ /* tests for termination and stringent tolerances. */
+ if (m_nfev >= m_maxfev)
+ {
+ m_info = NoConvergence;
+ return LevenbergMarquardtSpace::TooManyFunctionEvaluation;
+ }
+ if (abs(actred) <= NumTraits<Scalar>::epsilon() && prered <= NumTraits<Scalar>::epsilon() && Scalar(.5) * ratio <= 1.)
+ {
+ m_info = Success;
+ return LevenbergMarquardtSpace::FtolTooSmall;
+ }
+ if (m_delta <= NumTraits<Scalar>::epsilon() * xnorm)
+ {
+ m_info = Success;
+ return LevenbergMarquardtSpace::XtolTooSmall;
+ }
+ if (m_gnorm <= NumTraits<Scalar>::epsilon())
+ {
+ m_info = Success;
+ return LevenbergMarquardtSpace::GtolTooSmall;
+ }
+
+ } while (ratio < Scalar(1e-4));
+
+ return LevenbergMarquardtSpace::Running;
+}
+
+
+} // end namespace Eigen
+
+#endif // EIGEN_LMONESTEP_H
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h b/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h
new file mode 100644
index 000000000..9532042d9
--- /dev/null
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h
@@ -0,0 +1,160 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// This code initially comes from MINPACK whose original authors are:
+// Copyright Jorge More - Argonne National Laboratory
+// Copyright Burt Garbow - Argonne National Laboratory
+// Copyright Ken Hillstrom - Argonne National Laboratory
+//
+// This Source Code Form is subject to the terms of the Minpack license
+// (a BSD-like license) described in the accompanying CopyrightMINPACK.txt file.
+
+#ifndef EIGEN_LMPAR_H
+#define EIGEN_LMPAR_H
+
+namespace Eigen {
+
+namespace internal {
+
+ template <typename QRSolver, typename VectorType>
+ void lmpar2(
+ const QRSolver &qr,
+ const VectorType &diag,
+ const VectorType &qtb,
+ typename VectorType::Scalar m_delta,
+ typename VectorType::Scalar &par,
+ VectorType &x)
+
+ {
+ using std::sqrt;
+ using std::abs;
+ typedef typename QRSolver::MatrixType MatrixType;
+ typedef typename QRSolver::Scalar Scalar;
+ typedef typename QRSolver::Index Index;
+
+ /* Local variables */
+ Index j;
+ Scalar fp;
+ Scalar parc, parl;
+ Index iter;
+ Scalar temp, paru;
+ Scalar gnorm;
+ Scalar dxnorm;
+
+ // Make a copy of the triangular factor.
+ // This copy is modified during the call to lmqrsolv below
+ MatrixType s;
+ s = qr.matrixR();
+
+ /* Function Body */
+ const Scalar dwarf = (std::numeric_limits<Scalar>::min)();
+ const Index n = qr.matrixR().cols();
+ eigen_assert(n==diag.size());
+ eigen_assert(n==qtb.size());
+
+ VectorType wa1, wa2;
+
+ /* compute and store in x the gauss-newton direction. if the */
+ /* jacobian is rank-deficient, obtain a least squares solution. */
+
+ // const Index rank = qr.nonzeroPivots(); // exactly double(0.)
+ const Index rank = qr.rank(); // use a threshold
+ wa1 = qtb;
+ wa1.tail(n-rank).setZero();
+ //FIXME There is no solve in place for sparse triangularView
+ wa1.head(rank) = s.topLeftCorner(rank,rank).template triangularView<Upper>().solve(qtb.head(rank));
+
+ x = qr.colsPermutation()*wa1;
+
+ /* initialize the iteration counter. */
+ /* evaluate the function at the origin, and test */
+ /* for acceptance of the gauss-newton direction. */
+ iter = 0;
+ wa2 = diag.cwiseProduct(x);
+ dxnorm = wa2.blueNorm();
+ fp = dxnorm - m_delta;
+ if (fp <= Scalar(0.1) * m_delta) {
+ par = 0;
+ return;
+ }
+
+ /* if the jacobian is not rank deficient, the newton */
+ /* step provides a lower bound, parl, for the zero of */
+ /* the function. otherwise set this bound to zero. */
+ parl = 0.;
+ if (rank==n) {
+ wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2)/dxnorm;
+ s.topLeftCorner(n,n).transpose().template triangularView<Lower>().solveInPlace(wa1);
+ temp = wa1.blueNorm();
+ parl = fp / m_delta / temp / temp;
+ }
+
+ /* calculate an upper bound, paru, for the zero of the function. */
+ for (j = 0; j < n; ++j)
+ wa1[j] = s.col(j).head(j+1).dot(qtb.head(j+1)) / diag[qr.colsPermutation().indices()(j)];
+
+ gnorm = wa1.stableNorm();
+ paru = gnorm / m_delta;
+ if (paru == 0.)
+ paru = dwarf / (std::min)(m_delta,Scalar(0.1));
+
+ /* if the input par lies outside of the interval (parl,paru), */
+ /* set par to the closer endpoint. */
+ par = (std::max)(par,parl);
+ par = (std::min)(par,paru);
+ if (par == 0.)
+ par = gnorm / dxnorm;
+
+ /* beginning of an iteration. */
+ while (true) {
+ ++iter;
+
+ /* evaluate the function at the current value of par. */
+ if (par == 0.)
+ par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */
+ wa1 = sqrt(par)* diag;
+
+ VectorType sdiag(n);
+ lmqrsolv(s, qr.colsPermutation(), wa1, qtb, x, sdiag);
+
+ wa2 = diag.cwiseProduct(x);
+ dxnorm = wa2.blueNorm();
+ temp = fp;
+ fp = dxnorm - m_delta;
+
+ /* if the function is small enough, accept the current value */
+ /* of par. also test for the exceptional cases where parl */
+ /* is zero or the number of iterations has reached 10. */
+ if (abs(fp) <= Scalar(0.1) * m_delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10)
+ break;
+
+ /* compute the newton correction. */
+ wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2/dxnorm);
+ // we could almost use this here, but the diagonal is outside qr, in sdiag[]
+ for (j = 0; j < n; ++j) {
+ wa1[j] /= sdiag[j];
+ temp = wa1[j];
+ for (Index i = j+1; i < n; ++i)
+ wa1[i] -= s.coeff(i,j) * temp;
+ }
+ temp = wa1.blueNorm();
+ parc = fp / m_delta / temp / temp;
+
+ /* depending on the sign of the function, update parl or paru. */
+ if (fp > 0.)
+ parl = (std::max)(parl,par);
+ if (fp < 0.)
+ paru = (std::min)(paru,par);
+
+ /* compute an improved estimate for par. */
+ par = (std::max)(parl,par+parc);
+ }
+ if (iter == 0)
+ par = 0.;
+ return;
+ }
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_LMPAR_H
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h b/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h
new file mode 100644
index 000000000..f5290dee4
--- /dev/null
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h
@@ -0,0 +1,189 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
+// Copyright (C) 2012 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
+//
+// This code initially comes from MINPACK whose original authors are:
+// Copyright Jorge More - Argonne National Laboratory
+// Copyright Burt Garbow - Argonne National Laboratory
+// Copyright Ken Hillstrom - Argonne National Laboratory
+//
+// This Source Code Form is subject to the terms of the Minpack license
+// (a BSD-like license) described in the accompanying CopyrightMINPACK.txt file.
+
+#ifndef EIGEN_LMQRSOLV_H
+#define EIGEN_LMQRSOLV_H
+
+namespace Eigen {
+
+namespace internal {
+
+template <typename Scalar,int Rows, int Cols, typename Index>
+void lmqrsolv(
+ Matrix<Scalar,Rows,Cols> &s,
+ const PermutationMatrix<Dynamic,Dynamic,Index> &iPerm,
+ const Matrix<Scalar,Dynamic,1> &diag,
+ const Matrix<Scalar,Dynamic,1> &qtb,
+ Matrix<Scalar,Dynamic,1> &x,
+ Matrix<Scalar,Dynamic,1> &sdiag)
+{
+
+ /* Local variables */
+ Index i, j, k, l;
+ Scalar temp;
+ Index n = s.cols();
+ Matrix<Scalar,Dynamic,1> wa(n);
+ JacobiRotation<Scalar> givens;
+
+ /* Function Body */
+ // the following will only change the lower triangular part of s, including
+ // the diagonal, though the diagonal is restored afterward
+
+ /* copy r and (q transpose)*b to preserve input and initialize s. */
+ /* in particular, save the diagonal elements of r in x. */
+ x = s.diagonal();
+ wa = qtb;
+
+
+ s.topLeftCorner(n,n).template triangularView<StrictlyLower>() = s.topLeftCorner(n,n).transpose();
+ /* eliminate the diagonal matrix d using a givens rotation. */
+ for (j = 0; j < n; ++j) {
+
+ /* prepare the row of d to be eliminated, locating the */
+ /* diagonal element using p from the qr factorization. */
+ l = iPerm.indices()(j);
+ if (diag[l] == 0.)
+ break;
+ sdiag.tail(n-j).setZero();
+ sdiag[j] = diag[l];
+
+ /* the transformations to eliminate the row of d */
+ /* modify only a single element of (q transpose)*b */
+ /* beyond the first n, which is initially zero. */
+ Scalar qtbpj = 0.;
+ for (k = j; k < n; ++k) {
+ /* determine a givens rotation which eliminates the */
+ /* appropriate element in the current row of d. */
+ givens.makeGivens(-s(k,k), sdiag[k]);
+
+ /* compute the modified diagonal element of r and */
+ /* the modified element of ((q transpose)*b,0). */
+ s(k,k) = givens.c() * s(k,k) + givens.s() * sdiag[k];
+ temp = givens.c() * wa[k] + givens.s() * qtbpj;
+ qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj;
+ wa[k] = temp;
+
+ /* accumulate the transformation in the row of s. */
+ for (i = k+1; i<n; ++i) {
+ temp = givens.c() * s(i,k) + givens.s() * sdiag[i];
+ sdiag[i] = -givens.s() * s(i,k) + givens.c() * sdiag[i];
+ s(i,k) = temp;
+ }
+ }
+ }
+
+ /* solve the triangular system for z. if the system is */
+ /* singular, then obtain a least squares solution. */
+ Index nsing;
+ for(nsing=0; nsing<n && sdiag[nsing]!=0; nsing++) {}
+
+ wa.tail(n-nsing).setZero();
+ s.topLeftCorner(nsing, nsing).transpose().template triangularView<Upper>().solveInPlace(wa.head(nsing));
+
+ // restore
+ sdiag = s.diagonal();
+ s.diagonal() = x;
+
+ /* permute the components of z back to components of x. */
+ x = iPerm * wa;
+}
+
+template <typename Scalar, int _Options, typename Index>
+void lmqrsolv(
+ SparseMatrix<Scalar,_Options,Index> &s,
+ const PermutationMatrix<Dynamic,Dynamic> &iPerm,
+ const Matrix<Scalar,Dynamic,1> &diag,
+ const Matrix<Scalar,Dynamic,1> &qtb,
+ Matrix<Scalar,Dynamic,1> &x,
+ Matrix<Scalar,Dynamic,1> &sdiag)
+{
+ /* Local variables */
+ typedef SparseMatrix<Scalar,RowMajor,Index> FactorType;
+ Index i, j, k, l;
+ Scalar temp;
+ Index n = s.cols();
+ Matrix<Scalar,Dynamic,1> wa(n);
+ JacobiRotation<Scalar> givens;
+
+ /* Function Body */
+ // the following will only change the lower triangular part of s, including
+ // the diagonal, though the diagonal is restored afterward
+
+ /* copy r and (q transpose)*b to preserve input and initialize R. */
+ wa = qtb;
+ FactorType R(s);
+ // Eliminate the diagonal matrix d using a givens rotation
+ for (j = 0; j < n; ++j)
+ {
+ // Prepare the row of d to be eliminated, locating the
+ // diagonal element using p from the qr factorization
+ l = iPerm.indices()(j);
+ if (diag(l) == Scalar(0))
+ break;
+ sdiag.tail(n-j).setZero();
+ sdiag[j] = diag[l];
+ // the transformations to eliminate the row of d
+ // modify only a single element of (q transpose)*b
+ // beyond the first n, which is initially zero.
+
+ Scalar qtbpj = 0;
+ // Browse the nonzero elements of row j of the upper triangular s
+ for (k = j; k < n; ++k)
+ {
+ typename FactorType::InnerIterator itk(R,k);
+ for (; itk; ++itk){
+ if (itk.index() < k) continue;
+ else break;
+ }
+ //At this point, we have the diagonal element R(k,k)
+ // Determine a givens rotation which eliminates
+ // the appropriate element in the current row of d
+ givens.makeGivens(-itk.value(), sdiag(k));
+
+ // Compute the modified diagonal element of r and
+ // the modified element of ((q transpose)*b,0).
+ itk.valueRef() = givens.c() * itk.value() + givens.s() * sdiag(k);
+ temp = givens.c() * wa(k) + givens.s() * qtbpj;
+ qtbpj = -givens.s() * wa(k) + givens.c() * qtbpj;
+ wa(k) = temp;
+
+ // Accumulate the transformation in the remaining k row/column of R
+ for (++itk; itk; ++itk)
+ {
+ i = itk.index();
+ temp = givens.c() * itk.value() + givens.s() * sdiag(i);
+ sdiag(i) = -givens.s() * itk.value() + givens.c() * sdiag(i);
+ itk.valueRef() = temp;
+ }
+ }
+ }
+
+ // Solve the triangular system for z. If the system is
+ // singular, then obtain a least squares solution
+ Index nsing;
+ for(nsing = 0; nsing<n && sdiag(nsing) !=0; nsing++) {}
+
+ wa.tail(n-nsing).setZero();
+// x = wa;
+ wa.head(nsing) = R.topLeftCorner(nsing,nsing).template triangularView<Upper>().solve/*InPlace*/(wa.head(nsing));
+
+ sdiag = R.diagonal();
+ // Permute the components of z back to components of x
+ x = iPerm * wa;
+}
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_LMQRSOLV_H
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
new file mode 100644
index 000000000..51dd1d3c4
--- /dev/null
+++ b/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h
@@ -0,0 +1,377 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Thomas Capricelli <orzel@freehackers.org>
+// Copyright (C) 2012 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
+//
+// The algorithm of this class initially comes from MINPACK whose original authors are:
+// Copyright Jorge More - Argonne National Laboratory
+// Copyright Burt Garbow - Argonne National Laboratory
+// Copyright Ken Hillstrom - Argonne National Laboratory
+//
+// This Source Code Form is subject to the terms of the Minpack license
+// (a BSD-like license) described in the accompanying CopyrightMINPACK.txt file.
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_LEVENBERGMARQUARDT_H
+#define EIGEN_LEVENBERGMARQUARDT_H
+
+
+namespace Eigen {
+namespace LevenbergMarquardtSpace {
+ enum Status {
+ NotStarted = -2,
+ Running = -1,
+ ImproperInputParameters = 0,
+ RelativeReductionTooSmall = 1,
+ RelativeErrorTooSmall = 2,
+ RelativeErrorAndReductionTooSmall = 3,
+ CosinusTooSmall = 4,
+ TooManyFunctionEvaluation = 5,
+ FtolTooSmall = 6,
+ XtolTooSmall = 7,
+ GtolTooSmall = 8,
+ UserAsked = 9
+ };
+}
+
+template <typename _Scalar, int NX=Dynamic, int NY=Dynamic>
+struct DenseFunctor
+{
+ typedef _Scalar Scalar;
+ enum {
+ InputsAtCompileTime = NX,
+ ValuesAtCompileTime = NY
+ };
+ typedef Matrix<Scalar,InputsAtCompileTime,1> InputType;
+ typedef Matrix<Scalar,ValuesAtCompileTime,1> ValueType;
+ typedef Matrix<Scalar,ValuesAtCompileTime,InputsAtCompileTime> JacobianType;
+ typedef ColPivHouseholderQR<JacobianType> QRSolver;
+ const int m_inputs, m_values;
+
+ DenseFunctor() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {}
+ DenseFunctor(int inputs, int values) : m_inputs(inputs), m_values(values) {}
+
+ int inputs() const { return m_inputs; }
+ int values() const { return m_values; }
+
+ //int operator()(const InputType &x, ValueType& fvec) { }
+ // should be defined in derived classes
+
+ //int df(const InputType &x, JacobianType& fjac) { }
+ // should be defined in derived classes
+};
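To make the commented-out prototypes above concrete, here is a hypothetical functor deriving from DenseFunctor. The name ExpFitFunctor, the model a*exp(b*t) and the data members are invented for this sketch; only operator() and df() mirror the interface expected by minimizeOneStep() in LMonestep.h.

#include <Eigen/Dense>
#include <unsupported/Eigen/LevenbergMarquardt>  // assumed module header exposing DenseFunctor

struct ExpFitFunctor : Eigen::DenseFunctor<double>
{
  Eigen::VectorXd t, y;   // sample points and measurements

  ExpFitFunctor(const Eigen::VectorXd& t_, const Eigen::VectorXd& y_)
    : Eigen::DenseFunctor<double>(2, static_cast<int>(t_.size())), t(t_), y(y_) {}

  // residuals: fvec(i) = a * exp(b * t(i)) - y(i), with x = (a, b)
  int operator()(const InputType& x, ValueType& fvec)
  {
    fvec = (x(0) * (x(1) * t.array()).exp() - y.array()).matrix();
    return 0;                                    // a negative value aborts with UserAsked
  }

  // analytic Jacobian; returning 0 makes the caller count this as a Jacobian evaluation
  int df(const InputType& x, JacobianType& fjac)
  {
    const Eigen::ArrayXd e = (x(1) * t.array()).exp();
    fjac.col(0) = e.matrix();
    fjac.col(1) = (x(0) * t.array() * e).matrix();
    return 0;
  }
};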
+
+template <typename _Scalar, typename _Index>
+struct SparseFunctor
+{
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Matrix<Scalar,Dynamic,1> InputType;
+ typedef Matrix<Scalar,Dynamic,1> ValueType;
+ typedef SparseMatrix<Scalar, ColMajor, Index> JacobianType;
+ typedef SparseQR<JacobianType, COLAMDOrdering<int> > QRSolver;
+ enum {
+ InputsAtCompileTime = Dynamic,
+ ValuesAtCompileTime = Dynamic
+ };
+
+ SparseFunctor(int inputs, int values) : m_inputs(inputs), m_values(values) {}
+
+ int inputs() const { return m_inputs; }
+ int values() const { return m_values; }
+
+ const int m_inputs, m_values;
+ //int operator()(const InputType &x, ValueType& fvec) { }
+ // to be defined in the functor
+
+ //int df(const InputType &x, JacobianType& fjac) { }
+ // to be defined in the functor if no automatic differentiation
+
+};
+namespace internal {
+template <typename QRSolver, typename VectorType>
+void lmpar2(const QRSolver &qr, const VectorType &diag, const VectorType &qtb,
+ typename VectorType::Scalar m_delta, typename VectorType::Scalar &par,
+ VectorType &x);
+ }
+/**
+ * \ingroup NonLinearOptimization_Module
+ * \brief Performs non-linear optimization over a non-linear function,
+ * using a variant of the Levenberg Marquardt algorithm.
+ *
+ * See the Wikipedia article for more information:
+ * http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm
+ */
+template<typename _FunctorType>
+class LevenbergMarquardt : internal::no_assignment_operator
+{
+ public:
+ typedef _FunctorType FunctorType;
+ typedef typename FunctorType::QRSolver QRSolver;
+ typedef typename FunctorType::JacobianType JacobianType;
+ typedef typename JacobianType::Scalar Scalar;
+ typedef typename JacobianType::RealScalar RealScalar;
+ typedef typename JacobianType::Index Index;
+ typedef typename QRSolver::Index PermIndex;
+ typedef Matrix<Scalar,Dynamic,1> FVectorType;
+ typedef PermutationMatrix<Dynamic,Dynamic> PermutationType;
+ public:
+ LevenbergMarquardt(FunctorType& functor)
+ : m_functor(functor),m_nfev(0),m_njev(0),m_fnorm(0.0),m_gnorm(0),
+ m_isInitialized(false),m_info(InvalidInput)
+ {
+ resetParameters();
+ m_useExternalScaling=false;
+ }
+
+ LevenbergMarquardtSpace::Status minimize(FVectorType &x);
+ LevenbergMarquardtSpace::Status minimizeInit(FVectorType &x);
+ LevenbergMarquardtSpace::Status minimizeOneStep(FVectorType &x);
+ LevenbergMarquardtSpace::Status lmder1(
+ FVectorType &x,
+ const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ );
+ static LevenbergMarquardtSpace::Status lmdif1(
+ FunctorType &functor,
+ FVectorType &x,
+ Index *nfev,
+ const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
+ );
+
+ /** Sets the default parameters */
+ void resetParameters()
+ {
+ m_factor = 100.;
+ m_maxfev = 400;
+ m_ftol = std::sqrt(NumTraits<RealScalar>::epsilon());
+ m_xtol = std::sqrt(NumTraits<RealScalar>::epsilon());
+ m_gtol = 0. ;
+ m_epsfcn = 0. ;
+ }
+
+ /** Sets the tolerance for the norm of the solution vector*/
+ void setXtol(RealScalar xtol) { m_xtol = xtol; }
+
+ /** Sets the tolerance for the norm of the vector function*/
+ void setFtol(RealScalar ftol) { m_ftol = ftol; }
+
+ /** Sets the tolerance for the norm of the gradient of the error vector*/
+ void setGtol(RealScalar gtol) { m_gtol = gtol; }
+
+ /** Sets the step bound for the diagonal shift */
+ void setFactor(RealScalar factor) { m_factor = factor; }
+
+ /** Sets the error precision */
+ void setEpsilon (RealScalar epsfcn) { m_epsfcn = epsfcn; }
+
+ /** Sets the maximum number of function evaluations */
+ void setMaxfev(Index maxfev) {m_maxfev = maxfev; }
+
+ /** Use an external Scaling. If set to true, pass a nonzero diagonal to diag() */
+ void setExternalScaling(bool value) {m_useExternalScaling = value; }
+
+ /** \returns a reference to the diagonal of the jacobian */
+ FVectorType& diag() {return m_diag; }
+
+ /** \returns the number of iterations performed */
+ Index iterations() { return m_iter; }
+
+ /** \returns the number of function evaluations */
+ Index nfev() { return m_nfev; }
+
+ /** \returns the number of jacobian evaluations */
+ Index njev() { return m_njev; }
+
+ /** \returns the norm of current vector function */
+ RealScalar fnorm() {return m_fnorm; }
+
+ /** \returns the norm of the gradient of the error */
+ RealScalar gnorm() {return m_gnorm; }
+
+ /** \returns the LevenbergMarquardt parameter */
+ RealScalar lm_param(void) { return m_par; }
+
+ /** \returns a reference to the current vector function
+ */
+ FVectorType& fvec() {return m_fvec; }
+
+ /** \returns a reference to the matrix where the current Jacobian matrix is stored
+ */
+ JacobianType& jacobian() {return m_fjac; }
+
+ /** \returns a reference to the triangular matrix R from the QR of the jacobian matrix.
+ * \sa jacobian()
+ */
+ JacobianType& matrixR() {return m_rfactor; }
+
+ /** \returns the permutation used in the QR factorization
+ */
+ PermutationType permutation() {return m_permutation; }
+
+ /**
+ * \brief Reports whether the minimization was successful
+ * \returns \c Success if the minimization was successful,
+ * \c NumericalIssue if a numerical problem arises during the
+ * minimization process, for example during the QR factorization,
+ * \c NoConvergence if the minimization did not converge after
+ * the maximum number of function evaluations allowed,
+ * \c InvalidInput if the input matrix is invalid
+ */
+ ComputationInfo info() const
+ {
+ return m_info;
+ }
+ private:
+ JacobianType m_fjac;
+ JacobianType m_rfactor; // The triangular matrix R from the QR of the jacobian matrix m_fjac
+ FunctorType &m_functor;
+ FVectorType m_fvec, m_qtf, m_diag;
+ Index n;
+ Index m;
+ Index m_nfev;
+ Index m_njev;
+ RealScalar m_fnorm; // Norm of the current vector function
+ RealScalar m_gnorm; // Norm of the gradient of the error
+ RealScalar m_factor; // Step bound for the diagonal shift
+ Index m_maxfev; // Maximum number of function evaluations
+ RealScalar m_ftol; // Tolerance on the norm of the vector function
+ RealScalar m_xtol; // Tolerance on the norm of the solution vector
+ RealScalar m_gtol; // Tolerance on the norm of the error gradient
+ RealScalar m_epsfcn; // Error precision used for numerical differentiation
+ Index m_iter; // Number of iterations performed
+ RealScalar m_delta;
+ bool m_useExternalScaling;
+ PermutationType m_permutation;
+ FVectorType m_wa1, m_wa2, m_wa3, m_wa4; //Temporary vectors
+ RealScalar m_par;
+ bool m_isInitialized; // Check whether the minimization step has been called
+ ComputationInfo m_info;
+};
+
+template<typename FunctorType>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType>::minimize(FVectorType &x)
+{
+ LevenbergMarquardtSpace::Status status = minimizeInit(x);
+ if (status==LevenbergMarquardtSpace::ImproperInputParameters) {
+ m_isInitialized = true;
+ return status;
+ }
+ do {
+// std::cout << " uv " << x.transpose() << "\n";
+ status = minimizeOneStep(x);
+ } while (status==LevenbergMarquardtSpace::Running);
+ m_isInitialized = true;
+ return status;
+}
+
+template<typename FunctorType>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType>::minimizeInit(FVectorType &x)
+{
+ n = x.size();
+ m = m_functor.values();
+
+ m_wa1.resize(n); m_wa2.resize(n); m_wa3.resize(n);
+ m_wa4.resize(m);
+ m_fvec.resize(m);
+ //FIXME Sparse Case : Allocate space for the jacobian
+ m_fjac.resize(m, n);
+// m_fjac.reserve(VectorXi::Constant(n,5)); // FIXME Find a better alternative
+ if (!m_useExternalScaling)
+ m_diag.resize(n);
+ eigen_assert( (!m_useExternalScaling || m_diag.size()==n) && "When m_useExternalScaling is set, the caller must provide a valid 'm_diag'");
+ m_qtf.resize(n);
+
+ /* Function Body */
+ m_nfev = 0;
+ m_njev = 0;
+
+ /* check the input parameters for errors. */
+ if (n <= 0 || m < n || m_ftol < 0. || m_xtol < 0. || m_gtol < 0. || m_maxfev <= 0 || m_factor <= 0.){
+ m_info = InvalidInput;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
+ }
+
+ if (m_useExternalScaling)
+ for (Index j = 0; j < n; ++j)
+ if (m_diag[j] <= 0.)
+ {
+ m_info = InvalidInput;
+ return LevenbergMarquardtSpace::ImproperInputParameters;
+ }
+
+ /* evaluate the function at the starting point */
+ /* and calculate its norm. */
+ m_nfev = 1;
+ if ( m_functor(x, m_fvec) < 0)
+ return LevenbergMarquardtSpace::UserAsked;
+ m_fnorm = m_fvec.stableNorm();
+
+ /* initialize levenberg-marquardt parameter and iteration counter. */
+ m_par = 0.;
+ m_iter = 1;
+
+ return LevenbergMarquardtSpace::NotStarted;
+}
+
+template<typename FunctorType>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType>::lmder1(
+ FVectorType &x,
+ const Scalar tol
+ )
+{
+ n = x.size();
+ m = m_functor.values();
+
+ /* check the input parameters for errors. */
+ if (n <= 0 || m < n || tol < 0.)
+ return LevenbergMarquardtSpace::ImproperInputParameters;
+
+ resetParameters();
+ m_ftol = tol;
+ m_xtol = tol;
+ m_maxfev = 100*(n+1);
+
+ return minimize(x);
+}
+
+
+template<typename FunctorType>
+LevenbergMarquardtSpace::Status
+LevenbergMarquardt<FunctorType>::lmdif1(
+ FunctorType &functor,
+ FVectorType &x,
+ Index *nfev,
+ const Scalar tol
+ )
+{
+ Index n = x.size();
+ Index m = functor.values();
+
+ /* check the input parameters for errors. */
+ if (n <= 0 || m < n || tol < 0.)
+ return LevenbergMarquardtSpace::ImproperInputParameters;
+
+ NumericalDiff<FunctorType> numDiff(functor);
+ // embedded LevenbergMarquardt
+ LevenbergMarquardt<NumericalDiff<FunctorType> > lm(numDiff);
+ lm.setFtol(tol);
+ lm.setXtol(tol);
+ lm.setMaxfev(200*(n+1));
+
+ LevenbergMarquardtSpace::Status info = LevenbergMarquardtSpace::Status(lm.minimize(x));
+ if (nfev)
+ * nfev = lm.nfev();
+ return info;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_LEVENBERGMARQUARDT_H
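A complete, illustrative driver using the hypothetical ExpFitFunctor sketched after the DenseFunctor definition above; the data, initial guess and tolerances are arbitrary.

#include <unsupported/Eigen/LevenbergMarquardt>
#include <iostream>

int main()
{
  Eigen::VectorXd t(4), y(4);
  t << 0.0, 1.0, 2.0, 3.0;
  y << 2.0, 2.7, 3.6, 4.9;                   // made-up measurements

  ExpFitFunctor functor(t, y);               // the sketch from above
  Eigen::LevenbergMarquardt<ExpFitFunctor> lm(functor);
  lm.setFtol(1e-10);
  lm.setXtol(1e-10);

  Eigen::VectorXd x(2);
  x << 1.0, 0.1;                             // initial guess for (a, b)
  Eigen::LevenbergMarquardtSpace::Status status = lm.minimize(x);

  std::cout << "status " << status << ", info " << lm.info()
            << ", iterations " << lm.iterations()
            << ", x = " << x.transpose() << std::endl;
  return 0;
}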
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
index 642916764..6825a7882 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h
@@ -13,12 +13,7 @@
#include "StemFunction.h"
-namespace Eigen {
-
-#if defined(_MSC_VER) || defined(__FreeBSD__)
- template <typename Scalar> Scalar log2(Scalar v) { using std::log; return log(v)/log(Scalar(2)); }
-#endif
-
+namespace Eigen {
/** \ingroup MatrixFunctions_Module
* \brief Class for computing the matrix exponential.
@@ -233,7 +228,7 @@ template <typename MatrixType>
EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade9(const MatrixType &A)
{
const RealScalar b[] = {17643225600., 8821612800., 2075673600., 302702400., 30270240.,
- 2162160., 110880., 3960., 90., 1.};
+ 2162160., 110880., 3960., 90., 1.};
MatrixType A2 = A * A;
MatrixType A4 = A2 * A2;
MatrixType A6 = A4 * A2;
@@ -247,8 +242,8 @@ template <typename MatrixType>
EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade13(const MatrixType &A)
{
const RealScalar b[] = {64764752532480000., 32382376266240000., 7771770303897600.,
- 1187353796428800., 129060195264000., 10559470521600., 670442572800.,
- 33522128640., 1323241920., 40840800., 960960., 16380., 182., 1.};
+ 1187353796428800., 129060195264000., 10559470521600., 670442572800.,
+ 33522128640., 1323241920., 40840800., 960960., 16380., 182., 1.};
MatrixType A2 = A * A;
MatrixType A4 = A2 * A2;
m_tmp1.noalias() = A4 * A2;
@@ -266,11 +261,11 @@ template <typename MatrixType>
EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade17(const MatrixType &A)
{
const RealScalar b[] = {830034394580628357120000.L, 415017197290314178560000.L,
- 100610229646136770560000.L, 15720348382208870400000.L,
- 1774878043152614400000.L, 153822763739893248000.L, 10608466464820224000.L,
- 595373117923584000.L, 27563570274240000.L, 1060137318240000.L,
- 33924394183680.L, 899510451840.L, 19554575040.L, 341863200.L, 4651200.L,
- 46512.L, 306.L, 1.L};
+ 100610229646136770560000.L, 15720348382208870400000.L,
+ 1774878043152614400000.L, 153822763739893248000.L, 10608466464820224000.L,
+ 595373117923584000.L, 27563570274240000.L, 1060137318240000.L,
+ 33924394183680.L, 899510451840.L, 19554575040.L, 341863200.L, 4651200.L,
+ 46512.L, 306.L, 1.L};
MatrixType A2 = A * A;
MatrixType A4 = A2 * A2;
MatrixType A6 = A4 * A2;
@@ -288,16 +283,16 @@ EIGEN_STRONG_INLINE void MatrixExponential<MatrixType>::pade17(const MatrixType
template <typename MatrixType>
void MatrixExponential<MatrixType>::computeUV(float)
{
- using std::max;
+ using std::frexp;
using std::pow;
- using std::ceil;
if (m_l1norm < 4.258730016922831e-001) {
pade3(m_M);
} else if (m_l1norm < 1.880152677804762e+000) {
pade5(m_M);
} else {
const float maxnorm = 3.925724783138660f;
- m_squarings = (max)(0, (int)ceil(log2(m_l1norm / maxnorm)));
+ frexp(m_l1norm / maxnorm, &m_squarings);
+ if (m_squarings < 0) m_squarings = 0;
MatrixType A = m_M / pow(Scalar(2), m_squarings);
pade7(A);
}
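Why this replacement is safe: frexp(v, &e) returns e with v = f * 2^e and 0.5 <= f < 1, so e equals ceil(log2(v)) except when v is an exact power of two, where it is one larger (one harmless extra squaring); the clamp that follows reproduces the old max(0, ...). A tiny standalone check (C++11, illustrative only; note the removed formula needs log2, whose absence on MSVC/FreeBSD is what the workaround deleted at the top of this file papered over):

#include <algorithm>
#include <cmath>
#include <cstdio>

static int squaringsOld(double v) { return std::max(0, (int)std::ceil(std::log2(v))); }
static int squaringsNew(double v) { int e; std::frexp(v, &e); return e < 0 ? 0 : e; }

int main()
{
  for (double v : {0.2, 1.0, 1.5, 4.0, 37.0})
    std::printf("%-4g old=%d new=%d\n", v, squaringsOld(v), squaringsNew(v));
  return 0;
}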
@@ -306,9 +301,8 @@ void MatrixExponential<MatrixType>::computeUV(float)
template <typename MatrixType>
void MatrixExponential<MatrixType>::computeUV(double)
{
- using std::max;
+ using std::frexp;
using std::pow;
- using std::ceil;
if (m_l1norm < 1.495585217958292e-002) {
pade3(m_M);
} else if (m_l1norm < 2.539398330063230e-001) {
@@ -319,7 +313,8 @@ void MatrixExponential<MatrixType>::computeUV(double)
pade9(m_M);
} else {
const double maxnorm = 5.371920351148152;
- m_squarings = (max)(0, (int)ceil(log2(m_l1norm / maxnorm)));
+ frexp(m_l1norm / maxnorm, &m_squarings);
+ if (m_squarings < 0) m_squarings = 0;
MatrixType A = m_M / pow(Scalar(2), m_squarings);
pade13(A);
}
@@ -328,9 +323,8 @@ void MatrixExponential<MatrixType>::computeUV(double)
template <typename MatrixType>
void MatrixExponential<MatrixType>::computeUV(long double)
{
- using std::max;
+ using std::frexp;
using std::pow;
- using std::ceil;
#if LDBL_MANT_DIG == 53 // double precision
computeUV(double());
#elif LDBL_MANT_DIG <= 64 // extended precision
@@ -344,7 +338,8 @@ void MatrixExponential<MatrixType>::computeUV(long double)
pade9(m_M);
} else {
const long double maxnorm = 4.0246098906697353063L;
- m_squarings = (max)(0, (int)ceil(log2(m_l1norm / maxnorm)));
+ frexp(m_l1norm / maxnorm, &m_squarings);
+ if (m_squarings < 0) m_squarings = 0;
MatrixType A = m_M / pow(Scalar(2), m_squarings);
pade13(A);
}
@@ -361,7 +356,8 @@ void MatrixExponential<MatrixType>::computeUV(long double)
pade13(m_M);
} else {
const long double maxnorm = 3.2579440895405400856599663723517L;
- m_squarings = (max)(0, (int)ceil(log2(m_l1norm / maxnorm)));
+ frexp(m_l1norm / maxnorm, &m_squarings);
+ if (m_squarings < 0) m_squarings = 0;
MatrixType A = m_M / pow(Scalar(2), m_squarings);
pade17(A);
}
@@ -378,7 +374,8 @@ void MatrixExponential<MatrixType>::computeUV(long double)
pade13(m_M);
} else {
const long double maxnorm = 2.884233277829519311757165057717815L;
- m_squarings = (max)(0, (int)ceil(log2(m_l1norm / maxnorm)));
+ frexp(m_l1norm / maxnorm, &m_squarings);
+ if (m_squarings < 0) m_squarings = 0;
MatrixType A = m_M / pow(Scalar(2), m_squarings);
pade17(A);
}
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
index c57ca87ed..7d426640c 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h
@@ -209,7 +209,7 @@ void MatrixFunction<MatrixType,AtomicType,1>::compute(ResultType& result)
permuteSchur();
computeBlockAtomic();
computeOffDiagonal();
- result = m_U * m_fT * m_U.adjoint();
+ result = m_U * (m_fT.template triangularView<Upper>() * m_U.adjoint());
}
/** \brief Store the Schur decomposition of #m_A in #m_T and #m_U */
@@ -235,6 +235,7 @@ void MatrixFunction<MatrixType,AtomicType,1>::computeSchurDecomposition()
template <typename MatrixType, typename AtomicType>
void MatrixFunction<MatrixType,AtomicType,1>::partitionEigenvalues()
{
+ using std::abs;
const Index rows = m_T.rows();
VectorType diag = m_T.diagonal(); // contains eigenvalues of A
@@ -251,14 +252,14 @@ void MatrixFunction<MatrixType,AtomicType,1>::partitionEigenvalues()
// Look for other element to add to the set
for (Index j=i+1; j<rows; ++j) {
- if (internal::abs(diag(j) - diag(i)) <= separation() && std::find(qi->begin(), qi->end(), diag(j)) == qi->end()) {
- typename ListOfClusters::iterator qj = findCluster(diag(j));
- if (qj == m_clusters.end()) {
- qi->push_back(diag(j));
- } else {
- qi->insert(qi->end(), qj->begin(), qj->end());
- m_clusters.erase(qj);
- }
+ if (abs(diag(j) - diag(i)) <= separation() && std::find(qi->begin(), qi->end(), diag(j)) == qi->end()) {
+ typename ListOfClusters::iterator qj = findCluster(diag(j));
+ if (qj == m_clusters.end()) {
+ qi->push_back(diag(j));
+ } else {
+ qi->insert(qi->end(), qj->begin(), qj->end());
+ m_clusters.erase(qj);
+ }
}
}
}
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
index 3a50514b9..c744fc05f 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h
@@ -51,7 +51,6 @@ private:
void compute2x2(const MatrixType& A, MatrixType& result);
void computeBig(const MatrixType& A, MatrixType& result);
- static Scalar atanh(Scalar x);
int getPadeDegree(float normTminusI);
int getPadeDegree(double normTminusI);
int getPadeDegree(long double normTminusI);
@@ -67,10 +66,11 @@ private:
void computePade11(MatrixType& result, const MatrixType& T);
static const int minPadeDegree = 3;
- static const int maxPadeDegree = std::numeric_limits<RealScalar>::digits<= 24? 5: // single precision
- std::numeric_limits<RealScalar>::digits<= 53? 7: // double precision
- std::numeric_limits<RealScalar>::digits<= 64? 8: // extended precision
- std::numeric_limits<RealScalar>::digits<=106? 10: 11; // double-double or quadruple precision
+ static const int maxPadeDegree = std::numeric_limits<RealScalar>::digits<= 24? 5: // single precision
+ std::numeric_limits<RealScalar>::digits<= 53? 7: // double precision
+ std::numeric_limits<RealScalar>::digits<= 64? 8: // extended precision
+ std::numeric_limits<RealScalar>::digits<=106? 10: // double-double
+ 11; // quadruple precision
// Prevent copying
MatrixLogarithmAtomic(const MatrixLogarithmAtomic&);
@@ -92,18 +92,6 @@ MatrixType MatrixLogarithmAtomic<MatrixType>::compute(const MatrixType& A)
return result;
}
-/** \brief Compute atanh (inverse hyperbolic tangent). */
-template <typename MatrixType>
-typename MatrixType::Scalar MatrixLogarithmAtomic<MatrixType>::atanh(typename MatrixType::Scalar x)
-{
- using std::abs;
- using std::sqrt;
- if (abs(x) > sqrt(NumTraits<Scalar>::epsilon()))
- return Scalar(0.5) * log((Scalar(1) + x) / (Scalar(1) - x));
- else
- return x + x*x*x / Scalar(3);
-}
-
/** \brief Compute logarithm of 2x2 triangular matrix. */
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::compute2x2(const MatrixType& A, MatrixType& result)
@@ -127,8 +115,8 @@ void MatrixLogarithmAtomic<MatrixType>::compute2x2(const MatrixType& A, MatrixTy
} else {
// computation in previous branch is inaccurate if A(1,1) \approx A(0,0)
int unwindingNumber = static_cast<int>(ceil((imag(logA11 - logA00) - M_PI) / (2*M_PI)));
- Scalar z = (A(1,1) - A(0,0)) / (A(1,1) + A(0,0));
- result(0,1) = A(0,1) * (Scalar(2) * atanh(z) + Scalar(0,2*M_PI*unwindingNumber)) / (A(1,1) - A(0,0));
+ Scalar y = A(1,1) - A(0,0), x = A(1,1) + A(0,0);
+ result(0,1) = A(0,1) * (Scalar(2) * numext::atanh2(y,x) + Scalar(0,2*M_PI*unwindingNumber)) / y;
}
}
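The removed atanh helper is replaced here by numext::atanh2(y, x). Assuming, as the call suggests, that atanh2(y, x) computes atanh(y/x) with the same small-argument series the helper used, the branch is unchanged: the old z was exactly (A(1,1)-A(0,0)) / (A(1,1)+A(0,0)) = y/x. A sketch of that assumed semantics for a real scalar (not Eigen's actual implementation, which also handles complex Scalar):

#include <cmath>
#include <limits>

double atanh2_sketch(double y, double x)       // ~ atanh(y/x), illustrative only
{
  const double z = y / x;
  if (std::abs(z) > std::sqrt(std::numeric_limits<double>::epsilon()))
    return 0.5 * std::log((x + y) / (x - y));  // == 0.5*log((1+z)/(1-z)) = atanh(z)
  return z + z*z*z / 3.0;                      // series fallback, as in the removed helper
}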
@@ -137,10 +125,11 @@ void MatrixLogarithmAtomic<MatrixType>::compute2x2(const MatrixType& A, MatrixTy
template <typename MatrixType>
void MatrixLogarithmAtomic<MatrixType>::computeBig(const MatrixType& A, MatrixType& result)
{
+ using std::pow;
int numberOfSquareRoots = 0;
int numberOfExtraSquareRoots = 0;
int degree;
- MatrixType T = A;
+ MatrixType T = A, sqrtT;
const RealScalar maxNormForPade = maxPadeDegree<= 5? 5.3149729967117310e-1: // single precision
maxPadeDegree<= 7? 2.6429608311114350e-1: // double precision
maxPadeDegree<= 8? 2.32777776523703892094e-1L: // extended precision
@@ -153,12 +142,11 @@ void MatrixLogarithmAtomic<MatrixType>::computeBig(const MatrixType& A, MatrixTy
degree = getPadeDegree(normTminusI);
int degree2 = getPadeDegree(normTminusI / RealScalar(2));
if ((degree - degree2 <= 1) || (numberOfExtraSquareRoots == 1))
- break;
+ break;
++numberOfExtraSquareRoots;
}
- MatrixType sqrtT;
MatrixSquareRootTriangular<MatrixType>(T).compute(sqrtT);
- T = sqrtT;
+ T = sqrtT.template triangularView<Upper>();
++numberOfSquareRoots;
}
@@ -172,10 +160,11 @@ int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(float normTminusI)
{
const float maxNormForPade[] = { 2.5111573934555054e-1 /* degree = 3 */ , 4.0535837411880493e-1,
5.3149729967117310e-1 };
- for (int degree = 3; degree <= maxPadeDegree; ++degree)
+ int degree = 3;
+ for (; degree <= maxPadeDegree; ++degree)
if (normTminusI <= maxNormForPade[degree - minPadeDegree])
- return degree;
- assert(false); // this line should never be reached
+ break;
+ return degree;
}
/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = double) */
@@ -184,10 +173,11 @@ int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(double normTminusI)
{
const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2,
1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 };
- for (int degree = 3; degree <= maxPadeDegree; ++degree)
+ int degree = 3;
+ for (; degree <= maxPadeDegree; ++degree)
if (normTminusI <= maxNormForPade[degree - minPadeDegree])
- return degree;
- assert(false); // this line should never be reached
+ break;
+ return degree;
}
/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = long double) */
@@ -195,29 +185,30 @@ template <typename MatrixType>
int MatrixLogarithmAtomic<MatrixType>::getPadeDegree(long double normTminusI)
{
#if LDBL_MANT_DIG == 53 // double precision
- const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2,
- 1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 };
+ const long double maxNormForPade[] = { 1.6206284795015624e-2L /* degree = 3 */ , 5.3873532631381171e-2L,
+ 1.1352802267628681e-1L, 1.8662860613541288e-1L, 2.642960831111435e-1L };
#elif LDBL_MANT_DIG <= 64 // extended precision
- const double maxNormForPade[] = { 5.48256690357782863103e-3 /* degree = 3 */, 2.34559162387971167321e-2,
- 5.84603923897347449857e-2, 1.08486423756725170223e-1, 1.68385767881294446649e-1,
- 2.32777776523703892094e-1 };
+ const long double maxNormForPade[] = { 5.48256690357782863103e-3L /* degree = 3 */, 2.34559162387971167321e-2L,
+ 5.84603923897347449857e-2L, 1.08486423756725170223e-1L, 1.68385767881294446649e-1L,
+ 2.32777776523703892094e-1L };
#elif LDBL_MANT_DIG <= 106 // double-double
- const double maxNormForPade[] = { 8.58970550342939562202529664318890e-5 /* degree = 3 */,
- 9.34074328446359654039446552677759e-4, 4.26117194647672175773064114582860e-3,
- 1.21546224740281848743149666560464e-2, 2.61100544998339436713088248557444e-2,
- 4.66170074627052749243018566390567e-2, 7.32585144444135027565872014932387e-2,
- 1.05026503471351080481093652651105e-1 };
+ const long double maxNormForPade[] = { 8.58970550342939562202529664318890e-5L /* degree = 3 */,
+ 9.34074328446359654039446552677759e-4L, 4.26117194647672175773064114582860e-3L,
+ 1.21546224740281848743149666560464e-2L, 2.61100544998339436713088248557444e-2L,
+ 4.66170074627052749243018566390567e-2L, 7.32585144444135027565872014932387e-2L,
+ 1.05026503471351080481093652651105e-1L };
#else // quadruple precision
- const double maxNormForPade[] = { 4.7419931187193005048501568167858103e-5 /* degree = 3 */,
- 5.8853168473544560470387769480192666e-4, 2.9216120366601315391789493628113520e-3,
- 8.8415758124319434347116734705174308e-3, 1.9850836029449446668518049562565291e-2,
- 3.6688019729653446926585242192447447e-2, 5.9290962294020186998954055264528393e-2,
- 8.6998436081634343903250580992127677e-2, 1.1880960220216759245467951592883642e-1 };
+ const long double maxNormForPade[] = { 4.7419931187193005048501568167858103e-5L /* degree = 3 */,
+ 5.8853168473544560470387769480192666e-4L, 2.9216120366601315391789493628113520e-3L,
+ 8.8415758124319434347116734705174308e-3L, 1.9850836029449446668518049562565291e-2L,
+ 3.6688019729653446926585242192447447e-2L, 5.9290962294020186998954055264528393e-2L,
+ 8.6998436081634343903250580992127677e-2L, 1.1880960220216759245467951592883642e-1L };
#endif
- for (int degree = 3; degree <= maxPadeDegree; ++degree)
+ int degree = 3;
+ for (; degree <= maxPadeDegree; ++degree)
if (normTminusI <= maxNormForPade[degree - minPadeDegree])
- return degree;
- assert(false); // this line should never be reached
+ break;
+ return degree;
}
/* \brief Compute Pade approximation to matrix logarithm */
@@ -246,7 +237,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade3(MatrixType& result, const M
0.8872983346207416885179265399782400L };
const RealScalar weights[] = { 0.2777777777777777777777777777777778L, 0.4444444444444444444444444444444444L,
0.2777777777777777777777777777777778L };
- assert(degree <= maxPadeDegree);
+ eigen_assert(degree <= maxPadeDegree);
MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
result.setZero(T.rows(), T.rows());
for (int k = 0; k < degree; ++k)
@@ -262,7 +253,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade4(MatrixType& result, const M
0.6699905217924281324013328795516223L, 0.9305681557970262876119732444464048L };
const RealScalar weights[] = { 0.1739274225687269286865319746109997L, 0.3260725774312730713134680253890003L,
0.3260725774312730713134680253890003L, 0.1739274225687269286865319746109997L };
- assert(degree <= maxPadeDegree);
+ eigen_assert(degree <= maxPadeDegree);
MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
result.setZero(T.rows(), T.rows());
for (int k = 0; k < degree; ++k)
@@ -280,7 +271,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade5(MatrixType& result, const M
const RealScalar weights[] = { 0.1184634425280945437571320203599587L, 0.2393143352496832340206457574178191L,
0.2844444444444444444444444444444444L, 0.2393143352496832340206457574178191L,
0.1184634425280945437571320203599587L };
- assert(degree <= maxPadeDegree);
+ eigen_assert(degree <= maxPadeDegree);
MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
result.setZero(T.rows(), T.rows());
for (int k = 0; k < degree; ++k)
@@ -294,11 +285,11 @@ void MatrixLogarithmAtomic<MatrixType>::computePade6(MatrixType& result, const M
const int degree = 6;
const RealScalar nodes[] = { 0.0337652428984239860938492227530027L, 0.1693953067668677431693002024900473L,
0.3806904069584015456847491391596440L, 0.6193095930415984543152508608403560L,
- 0.8306046932331322568306997975099527L, 0.9662347571015760139061507772469973L };
+ 0.8306046932331322568306997975099527L, 0.9662347571015760139061507772469973L };
const RealScalar weights[] = { 0.0856622461895851725201480710863665L, 0.1803807865240693037849167569188581L,
0.2339569672863455236949351719947755L, 0.2339569672863455236949351719947755L,
- 0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L };
- assert(degree <= maxPadeDegree);
+ 0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L };
+ eigen_assert(degree <= maxPadeDegree);
MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
result.setZero(T.rows(), T.rows());
for (int k = 0; k < degree; ++k)
@@ -318,7 +309,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade7(MatrixType& result, const M
0.1909150252525594724751848877444876L, 0.2089795918367346938775510204081633L,
0.1909150252525594724751848877444876L, 0.1398526957446383339507338857118898L,
0.0647424830844348466353057163395410L };
- assert(degree <= maxPadeDegree);
+ eigen_assert(degree <= maxPadeDegree);
MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
result.setZero(T.rows(), T.rows());
for (int k = 0; k < degree; ++k)
@@ -338,7 +329,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade8(MatrixType& result, const M
0.1568533229389436436689811009933007L, 0.1813418916891809914825752246385978L,
0.1813418916891809914825752246385978L, 0.1568533229389436436689811009933007L,
0.1111905172266872352721779972131204L, 0.0506142681451881295762656771549811L };
- assert(degree <= maxPadeDegree);
+ eigen_assert(degree <= maxPadeDegree);
MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
result.setZero(T.rows(), T.rows());
for (int k = 0; k < degree; ++k)
@@ -360,7 +351,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade9(MatrixType& result, const M
0.1651196775006298815822625346434870L, 0.1561735385200014200343152032922218L,
0.1303053482014677311593714347093164L, 0.0903240803474287020292360156214564L,
0.0406371941807872059859460790552618L };
- assert(degree <= maxPadeDegree);
+ eigen_assert(degree <= maxPadeDegree);
MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
result.setZero(T.rows(), T.rows());
for (int k = 0; k < degree; ++k)
@@ -382,7 +373,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade10(MatrixType& result, const
0.1477621123573764350869464973256692L, 0.1477621123573764350869464973256692L,
0.1346333596549981775456134607847347L, 0.1095431812579910219977674671140816L,
0.0747256745752902965728881698288487L, 0.0333356721543440687967844049466659L };
- assert(degree <= maxPadeDegree);
+ eigen_assert(degree <= maxPadeDegree);
MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
result.setZero(T.rows(), T.rows());
for (int k = 0; k < degree; ++k)
@@ -406,7 +397,7 @@ void MatrixLogarithmAtomic<MatrixType>::computePade11(MatrixType& result, const
0.1314022722551233310903444349452546L, 0.1165968822959952399592618524215876L,
0.0931451054638671257130488207158280L, 0.0627901847324523123173471496119701L,
0.0278342835580868332413768602212743L };
- assert(degree <= maxPadeDegree);
+ eigen_assert(degree <= maxPadeDegree);
MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows());
result.setZero(T.rows(), T.rows());
for (int k = 0; k < degree; ++k)
@@ -423,8 +414,8 @@ void MatrixLogarithmAtomic<MatrixType>::computePade11(MatrixType& result, const
* This class holds the argument to the matrix function until it is
* assigned or evaluated for some other reason (so the argument
* should not be changed in the meantime). It is the return type of
- * matrixBase::matrixLogarithm() and most of the time this is the
- * only way it is used.
+ * MatrixBase::log() and most of the time this is the only way it
+ * is used.
*/
template<typename Derived> class MatrixLogarithmReturnValue
: public ReturnByValue<MatrixLogarithmReturnValue<Derived> >
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
new file mode 100644
index 000000000..c32437281
--- /dev/null
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h
@@ -0,0 +1,509 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012, 2013 Chen-Pang He <jdh8@ms63.hinet.net>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MATRIX_POWER
+#define EIGEN_MATRIX_POWER
+
+namespace Eigen {
+
+template<typename MatrixType> class MatrixPower;
+
+template<typename MatrixType>
+class MatrixPowerRetval : public ReturnByValue< MatrixPowerRetval<MatrixType> >
+{
+ public:
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+
+ MatrixPowerRetval(MatrixPower<MatrixType>& pow, RealScalar p) : m_pow(pow), m_p(p)
+ { }
+
+ template<typename ResultType>
+ inline void evalTo(ResultType& res) const
+ { m_pow.compute(res, m_p); }
+
+ Index rows() const { return m_pow.rows(); }
+ Index cols() const { return m_pow.cols(); }
+
+ private:
+ MatrixPower<MatrixType>& m_pow;
+ const RealScalar m_p;
+ MatrixPowerRetval& operator=(const MatrixPowerRetval&);
+};
+
+template<typename MatrixType>
+class MatrixPowerAtomic
+{
+ private:
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
+ };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef std::complex<RealScalar> ComplexScalar;
+ typedef typename MatrixType::Index Index;
+ typedef Array<Scalar, RowsAtCompileTime, 1, ColMajor, MaxRowsAtCompileTime> ArrayType;
+
+ const MatrixType& m_A;
+ RealScalar m_p;
+
+ void computePade(int degree, const MatrixType& IminusT, MatrixType& res) const;
+ void compute2x2(MatrixType& res, RealScalar p) const;
+ void computeBig(MatrixType& res) const;
+ static int getPadeDegree(float normIminusT);
+ static int getPadeDegree(double normIminusT);
+ static int getPadeDegree(long double normIminusT);
+ static ComplexScalar computeSuperDiag(const ComplexScalar&, const ComplexScalar&, RealScalar p);
+ static RealScalar computeSuperDiag(RealScalar, RealScalar, RealScalar p);
+
+ public:
+ MatrixPowerAtomic(const MatrixType& T, RealScalar p);
+ void compute(MatrixType& res) const;
+};
+
+template<typename MatrixType>
+MatrixPowerAtomic<MatrixType>::MatrixPowerAtomic(const MatrixType& T, RealScalar p) :
+ m_A(T), m_p(p)
+{ eigen_assert(T.rows() == T.cols()); }
+
+template<typename MatrixType>
+void MatrixPowerAtomic<MatrixType>::compute(MatrixType& res) const
+{
+ res.resizeLike(m_A);
+ switch (m_A.rows()) {
+ case 0:
+ break;
+ case 1:
+ res(0,0) = std::pow(m_A(0,0), m_p);
+ break;
+ case 2:
+ compute2x2(res, m_p);
+ break;
+ default:
+ computeBig(res);
+ }
+}
+
+template<typename MatrixType>
+void MatrixPowerAtomic<MatrixType>::computePade(int degree, const MatrixType& IminusT, MatrixType& res) const
+{
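+  // Evaluate the degree-'degree' Pade approximant of (I - IminusT)^p as a continued
+  // fraction, bottom-up; each partial denominator is applied through a triangular
+  // solve instead of forming an explicit inverse.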
+ int i = degree<<1;
+ res = (m_p-degree) / ((i-1)<<1) * IminusT;
+ for (--i; i; --i) {
+ res = (MatrixType::Identity(IminusT.rows(), IminusT.cols()) + res).template triangularView<Upper>()
+ .solve((i==1 ? -m_p : i&1 ? (-m_p-(i>>1))/(i<<1) : (m_p-(i>>1))/((i-1)<<1)) * IminusT).eval();
+ }
+ res += MatrixType::Identity(IminusT.rows(), IminusT.cols());
+}
+
+// This function assumes that res has the correct size (see bug 614)
+template<typename MatrixType>
+void MatrixPowerAtomic<MatrixType>::compute2x2(MatrixType& res, RealScalar p) const
+{
+ using std::abs;
+ using std::pow;
+
+ ArrayType logTdiag = m_A.diagonal().array().log();
+ res.coeffRef(0,0) = pow(m_A.coeff(0,0), p);
+
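+  // Diagonal entries are powered directly. For each superdiagonal entry, use the
+  // derivative p*lambda^(p-1) when the two diagonal entries coincide, a plain divided
+  // difference when they are well separated, and the cancellation-free formula in
+  // computeSuperDiag when they are close but distinct; the result is then scaled by
+  // the original superdiagonal entry.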
+ for (Index i=1; i < m_A.cols(); ++i) {
+ res.coeffRef(i,i) = pow(m_A.coeff(i,i), p);
+ if (m_A.coeff(i-1,i-1) == m_A.coeff(i,i))
+ res.coeffRef(i-1,i) = p * pow(m_A.coeff(i,i), p-1);
+ else if (2*abs(m_A.coeff(i-1,i-1)) < abs(m_A.coeff(i,i)) || 2*abs(m_A.coeff(i,i)) < abs(m_A.coeff(i-1,i-1)))
+ res.coeffRef(i-1,i) = (res.coeff(i,i)-res.coeff(i-1,i-1)) / (m_A.coeff(i,i)-m_A.coeff(i-1,i-1));
+ else
+ res.coeffRef(i-1,i) = computeSuperDiag(m_A.coeff(i,i), m_A.coeff(i-1,i-1), p);
+ res.coeffRef(i-1,i) *= m_A.coeff(i-1,i);
+ }
+}
+
+template<typename MatrixType>
+void MatrixPowerAtomic<MatrixType>::computeBig(MatrixType& res) const
+{
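+  // Inverse scaling and squaring on the triangular factor: take repeated square roots
+  // of T until ||I - T|| is small enough for a Pade approximant, evaluate the
+  // approximant, then undo the square roots by repeated squaring, recomputing the
+  // superdiagonal with compute2x2 and the scaled exponent at every stage.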
+ const int digits = std::numeric_limits<RealScalar>::digits;
+  const RealScalar maxNormForPade = digits <=  24? 4.3386528e-1f:                           // single precision
+ digits <= 53? 2.789358995219730e-1: // double precision
+ digits <= 64? 2.4471944416607995472e-1L: // extended precision
+ digits <= 106? 1.1016843812851143391275867258512e-1L: // double-double
+ 9.134603732914548552537150753385375e-2L; // quadruple precision
+ MatrixType IminusT, sqrtT, T = m_A.template triangularView<Upper>();
+ RealScalar normIminusT;
+ int degree, degree2, numberOfSquareRoots = 0;
+ bool hasExtraSquareRoot = false;
+
+ /* FIXME
+   * For singular T, norm(I - T) >= 1 while maxNormForPade < 1, which leads to an
+   * infinite loop. We should move the 0 eigenvalues to the bottom right corner.
+   * We need not worry about tiny values (e.g. 1e-300) because they approach 1 when
+   * repeatedly sqrt'ed.
+ *
+ * If the 0 eigenvalues are semisimple, they can form a 0 matrix at the
+ * bottom right corner.
+ *
+ * [ T A ]^p [ T^p (T^-1 T^p A) ]
+ * [ ] = [ ]
+ * [ 0 0 ] [ 0 0 ]
+ */
+ for (Index i=0; i < m_A.cols(); ++i)
+ eigen_assert(m_A(i,i) != RealScalar(0));
+
+ while (true) {
+ IminusT = MatrixType::Identity(m_A.rows(), m_A.cols()) - T;
+ normIminusT = IminusT.cwiseAbs().colwise().sum().maxCoeff();
+ if (normIminusT < maxNormForPade) {
+ degree = getPadeDegree(normIminusT);
+ degree2 = getPadeDegree(normIminusT/2);
+ if (degree - degree2 <= 1 || hasExtraSquareRoot)
+ break;
+ hasExtraSquareRoot = true;
+ }
+ MatrixSquareRootTriangular<MatrixType>(T).compute(sqrtT);
+ T = sqrtT.template triangularView<Upper>();
+ ++numberOfSquareRoots;
+ }
+ computePade(degree, IminusT, res);
+
+ for (; numberOfSquareRoots; --numberOfSquareRoots) {
+ compute2x2(res, std::ldexp(m_p, -numberOfSquareRoots));
+ res = res.template triangularView<Upper>() * res;
+ }
+ compute2x2(res, m_p);
+}
+
+template<typename MatrixType>
+inline int MatrixPowerAtomic<MatrixType>::getPadeDegree(float normIminusT)
+{
+ const float maxNormForPade[] = { 2.8064004e-1f /* degree = 3 */ , 4.3386528e-1f };
+ int degree = 3;
+ for (; degree <= 4; ++degree)
+ if (normIminusT <= maxNormForPade[degree - 3])
+ break;
+ return degree;
+}
+
+template<typename MatrixType>
+inline int MatrixPowerAtomic<MatrixType>::getPadeDegree(double normIminusT)
+{
+ const double maxNormForPade[] = { 1.884160592658218e-2 /* degree = 3 */ , 6.038881904059573e-2, 1.239917516308172e-1,
+ 1.999045567181744e-1, 2.789358995219730e-1 };
+ int degree = 3;
+ for (; degree <= 7; ++degree)
+ if (normIminusT <= maxNormForPade[degree - 3])
+ break;
+ return degree;
+}
+
+template<typename MatrixType>
+inline int MatrixPowerAtomic<MatrixType>::getPadeDegree(long double normIminusT)
+{
+#if LDBL_MANT_DIG == 53
+ const int maxPadeDegree = 7;
+  const long double maxNormForPade[] = { 1.884160592658218e-2L /* degree = 3 */ , 6.038881904059573e-2L, 1.239917516308172e-1L,
+ 1.999045567181744e-1L, 2.789358995219730e-1L };
+#elif LDBL_MANT_DIG <= 64
+ const int maxPadeDegree = 8;
+  const long double maxNormForPade[] = { 6.3854693117491799460e-3L /* degree = 3 */ , 2.6394893435456973676e-2L,
+ 6.4216043030404063729e-2L, 1.1701165502926694307e-1L, 1.7904284231268670284e-1L, 2.4471944416607995472e-1L };
+#elif LDBL_MANT_DIG <= 106
+ const int maxPadeDegree = 10;
+  const long double maxNormForPade[] = { 1.0007161601787493236741409687186e-4L /* degree = 3 */ ,
+ 1.0007161601787493236741409687186e-3L, 4.7069769360887572939882574746264e-3L, 1.3220386624169159689406653101695e-2L,
+ 2.8063482381631737920612944054906e-2L, 4.9625993951953473052385361085058e-2L, 7.7367040706027886224557538328171e-2L,
+ 1.1016843812851143391275867258512e-1L };
+#else
+ const int maxPadeDegree = 10;
+  const long double maxNormForPade[] = { 5.524506147036624377378713555116378e-5L /* degree = 3 */ ,
+ 6.640600568157479679823602193345995e-4L, 3.227716520106894279249709728084626e-3L,
+ 9.619593944683432960546978734646284e-3L, 2.134595382433742403911124458161147e-2L,
+ 3.908166513900489428442993794761185e-2L, 6.266780814639442865832535460550138e-2L,
+ 9.134603732914548552537150753385375e-2L };
+#endif
+ int degree = 3;
+ for (; degree <= maxPadeDegree; ++degree)
+ if (normIminusT <= maxNormForPade[degree - 3])
+ break;
+ return degree;
+}
+
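+// Both overloads below evaluate the divided difference (curr^p - prev^p)/(curr - prev)
+// in a cancellation-free form: with w = (log(curr) - log(prev))/2, obtained from atanh2
+// (plus a pi*i unwinding correction in the complex case),
+//   curr^p - prev^p = 2 * exp(p*(log(curr)+log(prev))/2) * sinh(p*w).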
+template<typename MatrixType>
+inline typename MatrixPowerAtomic<MatrixType>::ComplexScalar
+MatrixPowerAtomic<MatrixType>::computeSuperDiag(const ComplexScalar& curr, const ComplexScalar& prev, RealScalar p)
+{
+ ComplexScalar logCurr = std::log(curr);
+ ComplexScalar logPrev = std::log(prev);
+ int unwindingNumber = std::ceil((numext::imag(logCurr - logPrev) - M_PI) / (2*M_PI));
+ ComplexScalar w = numext::atanh2(curr - prev, curr + prev) + ComplexScalar(0, M_PI*unwindingNumber);
+ return RealScalar(2) * std::exp(RealScalar(0.5) * p * (logCurr + logPrev)) * std::sinh(p * w) / (curr - prev);
+}
+
+template<typename MatrixType>
+inline typename MatrixPowerAtomic<MatrixType>::RealScalar
+MatrixPowerAtomic<MatrixType>::computeSuperDiag(RealScalar curr, RealScalar prev, RealScalar p)
+{
+ RealScalar w = numext::atanh2(curr - prev, curr + prev);
+ return 2 * std::exp(p * (std::log(curr) + std::log(prev)) / 2) * std::sinh(p * w) / (curr - prev);
+}
+
+/**
+ * \ingroup MatrixFunctions_Module
+ *
+ * \brief Class for computing matrix powers.
+ *
+ * \tparam MatrixType type of the base, expected to be an instantiation
+ * of the Matrix class template.
+ *
+ * This class is capable of computing real/complex matrices raised to
+ * an arbitrary real power. It also caches the Schur decomposition once
+ * a non-integral power has been computed. Therefore, if you want to
+ * compute multiple (>= 2) matrix powers of the same matrix, using the
+ * class directly is more efficient than calling MatrixBase::pow().
+ *
+ * Example:
+ * \include MatrixPower_optimal.cpp
+ * Output: \verbinclude MatrixPower_optimal.out
+ */
+template<typename MatrixType>
+class MatrixPower
+{
+ private:
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
+ };
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename MatrixType::Index Index;
+
+ public:
+ /**
+ * \brief Constructor.
+ *
+ * \param[in] A the base of the matrix power.
+ *
+ * The class stores a reference to A, so it should not be changed
+ * (or destroyed) before evaluation.
+ */
+ explicit MatrixPower(const MatrixType& A) : m_A(A), m_conditionNumber(0)
+ { eigen_assert(A.rows() == A.cols()); }
+
+ /**
+ * \brief Returns the matrix power.
+ *
+ * \param[in] p exponent, a real scalar.
+ * \return The expression \f$ A^p \f$, where A is specified in the
+ * constructor.
+ */
+ const MatrixPowerRetval<MatrixType> operator()(RealScalar p)
+ { return MatrixPowerRetval<MatrixType>(*this, p); }
+
+ /**
+ * \brief Compute the matrix power.
+ *
+ * \param[in] p exponent, a real scalar.
+ * \param[out] res \f$ A^p \f$ where A is specified in the
+ * constructor.
+ */
+ template<typename ResultType>
+ void compute(ResultType& res, RealScalar p);
+
+ Index rows() const { return m_A.rows(); }
+ Index cols() const { return m_A.cols(); }
+
+ private:
+ typedef std::complex<RealScalar> ComplexScalar;
+ typedef Matrix<ComplexScalar, RowsAtCompileTime, ColsAtCompileTime, MatrixType::Options,
+ MaxRowsAtCompileTime, MaxColsAtCompileTime> ComplexMatrix;
+
+ typename MatrixType::Nested m_A;
+ MatrixType m_tmp;
+ ComplexMatrix m_T, m_U, m_fT;
+ RealScalar m_conditionNumber;
+
+ RealScalar modfAndInit(RealScalar, RealScalar*);
+
+ template<typename ResultType>
+ void computeIntPower(ResultType&, RealScalar);
+
+ template<typename ResultType>
+ void computeFracPower(ResultType&, RealScalar);
+
+ template<int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+ static void revertSchur(
+ Matrix<ComplexScalar, Rows, Cols, Options, MaxRows, MaxCols>& res,
+ const ComplexMatrix& T,
+ const ComplexMatrix& U);
+
+ template<int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+ static void revertSchur(
+ Matrix<RealScalar, Rows, Cols, Options, MaxRows, MaxCols>& res,
+ const ComplexMatrix& T,
+ const ComplexMatrix& U);
+};
+
+template<typename MatrixType>
+template<typename ResultType>
+void MatrixPower<MatrixType>::compute(ResultType& res, RealScalar p)
+{
+ switch (cols()) {
+ case 0:
+ break;
+ case 1:
+ res(0,0) = std::pow(m_A.coeff(0,0), p);
+ break;
+ default:
+ RealScalar intpart, x = modfAndInit(p, &intpart);
+ computeIntPower(res, intpart);
+ computeFracPower(res, x);
+ }
+}
+
+template<typename MatrixType>
+typename MatrixPower<MatrixType>::RealScalar
+MatrixPower<MatrixType>::modfAndInit(RealScalar x, RealScalar* intpart)
+{
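+  // Split the exponent into integer and fractional parts. The first time a fractional
+  // part is encountered, compute the (complex) Schur form of A and estimate a condition
+  // number from the moduli of the triangular diagonal. If the fractional part exceeds
+  // one half and the heuristic below suggests so, shift it down by one (and bump the
+  // integer part) to keep the fractional power better conditioned.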
+ typedef Array<RealScalar, RowsAtCompileTime, 1, ColMajor, MaxRowsAtCompileTime> RealArray;
+
+ *intpart = std::floor(x);
+ RealScalar res = x - *intpart;
+
+ if (!m_conditionNumber && res) {
+ const ComplexSchur<MatrixType> schurOfA(m_A);
+ m_T = schurOfA.matrixT();
+ m_U = schurOfA.matrixU();
+
+ const RealArray absTdiag = m_T.diagonal().array().abs();
+ m_conditionNumber = absTdiag.maxCoeff() / absTdiag.minCoeff();
+ }
+
+ if (res>RealScalar(0.5) && res>(1-res)*std::pow(m_conditionNumber, res)) {
+ --res;
+ ++*intpart;
+ }
+ return res;
+}
+
+template<typename MatrixType>
+template<typename ResultType>
+void MatrixPower<MatrixType>::computeIntPower(ResultType& res, RealScalar p)
+{
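+  // Integer part of the exponent: square-and-multiply, starting from the inverse of
+  // the base when the exponent is negative.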
+ RealScalar pp = std::abs(p);
+
+ if (p<0) m_tmp = m_A.inverse();
+ else m_tmp = m_A;
+
+ res = MatrixType::Identity(rows(), cols());
+ while (pp >= 1) {
+ if (std::fmod(pp, 2) >= 1)
+ res = m_tmp * res;
+ m_tmp *= m_tmp;
+ pp /= 2;
+ }
+}
+
+template<typename MatrixType>
+template<typename ResultType>
+void MatrixPower<MatrixType>::computeFracPower(ResultType& res, RealScalar p)
+{
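+  // Fractional part of the exponent: raise the triangular Schur factor via
+  // MatrixPowerAtomic, map the result back to the original basis with revertSchur
+  // (which takes the real part for real matrix types), and accumulate into res.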
+ if (p) {
+ eigen_assert(m_conditionNumber);
+ MatrixPowerAtomic<ComplexMatrix>(m_T, p).compute(m_fT);
+ revertSchur(m_tmp, m_fT, m_U);
+ res = m_tmp * res;
+ }
+}
+
+template<typename MatrixType>
+template<int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+inline void MatrixPower<MatrixType>::revertSchur(
+ Matrix<ComplexScalar, Rows, Cols, Options, MaxRows, MaxCols>& res,
+ const ComplexMatrix& T,
+ const ComplexMatrix& U)
+{ res.noalias() = U * (T.template triangularView<Upper>() * U.adjoint()); }
+
+template<typename MatrixType>
+template<int Rows, int Cols, int Options, int MaxRows, int MaxCols>
+inline void MatrixPower<MatrixType>::revertSchur(
+ Matrix<RealScalar, Rows, Cols, Options, MaxRows, MaxCols>& res,
+ const ComplexMatrix& T,
+ const ComplexMatrix& U)
+{ res.noalias() = (U * (T.template triangularView<Upper>() * U.adjoint())).real(); }
+
+/**
+ * \ingroup MatrixFunctions_Module
+ *
+ * \brief Proxy for the matrix power of some matrix (expression).
+ *
+ * \tparam Derived type of the base, a matrix (expression).
+ *
+ * This class holds the arguments to the matrix power until it is
+ * assigned or evaluated for some other reason (so the argument
+ * should not be changed in the meantime). It is the return type of
+ * MatrixBase::pow() and related functions and most of the
+ * time this is the only way it is used.
+ */
+template<typename Derived>
+class MatrixPowerReturnValue : public ReturnByValue< MatrixPowerReturnValue<Derived> >
+{
+ public:
+ typedef typename Derived::PlainObject PlainObject;
+ typedef typename Derived::RealScalar RealScalar;
+ typedef typename Derived::Index Index;
+
+ /**
+ * \brief Constructor.
+ *
+ * \param[in] A %Matrix (expression), the base of the matrix power.
+ * \param[in] p scalar, the exponent of the matrix power.
+ */
+ MatrixPowerReturnValue(const Derived& A, RealScalar p) : m_A(A), m_p(p)
+ { }
+
+ /**
+ * \brief Compute the matrix power.
+ *
+ * \param[out] result \f$ A^p \f$ where \p A and \p p are as in the
+ * constructor.
+ */
+ template<typename ResultType>
+ inline void evalTo(ResultType& res) const
+ { MatrixPower<PlainObject>(m_A.eval()).compute(res, m_p); }
+
+ Index rows() const { return m_A.rows(); }
+ Index cols() const { return m_A.cols(); }
+
+ private:
+ const Derived& m_A;
+ const RealScalar m_p;
+ MatrixPowerReturnValue& operator=(const MatrixPowerReturnValue&);
+};
+
+namespace internal {
+
+template<typename MatrixPowerType>
+struct traits< MatrixPowerRetval<MatrixPowerType> >
+{ typedef typename MatrixPowerType::PlainObject ReturnType; };
+
+template<typename Derived>
+struct traits< MatrixPowerReturnValue<Derived> >
+{ typedef typename Derived::PlainObject ReturnType; };
+
+}
+
+template<typename Derived>
+const MatrixPowerReturnValue<Derived> MatrixBase<Derived>::pow(const RealScalar& p) const
+{ return MatrixPowerReturnValue<Derived>(derived(), p); }
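+
+// A minimal usage sketch (assuming <unsupported/Eigen/MatrixFunctions> is included):
+//
+//   Matrix4cd A = Matrix4cd::Random();
+//   Matrix4cd B = A.pow(3.7);                  // one-off evaluation via MatrixBase::pow()
+//   MatrixPower<Matrix4cd> Apow(A);
+//   Matrix4cd C = Apow(0.25), D = Apow(1.5);   // both reuse the same Schur decomposition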
+
+} // namespace Eigen
+
+#endif // EIGEN_MATRIX_POWER
diff --git a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
index 10319fa17..b48ea9d46 100644
--- a/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
+++ b/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h
@@ -60,17 +60,17 @@ class MatrixSquareRootQuasiTriangular
void computeOffDiagonalPartOfSqrt(MatrixType& sqrtT, const MatrixType& T);
void compute2x2diagonalBlock(MatrixType& sqrtT, const MatrixType& T, typename MatrixType::Index i);
void compute1x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
- typename MatrixType::Index i, typename MatrixType::Index j);
+ typename MatrixType::Index i, typename MatrixType::Index j);
void compute1x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
- typename MatrixType::Index i, typename MatrixType::Index j);
+ typename MatrixType::Index i, typename MatrixType::Index j);
void compute2x1offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
- typename MatrixType::Index i, typename MatrixType::Index j);
+ typename MatrixType::Index i, typename MatrixType::Index j);
void compute2x2offDiagonalBlock(MatrixType& sqrtT, const MatrixType& T,
- typename MatrixType::Index i, typename MatrixType::Index j);
+ typename MatrixType::Index i, typename MatrixType::Index j);
template <typename SmallMatrixType>
static void solveAuxiliaryEquation(SmallMatrixType& X, const SmallMatrixType& A,
- const SmallMatrixType& B, const SmallMatrixType& C);
+ const SmallMatrixType& B, const SmallMatrixType& C);
const MatrixType& m_A;
};
@@ -79,18 +79,9 @@ template <typename MatrixType>
template <typename ResultType>
void MatrixSquareRootQuasiTriangular<MatrixType>::compute(ResultType &result)
{
- // Compute Schur decomposition of m_A
- const RealSchur<MatrixType> schurOfA(m_A);
- const MatrixType& T = schurOfA.matrixT();
- const MatrixType& U = schurOfA.matrixU();
-
- // Compute square root of T
- MatrixType sqrtT = MatrixType::Zero(m_A.rows(), m_A.rows());
- computeDiagonalPartOfSqrt(sqrtT, T);
- computeOffDiagonalPartOfSqrt(sqrtT, T);
-
- // Compute square root of m_A
- result = U * sqrtT * U.adjoint();
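+  // pre: m_A is quasi upper triangular; the Schur decomposition is performed by
+  // MatrixSquareRoot<MatrixType, 0>, which calls this class on the factor matrixT().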
+ result.resize(m_A.rows(), m_A.cols());
+ computeDiagonalPartOfSqrt(result, m_A);
+ computeOffDiagonalPartOfSqrt(result, m_A);
}
// pre: T is quasi-upper-triangular and sqrtT is a zero matrix of the same size
@@ -99,11 +90,12 @@ template <typename MatrixType>
void MatrixSquareRootQuasiTriangular<MatrixType>::computeDiagonalPartOfSqrt(MatrixType& sqrtT,
const MatrixType& T)
{
+ using std::sqrt;
const Index size = m_A.rows();
for (Index i = 0; i < size; i++) {
if (i == size - 1 || T.coeff(i+1, i) == 0) {
- eigen_assert(T(i,i) > 0);
- sqrtT.coeffRef(i,i) = internal::sqrt(T.coeff(i,i));
+ eigen_assert(T(i,i) >= 0);
+ sqrtT.coeffRef(i,i) = sqrt(T.coeff(i,i));
}
else {
compute2x2diagonalBlock(sqrtT, T, i);
@@ -289,17 +281,14 @@ template <typename MatrixType>
template <typename ResultType>
void MatrixSquareRootTriangular<MatrixType>::compute(ResultType &result)
{
- // Compute Schur decomposition of m_A
- const ComplexSchur<MatrixType> schurOfA(m_A);
- const MatrixType& T = schurOfA.matrixT();
- const MatrixType& U = schurOfA.matrixU();
+ using std::sqrt;
- // Compute square root of T and store it in upper triangular part of result
+ // Compute square root of m_A and store it in upper triangular part of result
// This uses that the square root of triangular matrices can be computed directly.
result.resize(m_A.rows(), m_A.cols());
typedef typename MatrixType::Index Index;
for (Index i = 0; i < m_A.rows(); i++) {
- result.coeffRef(i,i) = internal::sqrt(T.coeff(i,i));
+ result.coeffRef(i,i) = sqrt(m_A.coeff(i,i));
}
for (Index j = 1; j < m_A.cols(); j++) {
for (Index i = j-1; i >= 0; i--) {
@@ -307,14 +296,9 @@ void MatrixSquareRootTriangular<MatrixType>::compute(ResultType &result)
// if i = j-1, then segment has length 0 so tmp = 0
Scalar tmp = (result.row(i).segment(i+1,j-i-1) * result.col(j).segment(i+1,j-i-1)).value();
// denominator may be zero if original matrix is singular
- result.coeffRef(i,j) = (T.coeff(i,j) - tmp) / (result.coeff(i,i) + result.coeff(j,j));
+ result.coeffRef(i,j) = (m_A.coeff(i,j) - tmp) / (result.coeff(i,i) + result.coeff(j,j));
}
}
-
- // Compute square root of m_A as U * result * U.adjoint()
- MatrixType tmp;
- tmp.noalias() = U * result.template triangularView<Upper>();
- result.noalias() = tmp * U.adjoint();
}
@@ -371,9 +355,8 @@ class MatrixSquareRoot<MatrixType, 0>
const MatrixType& U = schurOfA.matrixU();
// Compute square root of T
- MatrixSquareRootQuasiTriangular<MatrixType> tmp(T);
- MatrixType sqrtT = MatrixType::Zero(m_A.rows(), m_A.rows());
- tmp.compute(sqrtT);
+ MatrixType sqrtT = MatrixType::Zero(m_A.rows(), m_A.cols());
+ MatrixSquareRootQuasiTriangular<MatrixType>(T).compute(sqrtT);
// Compute square root of m_A
result = U * sqrtT * U.adjoint();
@@ -405,12 +388,11 @@ class MatrixSquareRoot<MatrixType, 1>
const MatrixType& U = schurOfA.matrixU();
// Compute square root of T
- MatrixSquareRootTriangular<MatrixType> tmp(T);
- MatrixType sqrtT = MatrixType::Zero(m_A.rows(), m_A.rows());
- tmp.compute(sqrtT);
+ MatrixType sqrtT;
+ MatrixSquareRootTriangular<MatrixType>(T).compute(sqrtT);
// Compute square root of m_A
- result = U * sqrtT * U.adjoint();
+ result = U * (sqrtT.template triangularView<Upper>() * U.adjoint());
}
private:
diff --git a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
index d9ce4eab6..b8ba6ddcb 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h
@@ -52,7 +52,7 @@ public:
Parameters()
: factor(Scalar(100.))
, maxfev(1000)
- , xtol(internal::sqrt(NumTraits<Scalar>::epsilon()))
+ , xtol(std::sqrt(NumTraits<Scalar>::epsilon()))
, nb_of_subdiagonals(-1)
, nb_of_superdiagonals(-1)
, epsfcn(Scalar(0.)) {}
@@ -70,7 +70,7 @@ public:
HybridNonLinearSolverSpace::Status hybrj1(
FVectorType &x,
- const Scalar tol = internal::sqrt(NumTraits<Scalar>::epsilon())
+ const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
);
HybridNonLinearSolverSpace::Status solveInit(FVectorType &x);
@@ -79,7 +79,7 @@ public:
HybridNonLinearSolverSpace::Status hybrd1(
FVectorType &x,
- const Scalar tol = internal::sqrt(NumTraits<Scalar>::epsilon())
+ const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
);
HybridNonLinearSolverSpace::Status solveNumericalDiffInit(FVectorType &x);
@@ -150,7 +150,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveInit(FVectorType &x)
fjac.resize(n, n);
if (!useExternalScaling)
diag.resize(n);
- assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
+ eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
/* Function Body */
nfev = 0;
@@ -185,7 +185,9 @@ template<typename FunctorType, typename Scalar>
HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
{
- assert(x.size()==n); // check the caller is not cheating us
+ using std::abs;
+
+ eigen_assert(x.size()==n); // check the caller is not cheating us
Index j;
std::vector<JacobiRotation<Scalar> > v_givens(n), w_givens(n);
@@ -252,14 +254,14 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
/* compute the scaled actual reduction. */
actred = -1.;
if (fnorm1 < fnorm) /* Computing 2nd power */
- actred = 1. - internal::abs2(fnorm1 / fnorm);
+ actred = 1. - numext::abs2(fnorm1 / fnorm);
/* compute the scaled predicted reduction. */
wa3 = R.template triangularView<Upper>()*wa1 + qtf;
temp = wa3.stableNorm();
prered = 0.;
if (temp < fnorm) /* Computing 2nd power */
- prered = 1. - internal::abs2(temp / fnorm);
+ prered = 1. - numext::abs2(temp / fnorm);
/* compute the ratio of the actual to the predicted reduction. */
ratio = 0.;
@@ -276,7 +278,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
++ncsuc;
if (ratio >= Scalar(.5) || ncsuc > 1)
delta = (std::max)(delta, pnorm / Scalar(.5));
- if (internal::abs(ratio - 1.) <= Scalar(.1)) {
+ if (abs(ratio - 1.) <= Scalar(.1)) {
delta = pnorm / Scalar(.5);
}
}
@@ -388,7 +390,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(FVectorType &
fvec.resize(n);
if (!useExternalScaling)
diag.resize(n);
- assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
+ eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
/* Function Body */
nfev = 0;
@@ -423,6 +425,9 @@ template<typename FunctorType, typename Scalar>
HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType &x)
{
+ using std::sqrt;
+ using std::abs;
+
assert(x.size()==n); // check the caller is not cheating us
Index j;
@@ -492,14 +497,14 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
/* compute the scaled actual reduction. */
actred = -1.;
if (fnorm1 < fnorm) /* Computing 2nd power */
- actred = 1. - internal::abs2(fnorm1 / fnorm);
+ actred = 1. - numext::abs2(fnorm1 / fnorm);
/* compute the scaled predicted reduction. */
wa3 = R.template triangularView<Upper>()*wa1 + qtf;
temp = wa3.stableNorm();
prered = 0.;
if (temp < fnorm) /* Computing 2nd power */
- prered = 1. - internal::abs2(temp / fnorm);
+ prered = 1. - numext::abs2(temp / fnorm);
/* compute the ratio of the actual to the predicted reduction. */
ratio = 0.;
@@ -516,7 +521,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType
++ncsuc;
if (ratio >= Scalar(.5) || ncsuc > 1)
delta = (std::max)(delta, pnorm / Scalar(.5));
- if (internal::abs(ratio - 1.) <= Scalar(.1)) {
+ if (abs(ratio - 1.) <= Scalar(.1)) {
delta = pnorm / Scalar(.5);
}
}
diff --git a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
index 075faeeb0..bfeb26fc9 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h
@@ -55,8 +55,8 @@ public:
Parameters()
: factor(Scalar(100.))
, maxfev(400)
- , ftol(internal::sqrt(NumTraits<Scalar>::epsilon()))
- , xtol(internal::sqrt(NumTraits<Scalar>::epsilon()))
+ , ftol(std::sqrt(NumTraits<Scalar>::epsilon()))
+ , xtol(std::sqrt(NumTraits<Scalar>::epsilon()))
, gtol(Scalar(0.))
, epsfcn(Scalar(0.)) {}
Scalar factor;
@@ -72,7 +72,7 @@ public:
LevenbergMarquardtSpace::Status lmder1(
FVectorType &x,
- const Scalar tol = internal::sqrt(NumTraits<Scalar>::epsilon())
+ const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
);
LevenbergMarquardtSpace::Status minimize(FVectorType &x);
@@ -83,12 +83,12 @@ public:
FunctorType &functor,
FVectorType &x,
Index *nfev,
- const Scalar tol = internal::sqrt(NumTraits<Scalar>::epsilon())
+ const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
);
LevenbergMarquardtSpace::Status lmstr1(
FVectorType &x,
- const Scalar tol = internal::sqrt(NumTraits<Scalar>::epsilon())
+ const Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon())
);
LevenbergMarquardtSpace::Status minimizeOptimumStorage(FVectorType &x);
@@ -172,7 +172,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(FVectorType &x)
fjac.resize(m, n);
if (!useExternalScaling)
diag.resize(n);
- assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
+ eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
qtf.resize(n);
/* Function Body */
@@ -206,7 +206,10 @@ template<typename FunctorType, typename Scalar>
LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
{
- assert(x.size()==n); // check the caller is not cheating us
+ using std::abs;
+ using std::sqrt;
+
+ eigen_assert(x.size()==n); // check the caller is not cheating us
/* calculate the jacobian matrix. */
Index df_ret = functor.df(x, fjac);
@@ -249,7 +252,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
if (fnorm != 0.)
for (Index j = 0; j < n; ++j)
if (wa2[permutation.indices()[j]] != 0.)
- gnorm = (std::max)(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
+ gnorm = (std::max)(gnorm, abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
/* test for convergence of the gradient norm. */
if (gnorm <= parameters.gtol)
@@ -282,13 +285,13 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
/* compute the scaled actual reduction. */
actred = -1.;
if (Scalar(.1) * fnorm1 < fnorm)
- actred = 1. - internal::abs2(fnorm1 / fnorm);
+ actred = 1. - numext::abs2(fnorm1 / fnorm);
/* compute the scaled predicted reduction and */
/* the scaled directional derivative. */
wa3 = fjac.template triangularView<Upper>() * (qrfac.colsPermutation().inverse() *wa1);
- temp1 = internal::abs2(wa3.stableNorm() / fnorm);
- temp2 = internal::abs2(internal::sqrt(par) * pnorm / fnorm);
+ temp1 = numext::abs2(wa3.stableNorm() / fnorm);
+ temp2 = numext::abs2(sqrt(par) * pnorm / fnorm);
prered = temp1 + temp2 / Scalar(.5);
dirder = -(temp1 + temp2);
@@ -326,9 +329,9 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
}
/* tests for convergence. */
- if (internal::abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm)
+ if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm)
return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall;
- if (internal::abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.)
+ if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.)
return LevenbergMarquardtSpace::RelativeReductionTooSmall;
if (delta <= parameters.xtol * xnorm)
return LevenbergMarquardtSpace::RelativeErrorTooSmall;
@@ -336,7 +339,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
/* tests for termination and stringent tolerances. */
if (nfev >= parameters.maxfev)
return LevenbergMarquardtSpace::TooManyFunctionEvaluation;
- if (internal::abs(actred) <= NumTraits<Scalar>::epsilon() && prered <= NumTraits<Scalar>::epsilon() && Scalar(.5) * ratio <= 1.)
+ if (abs(actred) <= NumTraits<Scalar>::epsilon() && prered <= NumTraits<Scalar>::epsilon() && Scalar(.5) * ratio <= 1.)
return LevenbergMarquardtSpace::FtolTooSmall;
if (delta <= NumTraits<Scalar>::epsilon() * xnorm)
return LevenbergMarquardtSpace::XtolTooSmall;
@@ -388,7 +391,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(FVectorType
fjac.resize(n, n);
if (!useExternalScaling)
diag.resize(n);
- assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
+ eigen_assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
qtf.resize(n);
/* Function Body */
@@ -423,7 +426,10 @@ template<typename FunctorType, typename Scalar>
LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorType &x)
{
- assert(x.size()==n); // check the caller is not cheating us
+ using std::abs;
+ using std::sqrt;
+
+ eigen_assert(x.size()==n); // check the caller is not cheating us
Index i, j;
bool sing;
@@ -496,7 +502,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
if (fnorm != 0.)
for (j = 0; j < n; ++j)
if (wa2[permutation.indices()[j]] != 0.)
- gnorm = (std::max)(gnorm, internal::abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
+ gnorm = (std::max)(gnorm, abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]]));
/* test for convergence of the gradient norm. */
if (gnorm <= parameters.gtol)
@@ -529,13 +535,13 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
/* compute the scaled actual reduction. */
actred = -1.;
if (Scalar(.1) * fnorm1 < fnorm)
- actred = 1. - internal::abs2(fnorm1 / fnorm);
+ actred = 1. - numext::abs2(fnorm1 / fnorm);
/* compute the scaled predicted reduction and */
/* the scaled directional derivative. */
wa3 = fjac.topLeftCorner(n,n).template triangularView<Upper>() * (permutation.inverse() * wa1);
- temp1 = internal::abs2(wa3.stableNorm() / fnorm);
- temp2 = internal::abs2(internal::sqrt(par) * pnorm / fnorm);
+ temp1 = numext::abs2(wa3.stableNorm() / fnorm);
+ temp2 = numext::abs2(sqrt(par) * pnorm / fnorm);
prered = temp1 + temp2 / Scalar(.5);
dirder = -(temp1 + temp2);
@@ -573,9 +579,9 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
}
/* tests for convergence. */
- if (internal::abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm)
+ if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm)
return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall;
- if (internal::abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.)
+ if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.)
return LevenbergMarquardtSpace::RelativeReductionTooSmall;
if (delta <= parameters.xtol * xnorm)
return LevenbergMarquardtSpace::RelativeErrorTooSmall;
@@ -583,7 +589,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorTyp
/* tests for termination and stringent tolerances. */
if (nfev >= parameters.maxfev)
return LevenbergMarquardtSpace::TooManyFunctionEvaluation;
- if (internal::abs(actred) <= NumTraits<Scalar>::epsilon() && prered <= NumTraits<Scalar>::epsilon() && Scalar(.5) * ratio <= 1.)
+ if (abs(actred) <= NumTraits<Scalar>::epsilon() && prered <= NumTraits<Scalar>::epsilon() && Scalar(.5) * ratio <= 1.)
return LevenbergMarquardtSpace::FtolTooSmall;
if (delta <= NumTraits<Scalar>::epsilon() * xnorm)
return LevenbergMarquardtSpace::XtolTooSmall;
diff --git a/unsupported/Eigen/src/NonLinearOptimization/chkder.h b/unsupported/Eigen/src/NonLinearOptimization/chkder.h
index ad37c5029..db8ff7d6e 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/chkder.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/chkder.h
@@ -16,6 +16,10 @@ void chkder(
Matrix< Scalar, Dynamic, 1 > &err
)
{
+ using std::sqrt;
+ using std::abs;
+ using std::log;
+
typedef DenseIndex Index;
const Scalar eps = sqrt(NumTraits<Scalar>::epsilon());
diff --git a/unsupported/Eigen/src/NonLinearOptimization/covar.h b/unsupported/Eigen/src/NonLinearOptimization/covar.h
index c73a09645..68260d191 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/covar.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/covar.h
@@ -6,8 +6,9 @@ template <typename Scalar>
void covar(
Matrix< Scalar, Dynamic, Dynamic > &r,
const VectorXi &ipvt,
- Scalar tol = sqrt(NumTraits<Scalar>::epsilon()) )
+ Scalar tol = std::sqrt(NumTraits<Scalar>::epsilon()) )
{
+ using std::abs;
typedef DenseIndex Index;
/* Local variables */
@@ -19,7 +20,7 @@ void covar(
const Index n = r.cols();
const Scalar tolr = tol * abs(r(0,0));
Matrix< Scalar, Dynamic, 1 > wa(n);
- assert(ipvt.size()==n);
+ eigen_assert(ipvt.size()==n);
/* form the inverse of r in the full upper triangle of r. */
l = -1;
diff --git a/unsupported/Eigen/src/NonLinearOptimization/dogleg.h b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
index 4fbc98bfc..80c5d277b 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/dogleg.h
@@ -10,6 +10,9 @@ void dogleg(
Scalar delta,
Matrix< Scalar, Dynamic, 1 > &x)
{
+ using std::abs;
+ using std::sqrt;
+
typedef DenseIndex Index;
/* Local variables */
@@ -21,9 +24,9 @@ void dogleg(
/* Function Body */
const Scalar epsmch = NumTraits<Scalar>::epsilon();
const Index n = qrfac.cols();
- assert(n==qtb.size());
- assert(n==x.size());
- assert(n==diag.size());
+ eigen_assert(n==qtb.size());
+ eigen_assert(n==x.size());
+ eigen_assert(n==diag.size());
Matrix< Scalar, Dynamic, 1 > wa1(n), wa2(n);
/* first, calculate the gauss-newton direction. */
@@ -89,8 +92,8 @@ void dogleg(
/* at which the quadratic is minimized. */
bnorm = qtb.stableNorm();
temp = bnorm / gnorm * (bnorm / qnorm) * (sgnorm / delta);
- temp = temp - delta / qnorm * abs2(sgnorm / delta) + sqrt(abs2(temp - delta / qnorm) + (1.-abs2(delta / qnorm)) * (1.-abs2(sgnorm / delta)));
- alpha = delta / qnorm * (1. - abs2(sgnorm / delta)) / temp;
+ temp = temp - delta / qnorm * numext::abs2(sgnorm / delta) + sqrt(numext::abs2(temp - delta / qnorm) + (1.-numext::abs2(delta / qnorm)) * (1.-numext::abs2(sgnorm / delta)));
+ alpha = delta / qnorm * (1. - numext::abs2(sgnorm / delta)) / temp;
algo_end:
/* form appropriate convex combination of the gauss-newton */
diff --git a/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
index 1cabe69ae..bb7cf267b 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h
@@ -11,6 +11,9 @@ DenseIndex fdjac1(
DenseIndex ml, DenseIndex mu,
Scalar epsfcn)
{
+ using std::sqrt;
+ using std::abs;
+
typedef DenseIndex Index;
/* Local variables */
@@ -24,7 +27,7 @@ DenseIndex fdjac1(
/* Function Body */
const Scalar epsmch = NumTraits<Scalar>::epsilon();
const Index n = x.size();
- assert(fvec.size()==n);
+ eigen_assert(fvec.size()==n);
Matrix< Scalar, Dynamic, 1 > wa1(n);
Matrix< Scalar, Dynamic, 1 > wa2(n);
diff --git a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
index cc1ca530f..4c17d4cdf 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/lmpar.h
@@ -12,6 +12,8 @@ void lmpar(
Scalar &par,
Matrix< Scalar, Dynamic, 1 > &x)
{
+ using std::abs;
+ using std::sqrt;
typedef DenseIndex Index;
/* Local variables */
@@ -25,11 +27,11 @@ void lmpar(
/* Function Body */
- const Scalar dwarf = std::numeric_limits<Scalar>::min();
+ const Scalar dwarf = (std::numeric_limits<Scalar>::min)();
const Index n = r.cols();
- assert(n==diag.size());
- assert(n==qtb.size());
- assert(n==x.size());
+ eigen_assert(n==diag.size());
+ eigen_assert(n==qtb.size());
+ eigen_assert(n==x.size());
Matrix< Scalar, Dynamic, 1 > wa1, wa2;
@@ -168,6 +170,8 @@ void lmpar2(
Matrix< Scalar, Dynamic, 1 > &x)
{
+ using std::sqrt;
+ using std::abs;
typedef DenseIndex Index;
/* Local variables */
@@ -181,10 +185,10 @@ void lmpar2(
/* Function Body */
- const Scalar dwarf = std::numeric_limits<Scalar>::min();
+ const Scalar dwarf = (std::numeric_limits<Scalar>::min)();
const Index n = qr.matrixQR().cols();
- assert(n==diag.size());
- assert(n==qtb.size());
+ eigen_assert(n==diag.size());
+ eigen_assert(n==qtb.size());
Matrix< Scalar, Dynamic, 1 > wa1, wa2;
diff --git a/unsupported/Eigen/src/NonLinearOptimization/r1updt.h b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
index 55fae5ae8..f28766061 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/r1updt.h
@@ -24,10 +24,10 @@ void r1updt(
// r1updt had a broader use case, but we don't use it here. And, more
// importantly, we cannot test it.
- assert(m==n);
- assert(u.size()==m);
- assert(v.size()==n);
- assert(w.size()==n);
+ eigen_assert(m==n);
+ eigen_assert(u.size()==m);
+ eigen_assert(v.size()==n);
+ eigen_assert(w.size()==n);
/* move the nontrivial part of the last column of s into w. */
w[n-1] = s(n-1,n-1);
diff --git a/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h b/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
index 9ce079e22..6ebf8563f 100644
--- a/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
+++ b/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h
@@ -12,7 +12,7 @@ void rwupdt(
typedef DenseIndex Index;
const Index n = r.cols();
- assert(r.rows()>=n);
+ eigen_assert(r.rows()>=n);
std::vector<JacobiRotation<Scalar> > givens(n);
/* Local variables */
diff --git a/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h b/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
index d848cb407..ea5d8bc27 100644
--- a/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
+++ b/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h
@@ -63,11 +63,13 @@ public:
*/
int df(const InputType& _x, JacobianType &jac) const
{
+ using std::sqrt;
+ using std::abs;
/* Local variables */
Scalar h;
int nfev=0;
const typename InputType::Index n = _x.size();
- const Scalar eps = internal::sqrt(((std::max)(epsfcn,NumTraits<Scalar>::epsilon() )));
+ const Scalar eps = sqrt(((std::max)(epsfcn,NumTraits<Scalar>::epsilon() )));
ValueType val1, val2;
InputType x = _x;
// TODO : we should do this only if the size is not already known
@@ -84,12 +86,12 @@ public:
// do nothing
break;
default:
- assert(false);
+ eigen_assert(false);
};
// Function Body
for (int j = 0; j < n; ++j) {
- h = eps * internal::abs(x[j]);
+ h = eps * abs(x[j]);
if (h == 0.) {
h = eps;
}
@@ -110,7 +112,7 @@ public:
jac.col(j) = (val2-val1)/(2*h);
break;
default:
- assert(false);
+ eigen_assert(false);
};
}
return nfev;
diff --git a/unsupported/Eigen/src/Polynomials/Companion.h b/unsupported/Eigen/src/Polynomials/Companion.h
index 4badd9d58..b515c2920 100644
--- a/unsupported/Eigen/src/Polynomials/Companion.h
+++ b/unsupported/Eigen/src/Polynomials/Companion.h
@@ -210,6 +210,7 @@ bool companion<_Scalar,_Deg>::balancedR( Scalar colNorm, Scalar rowNorm,
template< typename _Scalar, int _Deg >
void companion<_Scalar,_Deg>::balance()
{
+ using std::abs;
EIGEN_STATIC_ASSERT( Deg == Dynamic || 1 < Deg, YOU_MADE_A_PROGRAMMING_MISTAKE );
const Index deg = m_monic.size();
const Index deg_1 = deg-1;
diff --git a/unsupported/Eigen/src/Polynomials/PolynomialSolver.h b/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
index 70b873dbc..cd5c04bbf 100644
--- a/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
+++ b/unsupported/Eigen/src/Polynomials/PolynomialSolver.h
@@ -69,10 +69,11 @@ class PolynomialSolverBase
inline void realRoots( Stl_back_insertion_sequence& bi_seq,
const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
{
+ using std::abs;
bi_seq.clear();
for(Index i=0; i<m_roots.size(); ++i )
{
- if( internal::abs( m_roots[i].imag() ) < absImaginaryThreshold ){
+ if( abs( m_roots[i].imag() ) < absImaginaryThreshold ){
bi_seq.push_back( m_roots[i].real() ); }
}
}
@@ -82,10 +83,10 @@ class PolynomialSolverBase
inline const RootType& selectComplexRoot_withRespectToNorm( squaredNormBinaryPredicate& pred ) const
{
Index res=0;
- RealScalar norm2 = internal::abs2( m_roots[0] );
+ RealScalar norm2 = numext::abs2( m_roots[0] );
for( Index i=1; i<m_roots.size(); ++i )
{
- const RealScalar currNorm2 = internal::abs2( m_roots[i] );
+ const RealScalar currNorm2 = numext::abs2( m_roots[i] );
if( pred( currNorm2, norm2 ) ){
res=i; norm2=currNorm2; }
}
@@ -118,13 +119,14 @@ class PolynomialSolverBase
bool& hasArealRoot,
const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
{
+ using std::abs;
hasArealRoot = false;
Index res=0;
RealScalar abs2(0);
for( Index i=0; i<m_roots.size(); ++i )
{
- if( internal::abs( m_roots[i].imag() ) < absImaginaryThreshold )
+ if( abs( m_roots[i].imag() ) < absImaginaryThreshold )
{
if( !hasArealRoot )
{
@@ -144,11 +146,11 @@ class PolynomialSolverBase
}
else
{
- if( internal::abs( m_roots[i].imag() ) < internal::abs( m_roots[res].imag() ) ){
+ if( abs( m_roots[i].imag() ) < abs( m_roots[res].imag() ) ){
res = i; }
}
}
- return internal::real_ref(m_roots[res]);
+ return numext::real_ref(m_roots[res]);
}
@@ -158,13 +160,14 @@ class PolynomialSolverBase
bool& hasArealRoot,
const RealScalar& absImaginaryThreshold = NumTraits<Scalar>::dummy_precision() ) const
{
+ using std::abs;
hasArealRoot = false;
Index res=0;
RealScalar val(0);
for( Index i=0; i<m_roots.size(); ++i )
{
- if( internal::abs( m_roots[i].imag() ) < absImaginaryThreshold )
+ if( abs( m_roots[i].imag() ) < absImaginaryThreshold )
{
if( !hasArealRoot )
{
@@ -184,11 +187,11 @@ class PolynomialSolverBase
}
else
{
- if( internal::abs( m_roots[i].imag() ) < internal::abs( m_roots[res].imag() ) ){
+ if( abs( m_roots[i].imag() ) < abs( m_roots[res].imag() ) ){
res = i; }
}
}
- return internal::real_ref(m_roots[res]);
+ return numext::real_ref(m_roots[res]);
}
public:
@@ -341,7 +344,7 @@ class PolynomialSolver : public PolynomialSolverBase<_Scalar,_Deg>
template< typename OtherPolynomial >
void compute( const OtherPolynomial& poly )
{
- assert( Scalar(0) != poly[poly.size()-1] );
+ eigen_assert( Scalar(0) != poly[poly.size()-1] );
internal::companion<Scalar,_Deg> companion( poly );
companion.balance();
m_eigenSolver.compute( companion.denseMatrix() );
@@ -373,7 +376,7 @@ class PolynomialSolver<_Scalar,1> : public PolynomialSolverBase<_Scalar,1>
template< typename OtherPolynomial >
void compute( const OtherPolynomial& poly )
{
- assert( Scalar(0) != poly[poly.size()-1] );
+ eigen_assert( Scalar(0) != poly[poly.size()-1] );
m_roots[0] = -poly[0]/poly[poly.size()-1];
}
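
For context, the hunks above assert a nonzero leading coefficient in compute() and filter realRoots() by the magnitude of the imaginary part. A hedged usage sketch of the solver follows; the include path and template arguments are assumed from the unsupported Polynomials module, and coefficients are stored by increasing degree, so poly[poly.size()-1] is the leading coefficient checked by the eigen_assert above.

    #include <unsupported/Eigen/Polynomials>
    #include <iostream>
    #include <vector>

    int main()
    {
      // p(x) = (x - 1)(x + 2)(x^2 + 1) = x^4 + x^3 - x^2 + x - 2
      Eigen::Matrix<double, 5, 1> poly;
      poly << -2., 1., -1., 1., 1.;            // coefficients by increasing degree

      Eigen::PolynomialSolver<double, 4> solver;
      solver.compute(poly);                    // asserts a nonzero leading coefficient

      std::vector<double> roots;
      solver.realRoots(roots);                 // keeps roots with |imag| below the default threshold
      for (double r : roots)
        std::cout << r << '\n';                // expect values near 1 and -2
    }
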
diff --git a/unsupported/Eigen/src/Polynomials/PolynomialUtils.h b/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
index c23204c65..2bb8bc84a 100644
--- a/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
+++ b/unsupported/Eigen/src/Polynomials/PolynomialUtils.h
@@ -47,7 +47,7 @@ T poly_eval( const Polynomials& poly, const T& x )
{
typedef typename NumTraits<T>::Real Real;
- if( internal::abs2( x ) <= Real(1) ){
+ if( numext::abs2( x ) <= Real(1) ){
return poly_eval_horner( poly, x ); }
else
{
@@ -74,15 +74,16 @@ template <typename Polynomial>
inline
typename NumTraits<typename Polynomial::Scalar>::Real cauchy_max_bound( const Polynomial& poly )
{
+ using std::abs;
typedef typename Polynomial::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real Real;
- assert( Scalar(0) != poly[poly.size()-1] );
+ eigen_assert( Scalar(0) != poly[poly.size()-1] );
const Scalar inv_leading_coeff = Scalar(1)/poly[poly.size()-1];
Real cb(0);
for( DenseIndex i=0; i<poly.size()-1; ++i ){
- cb += internal::abs(poly[i]*inv_leading_coeff); }
+ cb += abs(poly[i]*inv_leading_coeff); }
return cb + Real(1);
}
@@ -96,6 +97,7 @@ template <typename Polynomial>
inline
typename NumTraits<typename Polynomial::Scalar>::Real cauchy_min_bound( const Polynomial& poly )
{
+ using std::abs;
typedef typename Polynomial::Scalar Scalar;
typedef typename NumTraits<Scalar>::Real Real;
@@ -107,7 +109,7 @@ typename NumTraits<typename Polynomial::Scalar>::Real cauchy_min_bound( const Po
const Scalar inv_min_coeff = Scalar(1)/poly[i];
Real cb(1);
for( DenseIndex j=i+1; j<poly.size(); ++j ){
- cb += internal::abs(poly[j]*inv_min_coeff); }
+ cb += abs(poly[j]*inv_min_coeff); }
return Real(1)/cb;
}
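
The bound computed by cauchy_max_bound above is 1 + sum_i |c_i / c_n|, which dominates the classical Cauchy bound 1 + max_i |c_i / c_n| and is therefore also an upper bound on the modulus of every root. A minimal standalone sketch of that computation, independent of the Eigen API (the function name is illustrative):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Upper bound on |root| for p(x) = c[0] + c[1] x + ... + c[n] x^n, with c[n] != 0:
    // returns 1 + sum_{i<n} |c[i] / c[n]|, mirroring cauchy_max_bound above.
    double cauchy_max_bound_sketch(const std::vector<double>& c)
    {
      const double inv_lead = 1.0 / c.back();
      double cb = 0.0;
      for (std::size_t i = 0; i + 1 < c.size(); ++i)
        cb += std::abs(c[i] * inv_lead);
      return cb + 1.0;
    }
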
diff --git a/unsupported/Eigen/src/SVD/BDCSVD.h b/unsupported/Eigen/src/SVD/BDCSVD.h
new file mode 100644
index 000000000..11d4882e4
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/BDCSVD.h
@@ -0,0 +1,748 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// We used the research report "A Divide-And-Conquer Algorithm for the
+// Bidiagonal SVD" written by Ming Gu and Stanley C. Eisenstat.
+// The variable names in the code correspond to the names used in their
+// report.
+//
+// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
+// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
+// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
+// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_BDCSVD_H
+#define EIGEN_BDCSVD_H
+
+#define EPSILON 0.0000000000000001
+
+#define ALGOSWAP 32
+
+namespace Eigen {
+/** \ingroup SVD_Module
+ *
+ *
+ * \class BDCSVD
+ *
+ * \brief Bidiagonal Divide and Conquer SVD class
+ *
+ * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
+ * We plan to have a very similar interface to JacobiSVD for this class.
+ * It should be used to speed up the computation of the SVD for big matrices.
+ */
+template<typename _MatrixType>
+class BDCSVD : public SVDBase<_MatrixType>
+{
+ typedef SVDBase<_MatrixType> Base;
+
+public:
+ using Base::rows;
+ using Base::cols;
+
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime),
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime),
+ MatrixOptions = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
+ MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
+ MatrixUType;
+ typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
+ MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
+ MatrixVType;
+ typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
+ typedef typename internal::plain_row_type<MatrixType>::type RowType;
+ typedef typename internal::plain_col_type<MatrixType>::type ColType;
+ typedef Matrix<Scalar, Dynamic, Dynamic> MatrixX;
+ typedef Matrix<RealScalar, Dynamic, Dynamic> MatrixXr;
+ typedef Matrix<RealScalar, Dynamic, 1> VectorType;
+
+ /** \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via BDCSVD::compute(const MatrixType&).
+ */
+ BDCSVD()
+ : SVDBase<_MatrixType>::SVDBase(),
+ algoswap(ALGOSWAP)
+ {}
+
+
+ /** \brief Default Constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem size.
+ * \sa BDCSVD()
+ */
+ BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0)
+ : SVDBase<_MatrixType>::SVDBase(),
+ algoswap(ALGOSWAP)
+ {
+ allocate(rows, cols, computationOptions);
+ }
+
+ /** \brief Constructor performing the decomposition of given matrix.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
+ : SVDBase<_MatrixType>::SVDBase(),
+ algoswap(ALGOSWAP)
+ {
+ compute(matrix, computationOptions);
+ }
+
+ ~BDCSVD()
+ {
+ }
+ /** \brief Method performing the decomposition of given matrix using custom options.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ SVDBase<MatrixType>& compute(const MatrixType& matrix, unsigned int computationOptions);
+
+ /** \brief Method performing the decomposition of given matrix using current options.
+ *
+ * \param matrix the matrix to decompose
+ *
+ * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
+ */
+ SVDBase<MatrixType>& compute(const MatrixType& matrix)
+ {
+ return compute(matrix, this->m_computationOptions);
+ }
+
+ void setSwitchSize(int s)
+ {
+ eigen_assert(s>3 && "BDCSVD the size of the algo switch has to be greater than 3");
+ algoswap = s;
+ }
+
+
+ /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
+ *
+ * \param b the right-hand-side of the equation to solve.
+ *
+ * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V.
+ *
+ * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving.
+ * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<BDCSVD, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(this->m_isInitialized && "BDCSVD is not initialized.");
+ eigen_assert(SVDBase<_MatrixType>::computeU() && SVDBase<_MatrixType>::computeV() &&
+ "BDCSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
+ return internal::solve_retval<BDCSVD, Rhs>(*this, b.derived());
+ }
+
+
+ const MatrixUType& matrixU() const
+ {
+ eigen_assert(this->m_isInitialized && "SVD is not initialized.");
+ if (isTranspose){
+ eigen_assert(this->computeV() && "This SVD decomposition didn't compute U. Did you ask for it?");
+ return this->m_matrixV;
+ }
+ else
+ {
+ eigen_assert(this->computeU() && "This SVD decomposition didn't compute U. Did you ask for it?");
+ return this->m_matrixU;
+ }
+
+ }
+
+
+ const MatrixVType& matrixV() const
+ {
+ eigen_assert(this->m_isInitialized && "SVD is not initialized.");
+ if (isTranspose){
+ eigen_assert(this->computeU() && "This SVD decomposition didn't compute V. Did you ask for it?");
+ return this->m_matrixU;
+ }
+ else
+ {
+ eigen_assert(this->computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
+ return this->m_matrixV;
+ }
+ }
+
+private:
+ void allocate(Index rows, Index cols, unsigned int computationOptions);
+ void divide (Index firstCol, Index lastCol, Index firstRowW,
+ Index firstColW, Index shift);
+ void deflation43(Index firstCol, Index shift, Index i, Index size);
+ void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size);
+ void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift);
+ void copyUV(MatrixXr naiveU, MatrixXr naiveV, MatrixX householderU, MatrixX householderV);
+
+protected:
+ MatrixXr m_naiveU, m_naiveV;
+ MatrixXr m_computed;
+ Index nRec;
+ int algoswap;
+ bool isTranspose, compU, compV;
+
+}; //end class BDCSVD
+
+
+// Method to allocate and initialize the matrices and attributes
+template<typename MatrixType>
+void BDCSVD<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
+{
+ isTranspose = (cols > rows);
+ if (SVDBase<MatrixType>::allocate(rows, cols, computationOptions)) return;
+ m_computed = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize );
+ if (isTranspose){
+ compU = this->computeU();
+ compV = this->computeV();
+ }
+ else
+ {
+ compV = this->computeU();
+ compU = this->computeV();
+ }
+ if (compU) m_naiveU = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize + 1 );
+ else m_naiveU = MatrixXr::Zero(2, this->m_diagSize + 1 );
+
+ if (compV) m_naiveV = MatrixXr::Zero(this->m_diagSize, this->m_diagSize);
+
+
+ //should be changed for a cleaner implementation
+ if (isTranspose){
+ bool aux;
+ if (this->computeU()||this->computeV()){
+ aux = this->m_computeFullU;
+ this->m_computeFullU = this->m_computeFullV;
+ this->m_computeFullV = aux;
+ aux = this->m_computeThinU;
+ this->m_computeThinU = this->m_computeThinV;
+ this->m_computeThinV = aux;
+ }
+ }
+}// end allocate
+
+// Specialization computing the BDCSVD for integer matrices
+template<>
+SVDBase<Matrix<int, Dynamic, Dynamic> >&
+BDCSVD<Matrix<int, Dynamic, Dynamic> >::compute(const MatrixType& matrix, unsigned int computationOptions) {
+ allocate(matrix.rows(), matrix.cols(), computationOptions);
+ this->m_nonzeroSingularValues = 0;
+ m_computed = Matrix<int, Dynamic, Dynamic>::Zero(rows(), cols());
+ for (int i=0; i<this->m_diagSize; i++) {
+ this->m_singularValues.coeffRef(i) = 0;
+ }
+ if (this->m_computeFullU) this->m_matrixU = Matrix<int, Dynamic, Dynamic>::Zero(rows(), rows());
+ if (this->m_computeFullV) this->m_matrixV = Matrix<int, Dynamic, Dynamic>::Zero(cols(), cols());
+ this->m_isInitialized = true;
+ return *this;
+}
+
+
+// Method computing the BDCSVD
+template<typename MatrixType>
+SVDBase<MatrixType>&
+BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions)
+{
+ allocate(matrix.rows(), matrix.cols(), computationOptions);
+ using std::abs;
+
+ //**** step 1 Bidiagonalization (the input is transposed first when it has more columns than rows)
+ MatrixType copy;
+ if (isTranspose) copy = matrix.adjoint();
+ else copy = matrix;
+
+ internal::UpperBidiagonalization<MatrixX > bid(copy);
+
+ //**** step 2 Divide
+ // this is ugly and has to be redone (beware of the complex cast)
+ MatrixXr temp;
+ temp = bid.bidiagonal().toDenseMatrix().transpose();
+ m_computed.setZero();
+ for (int i=0; i<this->m_diagSize - 1; i++) {
+ m_computed(i, i) = temp(i, i);
+ m_computed(i + 1, i) = temp(i + 1, i);
+ }
+ m_computed(this->m_diagSize - 1, this->m_diagSize - 1) = temp(this->m_diagSize - 1, this->m_diagSize - 1);
+ divide(0, this->m_diagSize - 1, 0, 0, 0);
+
+ //**** step 3 copy
+ for (int i=0; i<this->m_diagSize; i++) {
+ RealScalar a = abs(m_computed.coeff(i, i));
+ this->m_singularValues.coeffRef(i) = a;
+ if (a == 0){
+ this->m_nonzeroSingularValues = i;
+ break;
+ }
+ else if (i == this->m_diagSize - 1)
+ {
+ this->m_nonzeroSingularValues = i + 1;
+ break;
+ }
+ }
+ copyUV(m_naiveV, m_naiveU, bid.householderU(), bid.householderV());
+ this->m_isInitialized = true;
+ return *this;
+}// end compute
+
+
+template<typename MatrixType>
+void BDCSVD<MatrixType>::copyUV(MatrixXr naiveU, MatrixXr naiveV, MatrixX householderU, MatrixX householderV){
+ if (this->computeU()){
+ MatrixX temp = MatrixX::Zero(naiveU.rows(), naiveU.cols());
+ temp.real() = naiveU;
+ if (this->m_computeThinU){
+ this->m_matrixU = MatrixX::Identity(householderU.cols(), this->m_nonzeroSingularValues );
+ this->m_matrixU.block(0, 0, this->m_diagSize, this->m_nonzeroSingularValues) =
+ temp.block(0, 0, this->m_diagSize, this->m_nonzeroSingularValues);
+ this->m_matrixU = householderU * this->m_matrixU ;
+ }
+ else
+ {
+ this->m_matrixU = MatrixX::Identity(householderU.cols(), householderU.cols());
+ this->m_matrixU.block(0, 0, this->m_diagSize, this->m_diagSize) = temp.block(0, 0, this->m_diagSize, this->m_diagSize);
+ this->m_matrixU = householderU * this->m_matrixU ;
+ }
+ }
+ if (this->computeV()){
+ MatrixX temp = MatrixX::Zero(naiveV.rows(), naiveV.cols());
+ temp.real() = naiveV;
+ if (this->m_computeThinV){
+ this->m_matrixV = MatrixX::Identity(householderV.cols(),this->m_nonzeroSingularValues );
+ this->m_matrixV.block(0, 0, this->m_nonzeroSingularValues, this->m_nonzeroSingularValues) =
+ temp.block(0, 0, this->m_nonzeroSingularValues, this->m_nonzeroSingularValues);
+ this->m_matrixV = householderV * this->m_matrixV ;
+ }
+ else
+ {
+ this->m_matrixV = MatrixX::Identity(householderV.cols(), householderV.cols());
+ this->m_matrixV.block(0, 0, this->m_diagSize, this->m_diagSize) = temp.block(0, 0, this->m_diagSize, this->m_diagSize);
+ this->m_matrixV = householderV * this->m_matrixV;
+ }
+ }
+}
+
+// The divide algorithm is done "in place": we always work on subsets of the same matrix. The divide method takes as arguments
+// the position of the submatrix we are currently working on.
+
+//@param firstCol : The Index of the first column of the submatrix of m_computed and of m_naiveU;
+//@param lastCol : The Index of the last column of the submatrix of m_computed and of m_naiveU;
+// lastCol + 1 - firstCol is the size of the submatrix.
+//@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper, section 1, for more information on W)
+//@param firstColW : Same as firstRowW, but for columns.
+//@param shift : Each time one takes the left submatrix, one must add 1 to the shift. This is because we want the last column of the U submatrix
+// to become the first column (*coeff) and all the other columns to shift to the right. More details can be found in the reference paper.
+template<typename MatrixType>
+void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW,
+ Index firstColW, Index shift)
+{
+ // requires nbRows = nbCols + 1;
+ using std::pow;
+ using std::sqrt;
+ using std::abs;
+ const Index n = lastCol - firstCol + 1;
+ const Index k = n/2;
+ RealScalar alphaK;
+ RealScalar betaK;
+ RealScalar r0;
+ RealScalar lambda, phi, c0, s0;
+ MatrixXr l, f;
+ // For blocks smaller than the switch size we fall back to JacobiSVD,
+ // which is more efficient for small matrices.
+ if (n < algoswap){
+ JacobiSVD<MatrixXr> b(m_computed.block(firstCol, firstCol, n + 1, n),
+ ComputeFullU | (ComputeFullV * compV)) ;
+ if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() << b.matrixU();
+ else
+ {
+ m_naiveU.row(0).segment(firstCol, n + 1).real() << b.matrixU().row(0);
+ m_naiveU.row(1).segment(firstCol, n + 1).real() << b.matrixU().row(n);
+ }
+ if (compV) m_naiveV.block(firstRowW, firstColW, n, n).real() << b.matrixV();
+ m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero();
+ for (int i=0; i<n; i++)
+ {
+ m_computed(firstCol + shift + i, firstCol + shift +i) = b.singularValues().coeffRef(i);
+ }
+ return;
+ }
+ // We use the divide and conquer algorithm
+ alphaK = m_computed(firstCol + k, firstCol + k);
+ betaK = m_computed(firstCol + k + 1, firstCol + k);
+ // The divides must be done in this order to get correct results. divide() changes the data inside the submatrices,
+ // and the divide of the right submatrix reads one column of the left submatrix. That is why we need to treat the
+ // right submatrix before the left one.
+ divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift);
+ divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1);
+ if (compU)
+ {
+ lambda = m_naiveU(firstCol + k, firstCol + k);
+ phi = m_naiveU(firstCol + k + 1, lastCol + 1);
+ }
+ else
+ {
+ lambda = m_naiveU(1, firstCol + k);
+ phi = m_naiveU(0, lastCol + 1);
+ }
+ r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda))
+ + abs(betaK * phi) * abs(betaK * phi));
+ if (compU)
+ {
+ l = m_naiveU.row(firstCol + k).segment(firstCol, k);
+ f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1);
+ }
+ else
+ {
+ l = m_naiveU.row(1).segment(firstCol, k);
+ f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1);
+ }
+ if (compV) m_naiveV(firstRowW+k, firstColW) = 1;
+ if (r0 == 0)
+ {
+ c0 = 1;
+ s0 = 0;
+ }
+ else
+ {
+ c0 = alphaK * lambda / r0;
+ s0 = betaK * phi / r0;
+ }
+ if (compU)
+ {
+ MatrixXr q1 (m_naiveU.col(firstCol + k).segment(firstCol, k + 1));
+ // we shift Q1 to the right
+ for (Index i = firstCol + k - 1; i >= firstCol; i--)
+ {
+ m_naiveU.col(i + 1).segment(firstCol, k + 1) << m_naiveU.col(i).segment(firstCol, k + 1);
+ }
+ // first column = q1 * c0
+ m_naiveU.col(firstCol).segment( firstCol, k + 1) << (q1 * c0);
+ // last column = q1 * - s0
+ m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) << (q1 * ( - s0));
+ // first column = q2 * s0
+ m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) <<
+ m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *s0;
+ // q2 *= c0
+ m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0;
+ }
+ else
+ {
+ RealScalar q1 = (m_naiveU(0, firstCol + k));
+ // we shift Q1 to the right
+ for (Index i = firstCol + k - 1; i >= firstCol; i--)
+ {
+ m_naiveU(0, i + 1) = m_naiveU(0, i);
+ }
+ // first column = q1 * c0
+ m_naiveU(0, firstCol) = (q1 * c0);
+ // last column = q1 * - s0
+ m_naiveU(0, lastCol + 1) = (q1 * ( - s0));
+ // first column = q2 * s0
+ m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0;
+ // q2 *= c0
+ m_naiveU(1, lastCol + 1) *= c0;
+ m_naiveU.row(1).segment(firstCol + 1, k).setZero();
+ m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero();
+ }
+ m_computed(firstCol + shift, firstCol + shift) = r0;
+ m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) << alphaK * l.transpose().real();
+ m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) << betaK * f.transpose().real();
+
+
+ // The call below performs the deflation of the matrix, needed by the third part of the algorithm.
+ // The real third part is a fast SVD on the matrix m_computed, which is made possible by this deflation;
+ // since it is not implemented yet, a JacobiSVD is applied to the deflated matrix instead (see below).
+
+ deflation(firstCol, lastCol, k, firstRowW, firstColW, shift);
+
+ // Third part of the algorithm: since the real third part is not implemented yet, we use a JacobiSVD
+ JacobiSVD<MatrixXr> res= JacobiSVD<MatrixXr>(m_computed.block(firstCol + shift, firstCol +shift, n + 1, n),
+ ComputeFullU | (ComputeFullV * compV)) ;
+ if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1) *= res.matrixU();
+ else m_naiveU.block(0, firstCol, 2, n + 1) *= res.matrixU();
+
+ if (compV) m_naiveV.block(firstRowW, firstColW, n, n) *= res.matrixV();
+ m_computed.block(firstCol + shift, firstCol + shift, n, n) << MatrixXr::Zero(n, n);
+ for (int i=0; i<n; i++)
+ m_computed(firstCol + shift + i, firstCol + shift +i) = res.singularValues().coeffRef(i);
+ // end of the third part
+
+
+}// end divide
+
+
+// pages 12-13
+// i >= 1, d_i nearly zero and z_i nonzero.
+// We apply a rotation on the left of M to zero out z_i.
+template <typename MatrixType>
+void BDCSVD<MatrixType>::deflation43(Index firstCol, Index shift, Index i, Index size){
+ using std::abs;
+ using std::sqrt;
+ using std::pow;
+ RealScalar c = m_computed(firstCol + shift, firstCol + shift);
+ RealScalar s = m_computed(i, firstCol + shift);
+ RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2));
+ if (r == 0){
+ m_computed(i, i)=0;
+ return;
+ }
+ c/=r;
+ s/=r;
+ m_computed(firstCol + shift, firstCol + shift) = r;
+ m_computed(i, firstCol + shift) = 0;
+ m_computed(i, i) = 0;
+ if (compU){
+ m_naiveU.col(firstCol).segment(firstCol,size) =
+ c * m_naiveU.col(firstCol).segment(firstCol, size) -
+ s * m_naiveU.col(i).segment(firstCol, size) ;
+
+ m_naiveU.col(i).segment(firstCol, size) =
+ (c + s*s/c) * m_naiveU.col(i).segment(firstCol, size) +
+ (s/c) * m_naiveU.col(firstCol).segment(firstCol,size);
+ }
+}// end deflation 43
+
+
+// page 13
+// i,j >= 1, i != j and |di - dj| < epsilon * norm2(M)
+// We apply two rotations to have zj = 0;
+template <typename MatrixType>
+void BDCSVD<MatrixType>::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size){
+ using std::abs;
+ using std::sqrt;
+ using std::conj;
+ using std::pow;
+ RealScalar c = m_computed(firstColm, firstColm + j - 1);
+ RealScalar s = m_computed(firstColm, firstColm + i - 1);
+ RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2));
+ if (r==0){
+ m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
+ return;
+ }
+ c/=r;
+ s/=r;
+ m_computed(firstColm + i, firstColm) = r;
+ m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
+ m_computed(firstColm + j, firstColm) = 0;
+ if (compU){
+ m_naiveU.col(firstColu + i).segment(firstColu, size) =
+ c * m_naiveU.col(firstColu + i).segment(firstColu, size) -
+ s * m_naiveU.col(firstColu + j).segment(firstColu, size) ;
+
+ m_naiveU.col(firstColu + j).segment(firstColu, size) =
+ (c + s*s/c) * m_naiveU.col(firstColu + j).segment(firstColu, size) +
+ (s/c) * m_naiveU.col(firstColu + i).segment(firstColu, size);
+ }
+ if (compV){
+ m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) =
+ c * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) +
+ s * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) ;
+
+ m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) =
+ (c + s*s/c) * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) -
+ (s/c) * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1);
+ }
+}// end deflation 44
+
+
+
+template <typename MatrixType>
+void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift){
+ //condition 4.1
+ RealScalar EPS = EPSILON * (std::max<RealScalar>(m_computed(firstCol + shift + 1, firstCol + shift + 1), m_computed(firstCol + k, firstCol + k)));
+ const Index length = lastCol + 1 - firstCol;
+ if (m_computed(firstCol + shift, firstCol + shift) < EPS){
+ m_computed(firstCol + shift, firstCol + shift) = EPS;
+ }
+ //condition 4.2
+ for (Index i=firstCol + shift + 1;i<=lastCol + shift;i++){
+ if (std::abs(m_computed(i, firstCol + shift)) < EPS){
+ m_computed(i, firstCol + shift) = 0;
+ }
+ }
+
+ //condition 4.3
+ for (Index i=firstCol + shift + 1;i<=lastCol + shift; i++){
+ if (m_computed(i, i) < EPS){
+ deflation43(firstCol, shift, i, length);
+ }
+ }
+
+ //condition 4.4
+
+ Index i=firstCol + shift + 1, j=firstCol + shift + k + 1;
+ // we store the final position of each entry
+ Index *permutation = new Index[length];
+
+ for (Index p =1; p < length; p++) {
+ if (i> firstCol + shift + k){
+ permutation[p] = j;
+ j++;
+ } else if (j> lastCol + shift)
+ {
+ permutation[p] = i;
+ i++;
+ }
+ else
+ {
+ if (m_computed(i, i) < m_computed(j, j)){
+ permutation[p] = j;
+ j++;
+ }
+ else
+ {
+ permutation[p] = i;
+ i++;
+ }
+ }
+ }
+ //we do the permutation
+ RealScalar aux;
+ // we store the current index of each column
+ // and the column of each index
+ Index *realInd = new Index[length];
+ Index *realCol = new Index[length];
+ for (int pos = 0; pos< length; pos++){
+ realCol[pos] = pos + firstCol + shift;
+ realInd[pos] = pos;
+ }
+ const Index Zero = firstCol + shift;
+ VectorType temp;
+ for (int i = 1; i < length - 1; i++){
+ const Index I = i + Zero;
+ const Index realI = realInd[i];
+ const Index j = permutation[length - i] - Zero;
+ const Index J = realCol[j];
+
+ // swap the diagonal entries
+ aux = m_computed(I, I);
+ m_computed(I, I) = m_computed(J, J);
+ m_computed(J, J) = aux;
+
+ // swap the corresponding entries of the first column
+ aux = m_computed(I, Zero);
+ m_computed(I, Zero) = m_computed(J, Zero);
+ m_computed(J, Zero) = aux;
+
+ // change columns
+ if (compU) {
+ temp = m_naiveU.col(I - shift).segment(firstCol, length + 1);
+ m_naiveU.col(I - shift).segment(firstCol, length + 1) <<
+ m_naiveU.col(J - shift).segment(firstCol, length + 1);
+ m_naiveU.col(J - shift).segment(firstCol, length + 1) << temp;
+ }
+ else
+ {
+ temp = m_naiveU.col(I - shift).segment(0, 2);
+ m_naiveU.col(I - shift).segment(0, 2) <<
+ m_naiveU.col(J - shift).segment(0, 2);
+ m_naiveU.col(J - shift).segment(0, 2) << temp;
+ }
+ if (compV) {
+ const Index CWI = I + firstColW - Zero;
+ const Index CWJ = J + firstColW - Zero;
+ temp = m_naiveV.col(CWI).segment(firstRowW, length);
+ m_naiveV.col(CWI).segment(firstRowW, length) << m_naiveV.col(CWJ).segment(firstRowW, length);
+ m_naiveV.col(CWJ).segment(firstRowW, length) << temp;
+ }
+
+ //update real pos
+ realCol[realI] = J;
+ realCol[j] = I;
+ realInd[J - Zero] = realI;
+ realInd[I - Zero] = j;
+ }
+ for (Index i = firstCol + shift + 1; i<lastCol + shift;i++){
+ if ((m_computed(i + 1, i + 1) - m_computed(i, i)) < EPS){
+ deflation44(firstCol ,
+ firstCol + shift,
+ firstRowW,
+ firstColW,
+ i - Zero,
+ i + 1 - Zero,
+ length);
+ }
+ }
+ delete [] permutation;
+ delete [] realInd;
+ delete [] realCol;
+
+}//end deflation
+
+
+namespace internal{
+
+template<typename _MatrixType, typename Rhs>
+struct solve_retval<BDCSVD<_MatrixType>, Rhs>
+ : solve_retval_base<BDCSVD<_MatrixType>, Rhs>
+{
+ typedef BDCSVD<_MatrixType> BDCSVDType;
+ EIGEN_MAKE_SOLVE_HELPERS(BDCSVDType, Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ eigen_assert(rhs().rows() == dec().rows());
+ // A = U S V^*
+ // So A^{ - 1} = V S^{ - 1} U^*
+ Index diagSize = (std::min)(dec().rows(), dec().cols());
+ typename BDCSVDType::SingularValuesType invertedSingVals(diagSize);
+ Index nonzeroSingVals = dec().nonzeroSingularValues();
+ invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
+ invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
+
+ dst = dec().matrixV().leftCols(diagSize)
+ * invertedSingVals.asDiagonal()
+ * dec().matrixU().leftCols(diagSize).adjoint()
+ * rhs();
+ return;
+ }
+};
+
+} //end namespace internal
+
+ /** \svd_module
+ *
+ * \return the singular value decomposition of \c *this computed by
+ * the BDC algorithm
+ *
+ * \sa class BDCSVD
+ */
+/*
+template<typename Derived>
+BDCSVD<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::bdcSvd(unsigned int computationOptions) const
+{
+ return BDCSVD<PlainObject>(*this, computationOptions);
+}
+*/
+
+} // end namespace Eigen
+
+#endif
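
A hedged usage sketch of the new BDCSVD class, assuming the include path and option flags mirror JacobiSVD as the class documentation above indicates (thin unitaries are enough for solve()); matrix sizes are arbitrary:

    #include <unsupported/Eigen/SVD>   // assumed entry point for this unsupported module
    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXf A = Eigen::MatrixXf::Random(80, 50);
      Eigen::BDCSVD<Eigen::MatrixXf> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
      std::cout << "singular values:\n" << svd.singularValues() << "\n";

      Eigen::VectorXf b = Eigen::VectorXf::Random(80);
      Eigen::VectorXf x = svd.solve(b);        // least-squares solution of A x = b
      std::cout << "residual norm: " << (A * x - b).norm() << "\n";
    }
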
diff --git a/unsupported/Eigen/src/SVD/CMakeLists.txt b/unsupported/Eigen/src/SVD/CMakeLists.txt
new file mode 100644
index 000000000..b40baf092
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/CMakeLists.txt
@@ -0,0 +1,6 @@
+FILE(GLOB Eigen_SVD_SRCS "*.h")
+
+INSTALL(FILES
+ ${Eigen_SVD_SRCS}
+ DESTINATION ${INCLUDE_INSTALL_DIR}unsupported/Eigen/src/SVD COMPONENT Devel
+ )
diff --git a/unsupported/Eigen/src/SVD/JacobiSVD.h b/unsupported/Eigen/src/SVD/JacobiSVD.h
new file mode 100644
index 000000000..02fac409e
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/JacobiSVD.h
@@ -0,0 +1,782 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_JACOBISVD_H
+#define EIGEN_JACOBISVD_H
+
+namespace Eigen {
+
+namespace internal {
+// forward declaration (needed by ICC)
+// the empty body is required by MSVC
+template<typename MatrixType, int QRPreconditioner,
+ bool IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
+struct svd_precondition_2x2_block_to_be_real {};
+
+/*** QR preconditioners (R-SVD)
+ ***
+ *** Their role is to reduce the problem of computing the SVD to the case of a square matrix.
+ *** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for
+ *** JacobiSVD which by itself is only able to work on square matrices.
+ ***/
+
+enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols };
+
+template<typename MatrixType, int QRPreconditioner, int Case>
+struct qr_preconditioner_should_do_anything
+{
+ enum { a = MatrixType::RowsAtCompileTime != Dynamic &&
+ MatrixType::ColsAtCompileTime != Dynamic &&
+ MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime,
+ b = MatrixType::RowsAtCompileTime != Dynamic &&
+ MatrixType::ColsAtCompileTime != Dynamic &&
+ MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime,
+ ret = !( (QRPreconditioner == NoQRPreconditioner) ||
+ (Case == PreconditionIfMoreColsThanRows && bool(a)) ||
+ (Case == PreconditionIfMoreRowsThanCols && bool(b)) )
+ };
+};
+
+template<typename MatrixType, int QRPreconditioner, int Case,
+ bool DoAnything = qr_preconditioner_should_do_anything<MatrixType, QRPreconditioner, Case>::ret
+> struct qr_preconditioner_impl {};
+
+template<typename MatrixType, int QRPreconditioner, int Case>
+class qr_preconditioner_impl<MatrixType, QRPreconditioner, Case, false>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ void allocate(const JacobiSVD<MatrixType, QRPreconditioner>&) {}
+ bool run(JacobiSVD<MatrixType, QRPreconditioner>&, const MatrixType&)
+ {
+ return false;
+ }
+};
+
+/*** preconditioner using FullPivHouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ enum
+ {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
+ };
+ typedef Matrix<Scalar, 1, RowsAtCompileTime, RowMajor, 1, MaxRowsAtCompileTime> WorkspaceType;
+
+ void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
+ {
+ if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
+ }
+ if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+ }
+
+ bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.rows() > matrix.cols())
+ {
+ m_qr.compute(matrix);
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+ if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace);
+ if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
+ return true;
+ }
+ return false;
+ }
+private:
+ typedef FullPivHouseholderQR<MatrixType> QRType;
+ QRType m_qr;
+ WorkspaceType m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ enum
+ {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ Options = MatrixType::Options
+ };
+ typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+ TransposeTypeWithSameStorageOrder;
+
+ void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
+ {
+ if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
+ }
+ m_adjoint.resize(svd.cols(), svd.rows());
+ if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+ }
+
+ bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.cols() > matrix.rows())
+ {
+ m_adjoint = matrix.adjoint();
+ m_qr.compute(m_adjoint);
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+ if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace);
+ if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
+ return true;
+ }
+ else return false;
+ }
+private:
+ typedef FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
+ TransposeTypeWithSameStorageOrder m_adjoint;
+ typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** preconditioner using ColPivHouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+
+ void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
+ {
+ if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
+ }
+ if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+ else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
+ }
+
+ bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.rows() > matrix.cols())
+ {
+ m_qr.compute(matrix);
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+ if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
+ else if(svd.m_computeThinU)
+ {
+ svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
+ m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
+ }
+ if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
+ return true;
+ }
+ return false;
+ }
+
+private:
+ typedef ColPivHouseholderQR<MatrixType> QRType;
+ QRType m_qr;
+ typename internal::plain_col_type<MatrixType>::type m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ enum
+ {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ Options = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+ TransposeTypeWithSameStorageOrder;
+
+ void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
+ {
+ if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
+ }
+ if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+ else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
+ m_adjoint.resize(svd.cols(), svd.rows());
+ }
+
+ bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.cols() > matrix.rows())
+ {
+ m_adjoint = matrix.adjoint();
+ m_qr.compute(m_adjoint);
+
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+ if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
+ else if(svd.m_computeThinV)
+ {
+ svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
+ m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
+ }
+ if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
+ return true;
+ }
+ else return false;
+ }
+
+private:
+ typedef ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
+ TransposeTypeWithSameStorageOrder m_adjoint;
+ typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** preconditioner using HouseholderQR ***/
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+
+ void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
+ {
+ if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
+ }
+ if (svd.m_computeFullU) m_workspace.resize(svd.rows());
+ else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
+ }
+
+ bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.rows() > matrix.cols())
+ {
+ m_qr.compute(matrix);
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
+ if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
+ else if(svd.m_computeThinU)
+ {
+ svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
+ m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
+ }
+ if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols());
+ return true;
+ }
+ return false;
+ }
+private:
+ typedef HouseholderQR<MatrixType> QRType;
+ QRType m_qr;
+ typename internal::plain_col_type<MatrixType>::type m_workspace;
+};
+
+template<typename MatrixType>
+class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
+{
+public:
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ enum
+ {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ Options = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime>
+ TransposeTypeWithSameStorageOrder;
+
+ void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
+ {
+ if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
+ {
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
+ }
+ if (svd.m_computeFullV) m_workspace.resize(svd.cols());
+ else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
+ m_adjoint.resize(svd.cols(), svd.rows());
+ }
+
+ bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
+ {
+ if(matrix.cols() > matrix.rows())
+ {
+ m_adjoint = matrix.adjoint();
+ m_qr.compute(m_adjoint);
+
+ svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
+ if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
+ else if(svd.m_computeThinV)
+ {
+ svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
+ m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
+ }
+ if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows());
+ return true;
+ }
+ else return false;
+ }
+
+private:
+ typedef HouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
+ TransposeTypeWithSameStorageOrder m_adjoint;
+ typename internal::plain_row_type<MatrixType>::type m_workspace;
+};
+
+/*** 2x2 SVD implementation
+ ***
+ *** JacobiSVD consists in performing a series of 2x2 SVD subproblems
+ ***/
+
+template<typename MatrixType, int QRPreconditioner>
+struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false>
+{
+ typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
+ typedef typename SVD::Index Index;
+ static void run(typename SVD::WorkMatrixType&, SVD&, Index, Index) {}
+};
+
+template<typename MatrixType, int QRPreconditioner>
+struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
+{
+ typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::RealScalar RealScalar;
+ typedef typename SVD::Index Index;
+ static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q)
+ {
+ using std::sqrt;
+ Scalar z;
+ JacobiRotation<Scalar> rot;
+ RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
+ if(n==0)
+ {
+ z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
+ work_matrix.row(p) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
+ z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
+ work_matrix.row(q) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+ }
+ else
+ {
+ rot.c() = conj(work_matrix.coeff(p,p)) / n;
+ rot.s() = work_matrix.coeff(q,p) / n;
+ work_matrix.applyOnTheLeft(p,q,rot);
+ if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
+ if(work_matrix.coeff(p,q) != Scalar(0))
+ {
+ Scalar z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
+ work_matrix.col(q) *= z;
+ if(svd.computeV()) svd.m_matrixV.col(q) *= z;
+ }
+ if(work_matrix.coeff(q,q) != Scalar(0))
+ {
+ z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
+ work_matrix.row(q) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+ }
+ }
+ }
+};
+
+template<typename MatrixType, typename RealScalar, typename Index>
+void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
+ JacobiRotation<RealScalar> *j_left,
+ JacobiRotation<RealScalar> *j_right)
+{
+ using std::sqrt;
+ Matrix<RealScalar,2,2> m;
+ m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
+ numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
+ JacobiRotation<RealScalar> rot1;
+ RealScalar t = m.coeff(0,0) + m.coeff(1,1);
+ RealScalar d = m.coeff(1,0) - m.coeff(0,1);
+ if(t == RealScalar(0))
+ {
+ rot1.c() = RealScalar(0);
+ rot1.s() = d > RealScalar(0) ? RealScalar(1) : RealScalar(-1);
+ }
+ else
+ {
+ RealScalar u = d / t;
+ rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + numext::abs2(u));
+ rot1.s() = rot1.c() * u;
+ }
+ m.applyOnTheLeft(0,1,rot1);
+ j_right->makeJacobi(m,0,1);
+ *j_left = rot1 * j_right->transpose();
+}
+
+} // end namespace internal
+
+/** \ingroup SVD_Module
+ *
+ *
+ * \class JacobiSVD
+ *
+ * \brief Two-sided Jacobi SVD decomposition of a rectangular matrix
+ *
+ * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
+ * \param QRPreconditioner this optional parameter allows specifying the type of QR decomposition that will be used internally
+ * for the R-SVD step for non-square matrices. See discussion of possible values below.
+ *
+ * SVD decomposition consists in decomposing any n-by-p matrix \a A as a product
+ * \f[ A = U S V^* \f]
+ * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal;
+ * the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
+ * and right \em singular \em vectors of \a A respectively.
+ *
+ * Singular values are always sorted in decreasing order.
+ *
+ * This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly.
+ *
+ * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
+ * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
+ * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
+ * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
+ *
+ * Here's an example demonstrating basic usage:
+ * \include JacobiSVD_basic.cpp
+ * Output: \verbinclude JacobiSVD_basic.out
+ *
+ * This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than
+ * bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and
+ * \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms.
+ * In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension.
+ *
+ * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
+ * terminate in finite (and reasonable) time.
+ *
+ * The possible values for QRPreconditioner are:
+ * \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR.
+ * \li FullPivHouseholderQRPreconditioner is the safest and slowest. It uses full-pivoting QR.
+ * Contrary to other QRs, it doesn't allow computing thin unitaries.
+ * \li HouseholderQRPreconditioner is the fastest, but less safe and accurate than the pivoting variants. It uses non-pivoting QR.
+ * This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization
+ * is inherently non-pivoting). However the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterative
+ * process is more reliable than the optimized bidiagonal SVD iterations.
+ * \li NoQRPreconditioner allows not using a QR preconditioner at all. This is useful if you know that you will only be computing
+ * JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in
+ * faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking
+ * if QR preconditioning is needed before applying it anyway.
+ *
+ * \sa MatrixBase::jacobiSvd()
+ */
+template<typename _MatrixType, int QRPreconditioner>
+class JacobiSVD : public SVDBase<_MatrixType>
+{
+ public:
+
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
+ MatrixOptions = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
+ MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
+ MatrixUType;
+ typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
+ MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
+ MatrixVType;
+ typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
+ typedef typename internal::plain_row_type<MatrixType>::type RowType;
+ typedef typename internal::plain_col_type<MatrixType>::type ColType;
+ typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
+ MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
+ WorkMatrixType;
+
+ /** \brief Default Constructor.
+ *
+ * The default constructor is useful in cases in which the user intends to
+ * perform decompositions via JacobiSVD::compute(const MatrixType&).
+ */
+ JacobiSVD()
+ : SVDBase<_MatrixType>::SVDBase()
+ {}
+
+
+ /** \brief Default Constructor with memory preallocation
+ *
+ * Like the default constructor but with preallocation of the internal data
+ * according to the specified problem size.
+ * \sa JacobiSVD()
+ */
+ JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
+ : SVDBase<_MatrixType>::SVDBase()
+ {
+ allocate(rows, cols, computationOptions);
+ }
+
+ /** \brief Constructor performing the decomposition of given matrix.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
+ : SVDBase<_MatrixType>::SVDBase()
+ {
+ compute(matrix, computationOptions);
+ }
+
+ /** \brief Method performing the decomposition of given matrix using custom options.
+ *
+ * \param matrix the matrix to decompose
+ * \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
+ * By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
+ * #ComputeFullV, #ComputeThinV.
+ *
+ * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
+ * available with the (non-default) FullPivHouseholderQR preconditioner.
+ */
+ SVDBase<MatrixType>& compute(const MatrixType& matrix, unsigned int computationOptions);
+
+ /** \brief Method performing the decomposition of given matrix using current options.
+ *
+ * \param matrix the matrix to decompose
+ *
+ * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
+ */
+ SVDBase<MatrixType>& compute(const MatrixType& matrix)
+ {
+ return compute(matrix, this->m_computationOptions);
+ }
+
+ /** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
+ *
+ * \param b the right-hand-side of the equation to solve.
+ *
+ * \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V.
+ *
+ * \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving.
+ * In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
+ */
+ template<typename Rhs>
+ inline const internal::solve_retval<JacobiSVD, Rhs>
+ solve(const MatrixBase<Rhs>& b) const
+ {
+ eigen_assert(this->m_isInitialized && "JacobiSVD is not initialized.");
+ eigen_assert(SVDBase<MatrixType>::computeU() && SVDBase<MatrixType>::computeV() && "JacobiSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
+ return internal::solve_retval<JacobiSVD, Rhs>(*this, b.derived());
+ }
+
+
+
+ private:
+ void allocate(Index rows, Index cols, unsigned int computationOptions);
+
+ protected:
+ WorkMatrixType m_workMatrix;
+
+ template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
+ friend struct internal::svd_precondition_2x2_block_to_be_real;
+ template<typename __MatrixType, int _QRPreconditioner, int _Case, bool _DoAnything>
+ friend struct internal::qr_preconditioner_impl;
+
+ internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;
+ internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;
+};
+
+template<typename MatrixType, int QRPreconditioner>
+void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, unsigned int computationOptions)
+{
+ if (SVDBase<MatrixType>::allocate(rows, cols, computationOptions)) return;
+
+ if (QRPreconditioner == FullPivHouseholderQRPreconditioner)
+ {
+ eigen_assert(!(this->m_computeThinU || this->m_computeThinV) &&
+ "JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
+ "Use the ColPivHouseholderQR preconditioner instead.");
+ }
+
+ m_workMatrix.resize(this->m_diagSize, this->m_diagSize);
+
+ if(this->m_cols>this->m_rows) m_qr_precond_morecols.allocate(*this);
+ if(this->m_rows>this->m_cols) m_qr_precond_morerows.allocate(*this);
+}
+
+template<typename MatrixType, int QRPreconditioner>
+SVDBase<MatrixType>&
+JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
+{
+ using std::abs;
+ allocate(matrix.rows(), matrix.cols(), computationOptions);
+
+ // currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
+ // only worsening the precision of U and V as we accumulate more rotations
+ const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();
+
+ // limit for very small denormal numbers to be considered zero in order to avoid infinite loops (see bug 286)
+ const RealScalar considerAsZero = RealScalar(2) * std::numeric_limits<RealScalar>::denorm_min();
+
+ /*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */
+
+ if(!m_qr_precond_morecols.run(*this, matrix) && !m_qr_precond_morerows.run(*this, matrix))
+ {
+ m_workMatrix = matrix.block(0,0,this->m_diagSize,this->m_diagSize);
+ if(this->m_computeFullU) this->m_matrixU.setIdentity(this->m_rows,this->m_rows);
+ if(this->m_computeThinU) this->m_matrixU.setIdentity(this->m_rows,this->m_diagSize);
+ if(this->m_computeFullV) this->m_matrixV.setIdentity(this->m_cols,this->m_cols);
+ if(this->m_computeThinV) this->m_matrixV.setIdentity(this->m_cols, this->m_diagSize);
+ }
+
+ /*** step 2. The main Jacobi SVD iteration. ***/
+
+ bool finished = false;
+ while(!finished)
+ {
+ finished = true;
+
+ // do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix
+
+ for(Index p = 1; p < this->m_diagSize; ++p)
+ {
+ for(Index q = 0; q < p; ++q)
+ {
+ // if this 2x2 sub-matrix is not diagonal already...
+ // notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
+ // keep us iterating forever. Similarly, small denormal numbers are considered zero.
+ using std::max;
+ RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)),
+ abs(m_workMatrix.coeff(q,q))));
+ if((max)(abs(m_workMatrix.coeff(p,q)),abs(m_workMatrix.coeff(q,p))) > threshold)
+ {
+ finished = false;
+
+ // perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
+ internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q);
+ JacobiRotation<RealScalar> j_left, j_right;
+ internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
+
+ // accumulate resulting Jacobi rotations
+ m_workMatrix.applyOnTheLeft(p,q,j_left);
+ if(SVDBase<MatrixType>::computeU()) this->m_matrixU.applyOnTheRight(p,q,j_left.transpose());
+
+ m_workMatrix.applyOnTheRight(p,q,j_right);
+ if(SVDBase<MatrixType>::computeV()) this->m_matrixV.applyOnTheRight(p,q,j_right);
+ }
+ }
+ }
+ }
+
+  /*** step 3. The work matrix is now diagonal, so make its diagonal entries non-negative so that they are the singular values ***/
+
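+  // The modulus of each diagonal entry is the singular value; its remaining phase factor
+  // (a mere sign in the real case) is absorbed into the corresponding column of U.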
+ for(Index i = 0; i < this->m_diagSize; ++i)
+ {
+ RealScalar a = abs(m_workMatrix.coeff(i,i));
+ this->m_singularValues.coeffRef(i) = a;
+ if(SVDBase<MatrixType>::computeU() && (a!=RealScalar(0))) this->m_matrixU.col(i) *= this->m_workMatrix.coeff(i,i)/a;
+ }
+
+ /*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/
+
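+  // Selection sort: at step i, bring the largest remaining singular value to position i
+  // and swap the matching columns of U and V so the decomposition stays consistent.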
+ this->m_nonzeroSingularValues = this->m_diagSize;
+ for(Index i = 0; i < this->m_diagSize; i++)
+ {
+ Index pos;
+ RealScalar maxRemainingSingularValue = this->m_singularValues.tail(this->m_diagSize-i).maxCoeff(&pos);
+ if(maxRemainingSingularValue == RealScalar(0))
+ {
+ this->m_nonzeroSingularValues = i;
+ break;
+ }
+ if(pos)
+ {
+ pos += i;
+ std::swap(this->m_singularValues.coeffRef(i), this->m_singularValues.coeffRef(pos));
+ if(SVDBase<MatrixType>::computeU()) this->m_matrixU.col(pos).swap(this->m_matrixU.col(i));
+ if(SVDBase<MatrixType>::computeV()) this->m_matrixV.col(pos).swap(this->m_matrixV.col(i));
+ }
+ }
+
+ this->m_isInitialized = true;
+ return *this;
+}
+
+namespace internal {
+template<typename _MatrixType, int QRPreconditioner, typename Rhs>
+struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
+ : solve_retval_base<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
+{
+ typedef JacobiSVD<_MatrixType, QRPreconditioner> JacobiSVDType;
+ EIGEN_MAKE_SOLVE_HELPERS(JacobiSVDType,Rhs)
+
+ template<typename Dest> void evalTo(Dest& dst) const
+ {
+ eigen_assert(rhs().rows() == dec().rows());
+
+ // A = U S V^*
+ // So A^{-1} = V S^{-1} U^*
+
+ Index diagSize = (std::min)(dec().rows(), dec().cols());
+ typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize);
+
+ Index nonzeroSingVals = dec().nonzeroSingularValues();
+ invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
+ invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
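+    // Zeroing the inverses of the (numerically) zero singular values yields the
+    // Moore-Penrose pseudo-inverse, i.e. the least-squares solution of minimal norm.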
+
+ dst = dec().matrixV().leftCols(diagSize)
+ * invertedSingVals.asDiagonal()
+ * dec().matrixU().leftCols(diagSize).adjoint()
+ * rhs();
+ }
+};
+} // end namespace internal
+
+/** \svd_module
+ *
+ * \return the singular value decomposition of \c *this computed by two-sided
+ * Jacobi transformations.
+ *
+ * \sa class JacobiSVD
+ */
+template<typename Derived>
+JacobiSVD<typename MatrixBase<Derived>::PlainObject>
+MatrixBase<Derived>::jacobiSvd(unsigned int computationOptions) const
+{
+ return JacobiSVD<PlainObject>(*this, computationOptions);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_JACOBISVD_H
diff --git a/unsupported/Eigen/src/SVD/SVDBase.h b/unsupported/Eigen/src/SVD/SVDBase.h
new file mode 100644
index 000000000..fd8af3b8c
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/SVDBase.h
@@ -0,0 +1,236 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
+//
+// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
+// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
+// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
+// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SVD_H
+#define EIGEN_SVD_H
+
+namespace Eigen {
+/** \ingroup SVD_Module
+ *
+ *
+ * \class SVDBase
+ *
+ * \brief Base class of SVD algorithms
+ *
+ * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
+ *
+ * The SVD decomposition consists of decomposing any n-by-p matrix \a A as a product
+ *   \f[ A = U S V^* \f]
+ * where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal;
+ * the diagonal entries of \a S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
+ * and right \em singular \em vectors of \a A respectively.
+ *
+ * Singular values are always sorted in decreasing order.
+ *
+ *
+ * You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
+ * smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
+ * singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
+ * and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
+ *
+ * If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
+ * terminate in finite (and reasonable) time.
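+ *
+ * For illustration, a minimal sketch (arbitrary sizes and values, assuming a dynamic-size matrix type):
+ * \code
+ * Eigen::MatrixXd A = Eigen::MatrixXd::Random(5,3);
+ * Eigen::JacobiSVD<Eigen::MatrixXd> svd = A.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV);
+ * // A is recovered (up to rounding errors) as U * S * V^*:
+ * Eigen::MatrixXd A2 = svd.matrixU() * svd.singularValues().asDiagonal() * svd.matrixV().adjoint();
+ * \endcode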
+ * \sa MatrixBase::jacobiSvd()
+ */
+template<typename _MatrixType>
+class SVDBase
+{
+
+public:
+ typedef _MatrixType MatrixType;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
+ typedef typename MatrixType::Index Index;
+ enum {
+ RowsAtCompileTime = MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = MatrixType::ColsAtCompileTime,
+ DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
+ MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
+ MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
+ MatrixOptions = MatrixType::Options
+ };
+
+ typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime,
+ MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
+ MatrixUType;
+ typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime,
+ MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
+ MatrixVType;
+ typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
+ typedef typename internal::plain_row_type<MatrixType>::type RowType;
+ typedef typename internal::plain_col_type<MatrixType>::type ColType;
+ typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
+ MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
+ WorkMatrixType;
+
+
+
+
+  /** \brief Method performing the decomposition of the given matrix using custom options.
+ *
+ * \param matrix the matrix to decompose
+   * \param computationOptions optional parameter allowing one to specify whether full or thin U or V unitaries should be computed.
+   *                            By default, none is computed. This is a bit-field; the possible bits are #ComputeFullU, #ComputeThinU,
+   *                            #ComputeFullV, #ComputeThinV.
+   *
+   * Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They are also not
+   * available with the (non-default) FullPivHouseholderQR preconditioner.
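+   *
+   * A minimal usage sketch (arbitrary sizes and values, assuming a dynamic-size matrix type):
+   * \code
+   * Eigen::MatrixXf m = Eigen::MatrixXf::Random(4,3);
+   * Eigen::JacobiSVD<Eigen::MatrixXf> svd = m.jacobiSvd(Eigen::ComputeThinU);
+   * svd.compute(m, Eigen::ComputeThinU | Eigen::ComputeThinV); // recompute, now also forming thin V
+   * Eigen::MatrixXf thinV = svd.matrixV();                     // thin V is now available
+   * \endcode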
+ */
+ SVDBase& compute(const MatrixType& matrix, unsigned int computationOptions);
+
+  /** \brief Method performing the decomposition of the given matrix using current options.
+ *
+ * \param matrix the matrix to decompose
+ *
+ * This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
+ */
+ //virtual SVDBase& compute(const MatrixType& matrix) = 0;
+ SVDBase& compute(const MatrixType& matrix);
+
+ /** \returns the \a U matrix.
+ *
+   * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
+ * the U matrix is n-by-n if you asked for #ComputeFullU, and is n-by-m if you asked for #ComputeThinU.
+ *
+ * The \a m first columns of \a U are the left singular vectors of the matrix being decomposed.
+ *
+ * This method asserts that you asked for \a U to be computed.
+ */
+ const MatrixUType& matrixU() const
+ {
+ eigen_assert(m_isInitialized && "SVD is not initialized.");
+ eigen_assert(computeU() && "This SVD decomposition didn't compute U. Did you ask for it?");
+ return m_matrixU;
+ }
+
+ /** \returns the \a V matrix.
+ *
+ * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
+   * the V matrix is p-by-p if you asked for #ComputeFullV, and is p-by-m if you asked for #ComputeThinV.
+ *
+ * The \a m first columns of \a V are the right singular vectors of the matrix being decomposed.
+ *
+ * This method asserts that you asked for \a V to be computed.
+ */
+ const MatrixVType& matrixV() const
+ {
+ eigen_assert(m_isInitialized && "SVD is not initialized.");
+ eigen_assert(computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
+ return m_matrixV;
+ }
+
+ /** \returns the vector of singular values.
+ *
+ * For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the
+ * returned vector has size \a m. Singular values are always sorted in decreasing order.
+ */
+ const SingularValuesType& singularValues() const
+ {
+ eigen_assert(m_isInitialized && "SVD is not initialized.");
+ return m_singularValues;
+ }
+
+
+
+ /** \returns the number of singular values that are not exactly 0 */
+ Index nonzeroSingularValues() const
+ {
+ eigen_assert(m_isInitialized && "SVD is not initialized.");
+ return m_nonzeroSingularValues;
+ }
+
+
+ /** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
+ inline bool computeU() const { return m_computeFullU || m_computeThinU; }
+ /** \returns true if \a V (full or thin) is asked for in this SVD decomposition */
+ inline bool computeV() const { return m_computeFullV || m_computeThinV; }
+
+
+ inline Index rows() const { return m_rows; }
+ inline Index cols() const { return m_cols; }
+
+
+protected:
+ // return true if already allocated
+  bool allocate(Index rows, Index cols, unsigned int computationOptions);
+
+ MatrixUType m_matrixU;
+ MatrixVType m_matrixV;
+ SingularValuesType m_singularValues;
+ bool m_isInitialized, m_isAllocated;
+ bool m_computeFullU, m_computeThinU;
+ bool m_computeFullV, m_computeThinV;
+ unsigned int m_computationOptions;
+ Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
+
+
+ /** \brief Default Constructor.
+ *
+ * Default constructor of SVDBase
+ */
+ SVDBase()
+ : m_isInitialized(false),
+ m_isAllocated(false),
+ m_computationOptions(0),
+ m_rows(-1), m_cols(-1)
+ {}
+
+
+};
+
+
+template<typename MatrixType>
+bool SVDBase<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
+{
+ eigen_assert(rows >= 0 && cols >= 0);
+
+ if (m_isAllocated &&
+ rows == m_rows &&
+ cols == m_cols &&
+ computationOptions == m_computationOptions)
+ {
+ return true;
+ }
+
+ m_rows = rows;
+ m_cols = cols;
+ m_isInitialized = false;
+ m_isAllocated = true;
+ m_computationOptions = computationOptions;
+ m_computeFullU = (computationOptions & ComputeFullU) != 0;
+ m_computeThinU = (computationOptions & ComputeThinU) != 0;
+ m_computeFullV = (computationOptions & ComputeFullV) != 0;
+ m_computeThinV = (computationOptions & ComputeThinV) != 0;
+ eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U");
+ eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V");
+ eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
+ "SVDBase: thin U and V are only available when your matrix has a dynamic number of columns.");
+
+ m_diagSize = (std::min)(m_rows, m_cols);
+ m_singularValues.resize(m_diagSize);
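+  // For fixed-size matrix types, U and V are statically sized, so only the dynamic case needs a resize.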
+ if(RowsAtCompileTime==Dynamic)
+ m_matrixU.resize(m_rows, m_computeFullU ? m_rows
+ : m_computeThinU ? m_diagSize
+ : 0);
+ if(ColsAtCompileTime==Dynamic)
+ m_matrixV.resize(m_cols, m_computeFullV ? m_cols
+ : m_computeThinV ? m_diagSize
+ : 0);
+
+ return false;
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SVD_H
diff --git a/unsupported/Eigen/src/SVD/TODOBdcsvd.txt b/unsupported/Eigen/src/SVD/TODOBdcsvd.txt
new file mode 100644
index 000000000..0bc9a46e6
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/TODOBdcsvd.txt
@@ -0,0 +1,29 @@
+TO DO LIST
+
+
+
+(optional optimization) - do all the allocations in the allocate part
+                        - support static matrices
+                        - return an error at compilation time when using integer matrices (int, long, std::complex<int>, ...)
+
+to finish the algorithm:
+  -implement the last part of the algorithm as described in the reference paper.
+   You may find more information on that part in this paper
+
+  -replace the call to JacobiSVD at the end of the divide algorithm, just after the call to
+   deflation.
+
+(suggested step-by-step resolution)
+ 0) comment out the call to Jacobi in the last part of the divide method and everything right after
+    it until the end of the method. The commented-out code can serve as a guideline for steps 3), 4) and 6)
+ 1) solve the secular equation (characteristic equation) on the values that are not null (zi!=0 and di!=0), after the deflation,
+    which should be uncommented in the divide method
+ 2) remember the singular values that are already computed (zi=0)
+ 3) place the singular values found in m_computed at the right positions (together with the ones found in step 2) ),
+    in decreasing order
+ 4) set the first column of m_computed to zero (except its first element)
+ 5) compute all the singular vectors when CompV is set to true and only the left vectors when
+    CompV is set to false
+ 6) multiply naiveU and naiveV on the right by the matrices found (only naiveU when CompV is set to
+    false); beware that if CompU is false, naiveU has only 2 rows
+ 7) delete everything commented out in step 0)
diff --git a/unsupported/Eigen/src/SVD/doneInBDCSVD.txt b/unsupported/Eigen/src/SVD/doneInBDCSVD.txt
new file mode 100644
index 000000000..8563ddab8
--- /dev/null
+++ b/unsupported/Eigen/src/SVD/doneInBDCSVD.txt
@@ -0,0 +1,21 @@
+This unsupported package is about a divide and conquer algorithm to compute the SVD.
+
+The implementation follows this reference paper as closely as possible:
+http://www.cs.yale.edu/publications/techreports/tr933.pdf
+
+The code documentation uses the same variable names as the reference paper. The code, deflation included, is
+working, but there are a few things that could be optimised, as explained in TODOBdcsvd.txt.
+
+In the code, comments mark the line where the third step of the algorithm would go, so one can simply add the call
+to a function performing the last part of the algorithm without needing any knowledge of the part we implemented.
+
+In TODOBdcsvd.txt we explain the main difficulty of the last part and suggest a reference paper to help solve it.
+
+The implementation has trouble with fixed-size matrices.
+
+In the current implementation, it returns zero matrices when asked to compute the SVD of an integer matrix.
+
+
+Paper for the third part:
+http://www.stat.uchicago.edu/~lekheng/courses/302/classics/greengard-rokhlin.pdf
+
diff --git a/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h b/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h
index fd24a732d..e9ec746e3 100644
--- a/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h
+++ b/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h
@@ -12,6 +12,12 @@
namespace Eigen {
+#if 0
+
+// NOTE: This has to be reimplemented as a specialization of BlockImpl< DynamicSparseMatrix<_Scalar, _Options, _Index>, ... >
+// See SparseBlock.h for an example
+
+
/***************************************************************************
* specialisation for DynamicSparseMatrix
***************************************************************************/
@@ -109,6 +115,8 @@ class SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options, _Index>, Size>
};
+#endif
+
} // end namespace Eigen
#endif // EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H
diff --git a/unsupported/Eigen/src/SparseExtra/MarketIO.h b/unsupported/Eigen/src/SparseExtra/MarketIO.h
index de958de9f..7aafce928 100644
--- a/unsupported/Eigen/src/SparseExtra/MarketIO.h
+++ b/unsupported/Eigen/src/SparseExtra/MarketIO.h
@@ -116,7 +116,7 @@ inline bool getMarketHeader(const std::string& filename, int& sym, bool& iscompl
std::string line;
// The matrix header is always the first line in the file
- std::getline(in, line); assert(in.good());
+ std::getline(in, line); eigen_assert(in.good());
std::stringstream fmtline(line);
std::string substr[5];
@@ -200,11 +200,11 @@ bool loadMarketVector(VectorType& vec, const std::string& filename)
int n(0), col(0);
do
{ // Skip comments
- std::getline(in, line); assert(in.good());
+ std::getline(in, line); eigen_assert(in.good());
} while (line[0] == '%');
std::istringstream newline(line);
newline >> n >> col;
- assert(n>0 && col>0);
+ eigen_assert(n>0 && col>0);
vec.resize(n);
int i = 0;
Scalar value;
diff --git a/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h b/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h
index 4716b68e7..bf13cf21f 100644
--- a/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h
+++ b/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h
@@ -184,9 +184,20 @@ class MatrixMarketIterator
// if (S_ISDIR(st_buf.st_mode)) continue;
// Determine from the header if it is a matrix or a right hand side
- bool isvector,iscomplex;
+ bool isvector,iscomplex=false;
if(!getMarketHeader(curfile,m_sym,iscomplex,isvector)) continue;
if(isvector) continue;
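+      // Skip matrices whose field (real vs. complex) does not match this iterator's Scalar type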
+ if (!iscomplex)
+ {
+ if(internal::is_same<Scalar, std::complex<float> >::value || internal::is_same<Scalar, std::complex<double> >::value)
+ continue;
+ }
+ if (iscomplex)
+ {
+ if(internal::is_same<Scalar, float>::value || internal::is_same<Scalar, double>::value)
+ continue;
+ }
+
// Get the matrix name
std::string filename = m_curs_id->d_name;
diff --git a/unsupported/Eigen/src/Splines/Spline.h b/unsupported/Eigen/src/Splines/Spline.h
index 3680f013a..771f10432 100644
--- a/unsupported/Eigen/src/Splines/Spline.h
+++ b/unsupported/Eigen/src/Splines/Spline.h
@@ -16,7 +16,7 @@ namespace Eigen
{
/**
* \ingroup Splines_Module
- * \class Spline class
+ * \class Spline
* \brief A class representing multi-dimensional spline curves.
*
* The class represents B-splines with non-uniform knot vectors. Each control
@@ -50,6 +50,21 @@ namespace Eigen
/** \brief The data type representing the spline's control points. */
typedef typename SplineTraits<Spline>::ControlPointVectorType ControlPointVectorType;
+
+ /**
+ * \brief Creates a (constant) zero spline.
+ * For Splines with dynamic degree, the resulting degree will be 0.
+ **/
+ Spline()
+ : m_knots(1, (Degree==Dynamic ? 2 : 2*Degree+2))
+ , m_ctrls(ControlPointVectorType::Zero(2,(Degree==Dynamic ? 1 : Degree+1)))
+ {
+     // in theory this code could go into the initializer list, but it would become
+     // pretty much unreadable ...
+ enum { MinDegree = (Degree==Dynamic ? 0 : Degree) };
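+      // Clamped knot vector of the zero spline: (MinDegree+1) zeros followed by (MinDegree+1) ones.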
+ m_knots.template segment<MinDegree+1>(0) = Array<Scalar,1,MinDegree+1>::Zero();
+ m_knots.template segment<MinDegree+1>(MinDegree+1) = Array<Scalar,1,MinDegree+1>::Ones();
+ }
/**
* \brief Creates a spline from a knot vector and control points.
@@ -280,11 +295,7 @@ namespace Eigen
enum { Order = SplineTraits<SplineType>::OrderAtCompileTime };
enum { DerivativeOrder = DerivativeType::ColsAtCompileTime };
- typedef typename SplineTraits<SplineType>::Scalar Scalar;
-
- typedef typename SplineTraits<SplineType>::BasisVectorType BasisVectorType;
typedef typename SplineTraits<SplineType>::ControlPointVectorType ControlPointVectorType;
-
typedef typename SplineTraits<SplineType,DerivativeOrder>::BasisDerivativeType BasisDerivativeType;
typedef typename BasisDerivativeType::ConstRowXpr BasisDerivativeRowXpr;
@@ -343,7 +354,6 @@ namespace Eigen
typedef typename SplineTraits<SplineType>::Scalar Scalar;
typedef typename SplineTraits<SplineType>::BasisVectorType BasisVectorType;
typedef typename SplineTraits<SplineType>::KnotVectorType KnotVectorType;
- typedef typename SplineTraits<SplineType>::ControlPointVectorType ControlPointVectorType;
const KnotVectorType& U = spline.knots();
diff --git a/unsupported/Eigen/src/Splines/SplineFitting.h b/unsupported/Eigen/src/Splines/SplineFitting.h
index 1b566332f..0265d532c 100644
--- a/unsupported/Eigen/src/Splines/SplineFitting.h
+++ b/unsupported/Eigen/src/Splines/SplineFitting.h
@@ -40,8 +40,6 @@ namespace Eigen
template <typename KnotVectorType>
void KnotAveraging(const KnotVectorType& parameters, DenseIndex degree, KnotVectorType& knots)
{
- typedef typename KnotVectorType::Scalar Scalar;
-
knots.resize(parameters.size()+degree+1);
for (DenseIndex j=1; j<parameters.size()-degree; ++j)
@@ -118,7 +116,6 @@ namespace Eigen
SplineType SplineFitting<SplineType>::Interpolate(const PointArrayType& pts, DenseIndex degree, const KnotVectorType& knot_parameters)
{
typedef typename SplineType::KnotVectorType::Scalar Scalar;
- typedef typename SplineType::BasisVectorType BasisVectorType;
typedef typename SplineType::ControlPointVectorType ControlPointVectorType;
typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;