-rw-r--r--  Eigen/Core | 2
-rw-r--r--  Eigen/src/Cholesky/LDLT.h | 7
-rw-r--r--  Eigen/src/Cholesky/LLT.h | 8
-rw-r--r--  Eigen/src/Cholesky/LLT_MKL.h | 2
-rw-r--r--  Eigen/src/Core/Assign.h | 15
-rw-r--r--  Eigen/src/Core/Block.h | 5
-rw-r--r--  Eigen/src/Core/DenseBase.h | 8
-rw-r--r--  Eigen/src/Core/DiagonalProduct.h | 2
-rw-r--r--  Eigen/src/Core/Functors.h | 41
-rw-r--r--  Eigen/src/Core/MapBase.h | 2
-rw-r--r--  Eigen/src/Core/MatrixBase.h | 2
-rw-r--r--  Eigen/src/Core/PermutationMatrix.h | 29
-rw-r--r--  Eigen/src/Core/PlainObjectBase.h | 2
-rw-r--r--  Eigen/src/Core/Ref.h | 16
-rw-r--r--  Eigen/src/Core/ReturnByValue.h | 11
-rw-r--r--  Eigen/src/Core/products/CoeffBasedProduct.h | 73
-rw-r--r--  Eigen/src/Core/products/Parallelizer.h | 17
-rw-r--r--  Eigen/src/Core/util/Constants.h | 13
-rw-r--r--  Eigen/src/Core/util/Macros.h | 12
-rw-r--r--  Eigen/src/Core/util/Memory.h | 2
-rw-r--r--  Eigen/src/Core/util/NonMPL2.h | 7
-rw-r--r--  Eigen/src/Eigenvalues/ComplexEigenSolver.h | 8
-rw-r--r--  Eigen/src/Eigenvalues/EigenSolver.h | 9
-rw-r--r--  Eigen/src/Eigenvalues/GeneralizedEigenSolver.h | 9
-rw-r--r--  Eigen/src/Eigenvalues/RealQZ.h | 12
-rw-r--r--  Eigen/src/Eigenvalues/RealSchur.h | 12
-rw-r--r--  Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h | 208
-rw-r--r--  Eigen/src/Geometry/AlignedBox.h | 83
-rw-r--r--  Eigen/src/Geometry/Homogeneous.h | 2
-rw-r--r--  Eigen/src/Geometry/Quaternion.h | 22
-rw-r--r--  Eigen/src/IterativeLinearSolvers/BiCGSTAB.h | 15
-rw-r--r--  Eigen/src/IterativeLinearSolvers/ConjugateGradient.h | 30
-rw-r--r--  Eigen/src/IterativeLinearSolvers/IncompleteLUT.h | 4
-rw-r--r--  Eigen/src/LU/FullPivLU.h | 8
-rw-r--r--  Eigen/src/LU/PartialPivLU.h | 8
-rw-r--r--  Eigen/src/OrderingMethods/Amd.h | 16
-rw-r--r--  Eigen/src/QR/ColPivHouseholderQR.h | 32
-rw-r--r--  Eigen/src/QR/FullPivHouseholderQR.h | 8
-rw-r--r--  Eigen/src/QR/HouseholderQR.h | 98
-rw-r--r--  Eigen/src/QR/HouseholderQR_MKL.h | 30
-rw-r--r--  Eigen/src/SPQRSupport/SuiteSparseQRSupport.h | 60
-rw-r--r--  Eigen/src/SVD/JacobiSVD.h | 7
-rw-r--r--  Eigen/src/SparseCore/SparseBlock.h | 46
-rw-r--r--  Eigen/src/SparseCore/SparseDenseProduct.h | 2
-rw-r--r--  Eigen/src/SparseCore/SparseMatrixBase.h | 6
-rw-r--r--  Eigen/src/SparseCore/TriangularSolver.h | 2
-rw-r--r--  Eigen/src/SparseLU/SparseLU.h | 81
-rw-r--r--  Eigen/src/SparseLU/SparseLU_pivotL.h | 6
-rw-r--r--  Eigen/src/plugins/ArrayCwiseBinaryOps.h | 54
-rw-r--r--  Eigen/src/plugins/ArrayCwiseUnaryOps.h | 16
-rw-r--r--  Eigen/src/plugins/MatrixCwiseBinaryOps.h | 17
-rw-r--r--  Eigen/src/plugins/MatrixCwiseUnaryOps.h | 15
-rw-r--r--  blas/xerbla.cpp | 2
-rw-r--r--  cmake/EigenTesting.cmake | 13
-rw-r--r--  cmake/FindMetis.cmake | 2
-rw-r--r--  doc/CMakeLists.txt | 3
-rw-r--r--  doc/Doxyfile.in | 14
-rw-r--r--  doc/TopicMultithreading.dox | 2
-rw-r--r--  doc/special_examples/CMakeLists.txt | 7
-rw-r--r--  failtest/CMakeLists.txt | 11
-rw-r--r--  failtest/colpivqr_int.cpp | 14
-rw-r--r--  failtest/eigensolver_cplx.cpp | 14
-rw-r--r--  failtest/eigensolver_int.cpp | 14
-rw-r--r--  failtest/fullpivlu_int.cpp | 14
-rw-r--r--  failtest/fullpivqr_int.cpp | 14
-rw-r--r--  failtest/jacobisvd_int.cpp | 14
-rw-r--r--  failtest/ldlt_int.cpp | 14
-rw-r--r--  failtest/llt_int.cpp | 14
-rw-r--r--  failtest/partialpivlu_int.cpp | 14
-rw-r--r--  failtest/qr_int.cpp | 14
-rw-r--r--  test/CMakeLists.txt | 6
-rw-r--r--  test/array.cpp | 2
-rw-r--r--  test/array_for_matrix.cpp | 1
-rw-r--r--  test/conjugate_gradient.cpp | 6
-rw-r--r--  test/lu.cpp | 9
-rw-r--r--  test/mapped_matrix.cpp | 24
-rw-r--r--  test/product_extra.cpp | 64
-rw-r--r--  test/product_mmtr.cpp | 4
-rw-r--r--  test/real_qz.cpp | 16
-rw-r--r--  test/ref.cpp | 24
-rw-r--r--  test/sparse_basic.cpp | 65
-rw-r--r--  test/sparse_solver.h | 5
-rw-r--r--  test/sparselu.cpp | 3
-rw-r--r--  unsupported/Eigen/CMakeLists.txt | 6
-rw-r--r--  unsupported/Eigen/src/CMakeLists.txt | 3
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/GMRES.h | 15
-rw-r--r--  unsupported/Eigen/src/IterativeSolvers/MINRES.h | 70
-rw-r--r--  unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt | 2
-rw-r--r--  unsupported/test/minres.cpp | 25
-rw-r--r--  unsupported/test/mpreal/mpreal.h | 3
90 files changed, 1241 insertions, 508 deletions
diff --git a/Eigen/Core b/Eigen/Core
index c87f99df3..509c529e1 100644
--- a/Eigen/Core
+++ b/Eigen/Core
@@ -123,7 +123,7 @@
#undef bool
#undef vector
#undef pixel
- #elif defined __ARM_NEON__
+ #elif defined __ARM_NEON
#define EIGEN_VECTORIZE
#define EIGEN_VECTORIZE_NEON
#include <arm_neon.h>
diff --git a/Eigen/src/Cholesky/LDLT.h b/Eigen/src/Cholesky/LDLT.h
index 02ab93880..e01ae8233 100644
--- a/Eigen/src/Cholesky/LDLT.h
+++ b/Eigen/src/Cholesky/LDLT.h
@@ -235,6 +235,11 @@ template<typename _MatrixType, int _UpLo> class LDLT
}
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
/** \internal
* Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U.
@@ -434,6 +439,8 @@ template<typename MatrixType> struct LDLT_Traits<MatrixType,Upper>
template<typename MatrixType, int _UpLo>
LDLT<MatrixType,_UpLo>& LDLT<MatrixType,_UpLo>::compute(const MatrixType& a)
{
+ check_template_parameters();
+
eigen_assert(a.rows()==a.cols());
const Index size = a.rows();
diff --git a/Eigen/src/Cholesky/LLT.h b/Eigen/src/Cholesky/LLT.h
index 2e6189f7d..59723a63d 100644
--- a/Eigen/src/Cholesky/LLT.h
+++ b/Eigen/src/Cholesky/LLT.h
@@ -174,6 +174,12 @@ template<typename _MatrixType, int _UpLo> class LLT
LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1);
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
+
/** \internal
* Used to compute and store L
* The strict upper part is not used and even not initialized.
@@ -384,6 +390,8 @@ template<typename MatrixType> struct LLT_Traits<MatrixType,Upper>
template<typename MatrixType, int _UpLo>
LLT<MatrixType,_UpLo>& LLT<MatrixType,_UpLo>::compute(const MatrixType& a)
{
+ check_template_parameters();
+
eigen_assert(a.rows()==a.cols());
const Index size = a.rows();
m_matrix.resize(size, size);
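Note: the check_template_parameters() guard added to LDLT and LLT rejects integer scalar types at compile time. A minimal sketch of what still compiles and what would now trip the assertion (Matrix2i is used purely for illustration):

    #include <Eigen/Cholesky>

    int main()
    {
      Eigen::Matrix2d A;
      A << 4, 1,
           1, 3;
      Eigen::LLT<Eigen::Matrix2d> llt(A);   // fine: floating-point scalar
      // Eigen::LLT<Eigen::Matrix2i> illt(Eigen::Matrix2i::Identity());
      //   ^ computing an LLT of an integer matrix now hits EIGEN_STATIC_ASSERT_NON_INTEGER
      return llt.info() == Eigen::Success ? 0 : 1;
    }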
diff --git a/Eigen/src/Cholesky/LLT_MKL.h b/Eigen/src/Cholesky/LLT_MKL.h
index 64daa445c..66675d747 100644
--- a/Eigen/src/Cholesky/LLT_MKL.h
+++ b/Eigen/src/Cholesky/LLT_MKL.h
@@ -60,7 +60,7 @@ template<> struct mkl_llt<EIGTYPE> \
lda = m.outerStride(); \
\
info = LAPACKE_##MKLPREFIX##potrf( matrix_order, uplo, size, (MKLTYPE*)a, lda ); \
- info = (info==0) ? Success : NumericalIssue; \
+ info = (info==0) ? -1 : info>0 ? info-1 : size; \
return info; \
} \
}; \
diff --git a/Eigen/src/Core/Assign.h b/Eigen/src/Core/Assign.h
index 1dccc2f42..bcfc261e5 100644
--- a/Eigen/src/Core/Assign.h
+++ b/Eigen/src/Core/Assign.h
@@ -439,19 +439,26 @@ struct assign_impl<Derived1, Derived2, SliceVectorizedTraversal, NoUnrolling, Ve
typedef typename Derived1::Index Index;
static inline void run(Derived1 &dst, const Derived2 &src)
{
- typedef packet_traits<typename Derived1::Scalar> PacketTraits;
+ typedef typename Derived1::Scalar Scalar;
+ typedef packet_traits<Scalar> PacketTraits;
enum {
packetSize = PacketTraits::size,
alignable = PacketTraits::AlignedOnScalar,
- dstAlignment = alignable ? Aligned : int(assign_traits<Derived1,Derived2>::DstIsAligned) ,
+ dstIsAligned = assign_traits<Derived1,Derived2>::DstIsAligned,
+ dstAlignment = alignable ? Aligned : int(dstIsAligned),
srcAlignment = assign_traits<Derived1,Derived2>::JointAlignment
};
+ const Scalar *dst_ptr = &dst.coeffRef(0,0);
+ if((!bool(dstIsAligned)) && (Index(dst_ptr) % sizeof(Scalar))>0)
+ {
+ // the pointer is not aligned on a scalar boundary, so alignment is not possible
+ return assign_impl<Derived1,Derived2,DefaultTraversal,NoUnrolling>::run(dst, src);
+ }
const Index packetAlignedMask = packetSize - 1;
const Index innerSize = dst.innerSize();
const Index outerSize = dst.outerSize();
const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0;
- Index alignedStart = ((!alignable) || assign_traits<Derived1,Derived2>::DstIsAligned) ? 0
- : internal::first_aligned(&dst.coeffRef(0,0), innerSize);
+ Index alignedStart = ((!alignable) || bool(dstIsAligned)) ? 0 : internal::first_aligned(dst_ptr, innerSize);
for(Index outer = 0; outer < outerSize; ++outer)
{
diff --git a/Eigen/src/Core/Block.h b/Eigen/src/Core/Block.h
index 87bedfa46..827894443 100644
--- a/Eigen/src/Core/Block.h
+++ b/Eigen/src/Core/Block.h
@@ -66,8 +66,9 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprTyp
: ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime)
: int(traits<XprType>::MaxColsAtCompileTime),
XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0,
- IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
- : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
+ IsDense = is_same<StorageKind,Dense>::value,
+ IsRowMajor = (IsDense&&MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1
+ : (IsDense&&MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0
: XprTypeIsRowMajor,
HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor),
InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
diff --git a/Eigen/src/Core/DenseBase.h b/Eigen/src/Core/DenseBase.h
index 04862f374..32f60d8dc 100644
--- a/Eigen/src/Core/DenseBase.h
+++ b/Eigen/src/Core/DenseBase.h
@@ -266,11 +266,13 @@ template<typename Derived> class DenseBase
template<typename OtherDerived>
Derived& operator=(const ReturnByValue<OtherDerived>& func);
-#ifndef EIGEN_PARSED_BY_DOXYGEN
- /** Copies \a other into *this without evaluating other. \returns a reference to *this. */
+ /** \internal Copies \a other into *this without evaluating other. \returns a reference to *this. */
template<typename OtherDerived>
Derived& lazyAssign(const DenseBase<OtherDerived>& other);
-#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+ /** \internal Evaluates \a other into *this. \returns a reference to *this. */
+ template<typename OtherDerived>
+ Derived& lazyAssign(const ReturnByValue<OtherDerived>& other);
CommaInitializer<Derived> operator<< (const Scalar& s);
diff --git a/Eigen/src/Core/DiagonalProduct.h b/Eigen/src/Core/DiagonalProduct.h
index c03a0c2e1..00f8f2915 100644
--- a/Eigen/src/Core/DiagonalProduct.h
+++ b/Eigen/src/Core/DiagonalProduct.h
@@ -34,7 +34,7 @@ struct traits<DiagonalProduct<MatrixType, DiagonalType, ProductOrder> >
_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && (_ScalarAccessOnDiag || (bool(int(DiagonalType::DiagonalVectorType::Flags)&PacketAccessBit))),
_LinearAccessMask = (RowsAtCompileTime==1 || ColsAtCompileTime==1) ? LinearAccessBit : 0,
- Flags = ((HereditaryBits|_LinearAccessMask) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0) | AlignedBit,//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
+ Flags = ((HereditaryBits|_LinearAccessMask|AlignedBit) & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0),//(int(MatrixType::Flags)&int(DiagonalType::DiagonalVectorType::Flags)&AlignedBit),
CoeffReadCost = NumTraits<Scalar>::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost
};
};
diff --git a/Eigen/src/Core/Functors.h b/Eigen/src/Core/Functors.h
index b08b967ff..5f14c6587 100644
--- a/Eigen/src/Core/Functors.h
+++ b/Eigen/src/Core/Functors.h
@@ -259,6 +259,47 @@ template<> struct functor_traits<scalar_boolean_or_op> {
};
};
+/** \internal
+ * \brief Template functors for comparison of two scalars
+ * \todo Implement packet-comparisons
+ */
+template<typename Scalar, ComparisonName cmp> struct scalar_cmp_op;
+
+template<typename Scalar, ComparisonName cmp>
+struct functor_traits<scalar_cmp_op<Scalar, cmp> > {
+ enum {
+ Cost = NumTraits<Scalar>::AddCost,
+ PacketAccess = false
+ };
+};
+
+template<ComparisonName Cmp, typename Scalar>
+struct result_of<scalar_cmp_op<Scalar, Cmp>(Scalar,Scalar)> {
+ typedef bool type;
+};
+
+
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_EQ> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+ EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a==b;}
+};
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_LT> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+ EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<b;}
+};
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_LE> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+ EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a<=b;}
+};
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_UNORD> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+ EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return !(a<=b || b<=a);}
+};
+template<typename Scalar> struct scalar_cmp_op<Scalar, cmp_NEQ> {
+ EIGEN_EMPTY_STRUCT_CTOR(scalar_cmp_op)
+ EIGEN_STRONG_INLINE bool operator()(const Scalar& a, const Scalar& b) const {return a!=b;}
+};
+
// unary functors:
/** \internal
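Note: these functors back the coefficient-wise comparison operators exposed through the Array API (see the ArrayCwiseBinaryOps.h changes in the diffstat above). A minimal usage sketch of that public-facing interface:

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      Eigen::ArrayXd a(3), b(3);
      a << 1, 2, 3;
      b << 3, 2, 1;
      std::cout << (a < b).transpose() << "\n";    // 1 0 0 : coefficient-wise "less than"
      std::cout << (a == b).count()    << "\n";    // 1     : number of equal coefficients
    }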
diff --git a/Eigen/src/Core/MapBase.h b/Eigen/src/Core/MapBase.h
index cebed2bb6..92e114129 100644
--- a/Eigen/src/Core/MapBase.h
+++ b/Eigen/src/Core/MapBase.h
@@ -157,7 +157,7 @@ template<typename Derived> class MapBase<Derived, ReadOnlyAccessors>
internal::inner_stride_at_compile_time<Derived>::ret==1),
PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
eigen_assert(EIGEN_IMPLIES(internal::traits<Derived>::Flags&AlignedBit, (size_t(m_data) % 16) == 0)
- && "data is not aligned");
+ && "input pointer is not aligned on a 16 byte boundary");
}
PointerType m_data;
diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h
index cc3279746..b67a7c119 100644
--- a/Eigen/src/Core/MatrixBase.h
+++ b/Eigen/src/Core/MatrixBase.h
@@ -159,13 +159,11 @@ template<typename Derived> class MatrixBase
template<typename OtherDerived>
Derived& operator=(const ReturnByValue<OtherDerived>& other);
-#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename ProductDerived, typename Lhs, typename Rhs>
Derived& lazyAssign(const ProductBase<ProductDerived, Lhs,Rhs>& other);
template<typename MatrixPower, typename Lhs, typename Rhs>
Derived& lazyAssign(const MatrixPowerProduct<MatrixPower, Lhs,Rhs>& other);
-#endif // not EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
Derived& operator+=(const MatrixBase<OtherDerived>& other);
diff --git a/Eigen/src/Core/PermutationMatrix.h b/Eigen/src/Core/PermutationMatrix.h
index f26f3e5cc..85ffae265 100644
--- a/Eigen/src/Core/PermutationMatrix.h
+++ b/Eigen/src/Core/PermutationMatrix.h
@@ -250,6 +250,35 @@ class PermutationBase : public EigenBase<Derived>
template<typename Other> friend
inline PlainPermutationType operator*(const Transpose<PermutationBase<Other> >& other, const PermutationBase& perm)
{ return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); }
+
+ /** \returns the determinant of the permutation matrix, which is either 1 or -1 depending on the parity of the permutation.
+ *
+ * This function is an O(\c n) procedure allocating a buffer of \c n booleans.
+ */
+ Index determinant() const
+ {
+ Index res = 1;
+ Index n = size();
+ Matrix<bool,RowsAtCompileTime,1,0,MaxRowsAtCompileTime> mask(n);
+ mask.fill(false);
+ Index r = 0;
+ while(r < n)
+ {
+ // search for the next seed
+ while(r<n && mask[r]) r++;
+ if(r>=n)
+ break;
+ // we got one, let's follow it until we are back to the seed
+ Index k0 = r++;
+ mask.coeffRef(k0) = true;
+ for(Index k=indices().coeff(k0); k!=k0; k=indices().coeff(k))
+ {
+ mask.coeffRef(k) = true;
+ res = -res;
+ }
+ }
+ return res;
+ }
protected:
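Note: a small usage sketch of the new determinant() member (the single transposition below is just an example of an odd permutation):

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      Eigen::PermutationMatrix<Eigen::Dynamic> P(3);
      P.setIdentity();
      P.applyTranspositionOnTheRight(0, 1);    // one swap -> odd permutation
      std::cout << P.determinant() << "\n";    // prints -1
    }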
diff --git a/Eigen/src/Core/PlainObjectBase.h b/Eigen/src/Core/PlainObjectBase.h
index dd34b59e5..8a3e4545a 100644
--- a/Eigen/src/Core/PlainObjectBase.h
+++ b/Eigen/src/Core/PlainObjectBase.h
@@ -573,6 +573,8 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
: (rows() == other.rows() && cols() == other.cols())))
&& "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
EIGEN_ONLY_USED_FOR_DEBUG(other);
+ if(this->size()==0)
+ resizeLike(other);
#else
resizeLike(other);
#endif
diff --git a/Eigen/src/Core/Ref.h b/Eigen/src/Core/Ref.h
index df87b1af4..f53674cff 100644
--- a/Eigen/src/Core/Ref.h
+++ b/Eigen/src/Core/Ref.h
@@ -108,7 +108,8 @@ struct traits<Ref<_PlainObjectType, _Options, _StrideType> >
OuterStrideMatch = Derived::IsVectorAtCompileTime
|| int(StrideType::OuterStrideAtCompileTime)==int(Dynamic) || int(StrideType::OuterStrideAtCompileTime)==int(Derived::OuterStrideAtCompileTime),
AlignmentMatch = (_Options!=Aligned) || ((PlainObjectType::Flags&AlignedBit)==0) || ((traits<Derived>::Flags&AlignedBit)==AlignedBit),
- MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch
+ ScalarTypeMatch = internal::is_same<typename PlainObjectType::Scalar, typename Derived::Scalar>::value,
+ MatchAtCompileTime = HasDirectAccess && StorageOrderMatch && InnerStrideMatch && OuterStrideMatch && AlignmentMatch && ScalarTypeMatch
};
typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
};
@@ -187,9 +188,11 @@ protected:
template<typename PlainObjectType, int Options, typename StrideType> class Ref
: public RefBase<Ref<PlainObjectType, Options, StrideType> >
{
+ private:
typedef internal::traits<Ref> Traits;
template<typename Derived>
- inline Ref(const PlainObjectBase<Derived>& expr);
+ inline Ref(const PlainObjectBase<Derived>& expr,
+ typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0);
public:
typedef RefBase<Ref> Base;
@@ -198,13 +201,15 @@ template<typename PlainObjectType, int Options, typename StrideType> class Ref
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename Derived>
- inline Ref(PlainObjectBase<Derived>& expr)
+ inline Ref(PlainObjectBase<Derived>& expr,
+ typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
{
EIGEN_STATIC_ASSERT(static_cast<bool>(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
Base::construct(expr.derived());
}
template<typename Derived>
- inline Ref(const DenseBase<Derived>& expr)
+ inline Ref(const DenseBase<Derived>& expr,
+ typename internal::enable_if<bool(Traits::template match<Derived>::MatchAtCompileTime),Derived>::type* = 0)
#else
template<typename Derived>
inline Ref(DenseBase<Derived>& expr)
@@ -231,7 +236,8 @@ template<typename TPlainObjectType, int Options, typename StrideType> class Ref<
EIGEN_DENSE_PUBLIC_INTERFACE(Ref)
template<typename Derived>
- inline Ref(const DenseBase<Derived>& expr)
+ inline Ref(const DenseBase<Derived>& expr,
+ typename internal::enable_if<bool(Traits::template match<Derived>::ScalarTypeMatch),Derived>::type* = 0)
{
// std::cout << match_helper<Derived>::HasDirectAccess << "," << match_helper<Derived>::OuterStrideMatch << "," << match_helper<Derived>::InnerStrideMatch << "\n";
// std::cout << int(StrideType::OuterStrideAtCompileTime) << " - " << int(Derived::OuterStrideAtCompileTime) << "\n";
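Note: the new ScalarTypeMatch flag and the enable_if guards tighten which expressions a Ref constructor will even consider, so mismatches are rejected during overload resolution. For reference, the typical Ref usage that keeps working unchanged:

    #include <Eigen/Core>

    // Ref binds compatible dense expressions without copying.
    void scale(Eigen::Ref<Eigen::VectorXd> v) { v *= 2.0; }

    int main()
    {
      Eigen::MatrixXd M = Eigen::MatrixXd::Ones(4, 4);
      scale(M.col(1));     // a column of a column-major matrix has inner stride 1 and binds directly
      // scale(M.row(1));  // would not compile: a row has inner stride 4, which the default Ref stride rejects
      return 0;
    }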
diff --git a/Eigen/src/Core/ReturnByValue.h b/Eigen/src/Core/ReturnByValue.h
index d66c24ba0..f635598dc 100644
--- a/Eigen/src/Core/ReturnByValue.h
+++ b/Eigen/src/Core/ReturnByValue.h
@@ -72,6 +72,8 @@ template<typename Derived> class ReturnByValue
const Unusable& coeff(Index,Index) const { return *reinterpret_cast<const Unusable*>(this); }
Unusable& coeffRef(Index) { return *reinterpret_cast<Unusable*>(this); }
Unusable& coeffRef(Index,Index) { return *reinterpret_cast<Unusable*>(this); }
+ template<int LoadMode> Unusable& packet(Index) const;
+ template<int LoadMode> Unusable& packet(Index, Index) const;
#endif
};
@@ -83,6 +85,15 @@ Derived& DenseBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
return derived();
}
+template<typename Derived>
+template<typename OtherDerived>
+Derived& DenseBase<Derived>::lazyAssign(const ReturnByValue<OtherDerived>& other)
+{
+ other.evalTo(derived());
+ return derived();
+}
+
+
} // end namespace Eigen
#endif // EIGEN_RETURNBYVALUE_H
diff --git a/Eigen/src/Core/products/CoeffBasedProduct.h b/Eigen/src/Core/products/CoeffBasedProduct.h
index 421f925e1..2a9d65b94 100644
--- a/Eigen/src/Core/products/CoeffBasedProduct.h
+++ b/Eigen/src/Core/products/CoeffBasedProduct.h
@@ -134,7 +134,7 @@ class CoeffBasedProduct
};
typedef internal::product_coeff_impl<CanVectorizeInner ? InnerVectorizedTraversal : DefaultTraversal,
- Unroll ? (InnerSize==0 ? 0 : InnerSize-1) : Dynamic,
+ Unroll ? InnerSize : Dynamic,
_LhsNested, _RhsNested, Scalar> ScalarCoeffImpl;
typedef CoeffBasedProduct<LhsNested,RhsNested,NestByRefBit> LazyCoeffBasedProductType;
@@ -185,7 +185,7 @@ class CoeffBasedProduct
{
PacketScalar res;
internal::product_packet_impl<Flags&RowMajorBit ? RowMajor : ColMajor,
- Unroll ? (InnerSize==0 ? 0 : InnerSize-1) : Dynamic,
+ Unroll ? InnerSize : Dynamic,
_LhsNested, _RhsNested, PacketScalar, LoadMode>
::run(row, col, m_lhs, m_rhs, res);
return res;
@@ -243,12 +243,12 @@ struct product_coeff_impl<DefaultTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
{
product_coeff_impl<DefaultTraversal, UnrollingIndex-1, Lhs, Rhs, RetScalar>::run(row, col, lhs, rhs, res);
- res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col);
+ res += lhs.coeff(row, UnrollingIndex-1) * rhs.coeff(UnrollingIndex-1, col);
}
};
template<typename Lhs, typename Rhs, typename RetScalar>
-struct product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
+struct product_coeff_impl<DefaultTraversal, 1, Lhs, Rhs, RetScalar>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
@@ -258,6 +258,16 @@ struct product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
};
template<typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
+{
+ typedef typename Lhs::Index Index;
+ static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, RetScalar &res)
+ {
+ res = RetScalar(0);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename RetScalar>
struct product_coeff_impl<DefaultTraversal, Dynamic, Lhs, Rhs, RetScalar>
{
typedef typename Lhs::Index Index;
@@ -293,6 +303,16 @@ struct product_coeff_vectorized_unroller<0, Lhs, Rhs, Packet>
}
};
+template<typename Lhs, typename Rhs, typename RetScalar>
+struct product_coeff_impl<InnerVectorizedTraversal, 0, Lhs, Rhs, RetScalar>
+{
+ typedef typename Lhs::Index Index;
+ static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, RetScalar &res)
+ {
+ res = 0;
+ }
+};
+
template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
struct product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
{
@@ -302,8 +322,7 @@ struct product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, Re
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, RetScalar &res)
{
Packet pres;
- product_coeff_vectorized_unroller<UnrollingIndex+1-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, pres);
- product_coeff_impl<DefaultTraversal,UnrollingIndex,Lhs,Rhs,RetScalar>::run(row, col, lhs, rhs, res);
+ product_coeff_vectorized_unroller<UnrollingIndex-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, pres);
res = predux(pres);
}
};
@@ -371,7 +390,7 @@ struct product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
{
product_packet_impl<RowMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, res);
- res = pmadd(pset1<Packet>(lhs.coeff(row, UnrollingIndex)), rhs.template packet<LoadMode>(UnrollingIndex, col), res);
+ res = pmadd(pset1<Packet>(lhs.coeff(row, UnrollingIndex-1)), rhs.template packet<LoadMode>(UnrollingIndex-1, col), res);
}
};
@@ -382,12 +401,12 @@ struct product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
{
product_packet_impl<ColMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, res);
- res = pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex), pset1<Packet>(rhs.coeff(UnrollingIndex, col)), res);
+ res = pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex-1), pset1<Packet>(rhs.coeff(UnrollingIndex-1, col)), res);
}
};
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
-struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
+struct product_packet_impl<RowMajor, 1, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
@@ -397,7 +416,7 @@ struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
};
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
-struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
+struct product_packet_impl<ColMajor, 1, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet &res)
@@ -407,15 +426,34 @@ struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
};
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
+{
+ typedef typename Lhs::Index Index;
+ static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Packet &res)
+ {
+ res = pset1<Packet>(0);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
+struct product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
+{
+ typedef typename Lhs::Index Index;
+ static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Packet &res)
+ {
+ res = pset1<Packet>(0);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res)
{
- eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix");
- res = pmul(pset1<Packet>(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
- for(Index i = 1; i < lhs.cols(); ++i)
- res = pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
+ res = pset1<Packet>(0);
+ for(Index i = 0; i < lhs.cols(); ++i)
+ res = pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
}
};
@@ -425,10 +463,9 @@ struct product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Packet& res)
{
- eigen_assert(lhs.cols()>0 && "you are using a non initialized matrix");
- res = pmul(lhs.template packet<LoadMode>(row, 0), pset1<Packet>(rhs.coeff(0, col)));
- for(Index i = 1; i < lhs.cols(); ++i)
- res = pmadd(lhs.template packet<LoadMode>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);
+ res = pset1<Packet>(0);
+ for(Index i = 0; i < lhs.cols(); ++i)
+ res = pmadd(lhs.template packet<LoadMode>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);
}
};
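Note: the reworked unrollers above make small fixed-size products with an empty inner dimension well defined (the result is simply zero) instead of reading from an empty operand. A sketch of the case these specializations cover:

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
      Eigen::Matrix<double, 2, 0> A;   // 2x0
      Eigen::Matrix<double, 0, 2> B;   // 0x2
      Eigen::Matrix2d C = A * B;       // inner dimension is 0 -> C is the 2x2 zero matrix
      std::cout << C << "\n";
    }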
diff --git a/Eigen/src/Core/products/Parallelizer.h b/Eigen/src/Core/products/Parallelizer.h
index 5c3e9b7ac..6937ee332 100644
--- a/Eigen/src/Core/products/Parallelizer.h
+++ b/Eigen/src/Core/products/Parallelizer.h
@@ -125,19 +125,22 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpos
if(transpose)
std::swap(rows,cols);
- Index blockCols = (cols / threads) & ~Index(0x3);
- Index blockRows = (rows / threads) & ~Index(0x7);
-
GemmParallelInfo<Index>* info = new GemmParallelInfo<Index>[threads];
- #pragma omp parallel for schedule(static,1) num_threads(threads)
- for(Index i=0; i<threads; ++i)
+ #pragma omp parallel num_threads(threads)
{
+ Index i = omp_get_thread_num();
+ // Note that the actual number of threads might be lower than the number of requested ones.
+ Index actual_threads = omp_get_num_threads();
+
+ Index blockCols = (cols / actual_threads) & ~Index(0x3);
+ Index blockRows = (rows / actual_threads) & ~Index(0x7);
+
Index r0 = i*blockRows;
- Index actualBlockRows = (i+1==threads) ? rows-r0 : blockRows;
+ Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;
Index c0 = i*blockCols;
- Index actualBlockCols = (i+1==threads) ? cols-c0 : blockCols;
+ Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
info[i].rhs_start = c0;
info[i].rhs_length = actualBlockCols;
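Note: the pattern above queries the team size from inside the parallel region because OpenMP may hand out fewer threads than requested. A standalone sketch of that idiom (compile with -fopenmp):

    #include <omp.h>
    #include <cstdio>

    int main()
    {
      const int requested = 8;
      #pragma omp parallel num_threads(requested)
      {
        int i      = omp_get_thread_num();
        int actual = omp_get_num_threads();   // may be smaller than 'requested'
        std::printf("block %d of %d\n", i, actual);
      }
    }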
diff --git a/Eigen/src/Core/util/Constants.h b/Eigen/src/Core/util/Constants.h
index 14b9624e1..1e6277c4f 100644
--- a/Eigen/src/Core/util/Constants.h
+++ b/Eigen/src/Core/util/Constants.h
@@ -433,6 +433,19 @@ struct MatrixXpr {};
/** The type used to identify an array expression */
struct ArrayXpr {};
+namespace internal {
+ /** \internal
+ * Constants for comparison functors
+ */
+ enum ComparisonName {
+ cmp_EQ = 0,
+ cmp_LT = 1,
+ cmp_LE = 2,
+ cmp_UNORD = 3,
+ cmp_NEQ = 4
+ };
+}
+
} // end namespace Eigen
#endif // EIGEN_CONSTANTS_H
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index 6d1e6c133..f69970f05 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -13,7 +13,7 @@
#define EIGEN_WORLD_VERSION 3
#define EIGEN_MAJOR_VERSION 2
-#define EIGEN_MINOR_VERSION 4
+#define EIGEN_MINOR_VERSION 5
#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \
(EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \
@@ -278,6 +278,7 @@ namespace Eigen {
#error Please tell me what is the equivalent of __attribute__((aligned(n))) for your compiler
#endif
+#define EIGEN_ALIGN8 EIGEN_ALIGN_TO_BOUNDARY(8)
#define EIGEN_ALIGN16 EIGEN_ALIGN_TO_BOUNDARY(16)
#if EIGEN_ALIGN_STATICALLY
@@ -313,7 +314,7 @@ namespace Eigen {
// just an empty macro !
#define EIGEN_EMPTY
-#if defined(_MSC_VER) && (_MSC_VER < 1900) && (!defined(__INTEL_COMPILER))
+#if defined(_MSC_VER) && (_MSC_VER < 1800) && (!defined(__INTEL_COMPILER))
#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
using Base::operator =;
#elif defined(__clang__) // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653)
@@ -332,8 +333,11 @@ namespace Eigen {
}
#endif
-#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
- EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
+/** \internal
+ * \brief Macro to manually inherit assignment operators.
+ * This is necessary, because the implicitly defined assignment operator gets deleted when a custom operator= is defined.
+ */
+#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived)
/**
* Just a side note. Commenting within defines works only by documenting
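Note: with the version bump to 3.2.5, client code can gate on this release using the macros shown above, e.g.:

    #include <Eigen/Core>
    #include <iostream>

    int main()
    {
    #if EIGEN_VERSION_AT_LEAST(3,2,5)
      std::cout << "Eigen " << EIGEN_WORLD_VERSION << '.'
                << EIGEN_MAJOR_VERSION << '.' << EIGEN_MINOR_VERSION << "\n";
    #endif
    }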
diff --git a/Eigen/src/Core/util/Memory.h b/Eigen/src/Core/util/Memory.h
index 624b17bbd..754e29bc5 100644
--- a/Eigen/src/Core/util/Memory.h
+++ b/Eigen/src/Core/util/Memory.h
@@ -523,7 +523,7 @@ template<typename T> struct smart_copy_helper<T,false> {
// you can overwrite Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
// to the appropriate stack allocation function
#ifndef EIGEN_ALLOCA
- #if (defined __linux__)
+ #if (defined __linux__) || (defined __APPLE__) || (defined alloca)
#define EIGEN_ALLOCA alloca
#elif defined(_MSC_VER)
#define EIGEN_ALLOCA _alloca
diff --git a/Eigen/src/Core/util/NonMPL2.h b/Eigen/src/Core/util/NonMPL2.h
index 629537a5e..1af67cf18 100644
--- a/Eigen/src/Core/util/NonMPL2.h
+++ b/Eigen/src/Core/util/NonMPL2.h
@@ -1,6 +1,3 @@
-// Force error if including non MPL2 license code regardless
-// EIGEN_MPL2_ONLY flag.
-
-//#ifdef EIGEN_MPL2_ONLY
+#ifdef EIGEN_MPL2_ONLY
#error Including non-MPL2 code in EIGEN_MPL2_ONLY mode
-//#endif
+#endif
diff --git a/Eigen/src/Eigenvalues/ComplexEigenSolver.h b/Eigen/src/Eigenvalues/ComplexEigenSolver.h
index af434bc9b..417c72944 100644
--- a/Eigen/src/Eigenvalues/ComplexEigenSolver.h
+++ b/Eigen/src/Eigenvalues/ComplexEigenSolver.h
@@ -234,6 +234,12 @@ template<typename _MatrixType> class ComplexEigenSolver
}
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
+
EigenvectorType m_eivec;
EigenvalueType m_eivalues;
ComplexSchur<MatrixType> m_schur;
@@ -251,6 +257,8 @@ template<typename MatrixType>
ComplexEigenSolver<MatrixType>&
ComplexEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
{
+ check_template_parameters();
+
// this code is inspired from Jampack
eigen_assert(matrix.cols() == matrix.rows());
diff --git a/Eigen/src/Eigenvalues/EigenSolver.h b/Eigen/src/Eigenvalues/EigenSolver.h
index 6e7150685..20c59a7a2 100644
--- a/Eigen/src/Eigenvalues/EigenSolver.h
+++ b/Eigen/src/Eigenvalues/EigenSolver.h
@@ -298,6 +298,13 @@ template<typename _MatrixType> class EigenSolver
void doComputeEigenvectors();
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
+ }
+
MatrixType m_eivec;
EigenvalueType m_eivalues;
bool m_isInitialized;
@@ -364,6 +371,8 @@ template<typename MatrixType>
EigenSolver<MatrixType>&
EigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
{
+ check_template_parameters();
+
using std::sqrt;
using std::abs;
eigen_assert(matrix.cols() == matrix.rows());
diff --git a/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h b/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
index dc240e13e..956e80d9e 100644
--- a/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
+++ b/Eigen/src/Eigenvalues/GeneralizedEigenSolver.h
@@ -263,6 +263,13 @@ template<typename _MatrixType> class GeneralizedEigenSolver
}
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ EIGEN_STATIC_ASSERT(!NumTraits<Scalar>::IsComplex, NUMERIC_TYPE_MUST_BE_REAL);
+ }
+
MatrixType m_eivec;
ComplexVectorType m_alphas;
VectorType m_betas;
@@ -290,6 +297,8 @@ template<typename MatrixType>
GeneralizedEigenSolver<MatrixType>&
GeneralizedEigenSolver<MatrixType>::compute(const MatrixType& A, const MatrixType& B, bool computeEigenvectors)
{
+ check_template_parameters();
+
using std::sqrt;
using std::abs;
eigen_assert(A.cols() == A.rows() && B.cols() == A.rows() && B.cols() == B.rows());
diff --git a/Eigen/src/Eigenvalues/RealQZ.h b/Eigen/src/Eigenvalues/RealQZ.h
index 4f36091db..fba6f1d77 100644
--- a/Eigen/src/Eigenvalues/RealQZ.h
+++ b/Eigen/src/Eigenvalues/RealQZ.h
@@ -240,21 +240,21 @@ namespace Eigen {
m_S.coeffRef(i,j) = Scalar(0.0);
m_S.rightCols(dim-j-1).applyOnTheLeft(i-1,i,G.adjoint());
m_T.rightCols(dim-i+1).applyOnTheLeft(i-1,i,G.adjoint());
+ // update Q
+ if (m_computeQZ)
+ m_Q.applyOnTheRight(i-1,i,G);
}
// update Q
- if (m_computeQZ)
- m_Q.applyOnTheRight(i-1,i,G);
- // kill T(i,i-1)
if(m_T.coeff(i,i-1)!=Scalar(0))
{
G.makeGivens(m_T.coeff(i,i), m_T.coeff(i,i-1), &m_T.coeffRef(i,i));
m_T.coeffRef(i,i-1) = Scalar(0.0);
m_S.applyOnTheRight(i,i-1,G);
m_T.topRows(i).applyOnTheRight(i,i-1,G);
+ // update Z
+ if (m_computeQZ)
+ m_Z.applyOnTheLeft(i,i-1,G.adjoint());
}
- // update Z
- if (m_computeQZ)
- m_Z.applyOnTheLeft(i,i-1,G.adjoint());
}
}
}
diff --git a/Eigen/src/Eigenvalues/RealSchur.h b/Eigen/src/Eigenvalues/RealSchur.h
index 64d136341..16d387537 100644
--- a/Eigen/src/Eigenvalues/RealSchur.h
+++ b/Eigen/src/Eigenvalues/RealSchur.h
@@ -234,7 +234,7 @@ template<typename _MatrixType> class RealSchur
typedef Matrix<Scalar,3,1> Vector3s;
Scalar computeNormOfT();
- Index findSmallSubdiagEntry(Index iu, const Scalar& norm);
+ Index findSmallSubdiagEntry(Index iu);
void splitOffTwoRows(Index iu, bool computeU, const Scalar& exshift);
void computeShift(Index iu, Index iter, Scalar& exshift, Vector3s& shiftInfo);
void initFrancisQRStep(Index il, Index iu, const Vector3s& shiftInfo, Index& im, Vector3s& firstHouseholderVector);
@@ -286,7 +286,7 @@ RealSchur<MatrixType>& RealSchur<MatrixType>::computeFromHessenberg(const HessMa
{
while (iu >= 0)
{
- Index il = findSmallSubdiagEntry(iu, norm);
+ Index il = findSmallSubdiagEntry(iu);
// Check for convergence
if (il == iu) // One root found
@@ -343,16 +343,14 @@ inline typename MatrixType::Scalar RealSchur<MatrixType>::computeNormOfT()
/** \internal Look for single small sub-diagonal element and returns its index */
template<typename MatrixType>
-inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu, const Scalar& norm)
+inline typename MatrixType::Index RealSchur<MatrixType>::findSmallSubdiagEntry(Index iu)
{
using std::abs;
Index res = iu;
while (res > 0)
{
Scalar s = abs(m_matT.coeff(res-1,res-1)) + abs(m_matT.coeff(res,res));
- if (s == 0.0)
- s = norm;
- if (abs(m_matT.coeff(res,res-1)) < NumTraits<Scalar>::epsilon() * s)
+ if (abs(m_matT.coeff(res,res-1)) <= NumTraits<Scalar>::epsilon() * s)
break;
res--;
}
@@ -457,9 +455,7 @@ inline void RealSchur<MatrixType>::initFrancisQRStep(Index il, Index iu, const V
const Scalar lhs = m_matT.coeff(im,im-1) * (abs(v.coeff(1)) + abs(v.coeff(2)));
const Scalar rhs = v.coeff(0) * (abs(m_matT.coeff(im-1,im-1)) + abs(Tmm) + abs(m_matT.coeff(im+1,im+1)));
if (abs(lhs) < NumTraits<Scalar>::epsilon() * rhs)
- {
break;
- }
}
}
diff --git a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
index be89de4a9..c2e76c884 100644
--- a/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
+++ b/Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h
@@ -351,6 +351,11 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
#endif // EIGEN2_SUPPORT
protected:
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
+
MatrixType m_eivec;
RealVectorType m_eivalues;
typename TridiagonalizationType::SubDiagonalType m_subdiag;
@@ -384,6 +389,8 @@ template<typename MatrixType>
SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
::compute(const MatrixType& matrix, int options)
{
+ check_template_parameters();
+
using std::abs;
eigen_assert(matrix.cols() == matrix.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
@@ -490,7 +497,12 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
typedef typename SolverType::MatrixType MatrixType;
typedef typename SolverType::RealVectorType VectorType;
typedef typename SolverType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ /** \internal
+ * Computes the roots of the characteristic polynomial of \a m.
+ * For numerical stability, m.trace() should be near zero and, to avoid over- or underflow, m should be normalized.
+ */
static inline void computeRoots(const MatrixType& m, VectorType& roots)
{
using std::sqrt;
@@ -510,39 +522,48 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
// Construct the parameters used in classifying the roots of the equation
// and in solving the equation for the roots in closed form.
Scalar c2_over_3 = c2*s_inv3;
- Scalar a_over_3 = (c1 - c2*c2_over_3)*s_inv3;
- if (a_over_3 > Scalar(0))
+ Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3;
+ if(a_over_3<Scalar(0))
a_over_3 = Scalar(0);
Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1));
- Scalar q = half_b*half_b + a_over_3*a_over_3*a_over_3;
- if (q > Scalar(0))
+ Scalar q = a_over_3*a_over_3*a_over_3 - half_b*half_b;
+ if(q<Scalar(0))
q = Scalar(0);
// Compute the eigenvalues by solving for the roots of the polynomial.
- Scalar rho = sqrt(-a_over_3);
- Scalar theta = atan2(sqrt(-q),half_b)*s_inv3;
+ Scalar rho = sqrt(a_over_3);
+ Scalar theta = atan2(sqrt(q),half_b)*s_inv3; // since sqrt(q) > 0, atan2 is in [0, pi] and theta is in [0, pi/3]
Scalar cos_theta = cos(theta);
Scalar sin_theta = sin(theta);
- roots(0) = c2_over_3 + Scalar(2)*rho*cos_theta;
- roots(1) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta);
- roots(2) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta);
-
- // Sort in increasing order.
- if (roots(0) >= roots(1))
- std::swap(roots(0),roots(1));
- if (roots(1) >= roots(2))
- {
- std::swap(roots(1),roots(2));
- if (roots(0) >= roots(1))
- std::swap(roots(0),roots(1));
- }
+ // roots are already sorted, since cos is monotonically decreasing on [0, pi]
+ roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3)
+ roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3)
+ roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta;
}
-
+
+ static inline bool extract_kernel(MatrixType& mat, Ref<VectorType> res, Ref<VectorType> representative)
+ {
+ using std::abs;
+ Index i0;
+ // Find non-zero column i0 (by construction, there must exist a non zero coefficient on the diagonal):
+ mat.diagonal().cwiseAbs().maxCoeff(&i0);
+ // mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector,
+ // so let's save it:
+ representative = mat.col(i0);
+ Scalar n0, n1;
+ VectorType c0, c1;
+ n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm();
+ n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm();
+ if(n0>n1) res = c0/std::sqrt(n0);
+ else res = c1/std::sqrt(n1);
+
+ return true;
+ }
+
static inline void run(SolverType& solver, const MatrixType& mat, int options)
{
- using std::sqrt;
eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
@@ -552,116 +573,72 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3
MatrixType& eivecs = solver.m_eivec;
VectorType& eivals = solver.m_eivalues;
- // map the matrix coefficients to [-1:1] to avoid over- and underflow.
- Scalar scale = mat.cwiseAbs().maxCoeff();
- MatrixType scaledMat = mat / scale;
+ // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.
+ Scalar shift = mat.trace() / Scalar(3);
+ // TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later
+ MatrixType scaledMat = mat.template selfadjointView<Lower>();
+ scaledMat.diagonal().array() -= shift;
+ Scalar scale = scaledMat.cwiseAbs().maxCoeff();
+ if(scale > 0) scaledMat /= scale; // TODO for scale==0 we could save the remaining operations
// compute the eigenvalues
computeRoots(scaledMat,eivals);
- // compute the eigen vectors
+ // compute the eigenvectors
if(computeEigenvectors)
{
- Scalar safeNorm2 = Eigen::NumTraits<Scalar>::epsilon();
if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon())
{
+ // All three eigenvalues are numerically the same
eivecs.setIdentity();
}
else
{
- scaledMat = scaledMat.template selfadjointView<Lower>();
MatrixType tmp;
tmp = scaledMat;
+ // Compute the eigenvector of the most distinct eigenvalue
Scalar d0 = eivals(2) - eivals(1);
Scalar d1 = eivals(1) - eivals(0);
- int k = d0 > d1 ? 2 : 0;
- d0 = d0 > d1 ? d0 : d1;
-
- tmp.diagonal().array () -= eivals(k);
- VectorType cross;
- Scalar n;
- n = (cross = tmp.row(0).cross(tmp.row(1))).squaredNorm();
-
- if(n>safeNorm2)
+ Index k(0), l(2);
+ if(d0 > d1)
{
- eivecs.col(k) = cross / sqrt(n);
+ std::swap(k,l);
+ d0 = d1;
}
- else
+
+ // Compute the eigenvector of index k
{
- n = (cross = tmp.row(0).cross(tmp.row(2))).squaredNorm();
-
- if(n>safeNorm2)
- {
- eivecs.col(k) = cross / sqrt(n);
- }
- else
- {
- n = (cross = tmp.row(1).cross(tmp.row(2))).squaredNorm();
-
- if(n>safeNorm2)
- {
- eivecs.col(k) = cross / sqrt(n);
- }
- else
- {
- // the input matrix and/or the eigenvaues probably contains some inf/NaN,
- // => exit
- // scale back to the original size.
- eivals *= scale;
-
- solver.m_info = NumericalIssue;
- solver.m_isInitialized = true;
- solver.m_eigenvectorsOk = computeEigenvectors;
- return;
- }
- }
+ tmp.diagonal().array () -= eivals(k);
+ // By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector.
+ extract_kernel(tmp, eivecs.col(k), eivecs.col(l));
}
- tmp = scaledMat;
- tmp.diagonal().array() -= eivals(1);
-
- if(d0<=Eigen::NumTraits<Scalar>::epsilon())
+ // Compute eigenvector of index l
+ if(d0<=2*Eigen::NumTraits<Scalar>::epsilon()*d1)
{
- eivecs.col(1) = eivecs.col(k).unitOrthogonal();
+ // If d0 is too small, then the two other eigenvalues are numerically the same,
+ // and thus we only have to ortho-normalize the near orthogonal vector we saved above.
+ eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(l);
+ eivecs.col(l).normalize();
}
else
{
- n = (cross = eivecs.col(k).cross(tmp.row(0))).squaredNorm();
- if(n>safeNorm2)
- {
- eivecs.col(1) = cross / sqrt(n);
- }
- else
- {
- n = (cross = eivecs.col(k).cross(tmp.row(1))).squaredNorm();
- if(n>safeNorm2)
- eivecs.col(1) = cross / sqrt(n);
- else
- {
- n = (cross = eivecs.col(k).cross(tmp.row(2))).squaredNorm();
- if(n>safeNorm2)
- eivecs.col(1) = cross / sqrt(n);
- else
- {
- // we should never reach this point,
- // if so the last two eigenvalues are likely to be very close to each other
- eivecs.col(1) = eivecs.col(k).unitOrthogonal();
- }
- }
- }
-
- // make sure that eivecs[1] is orthogonal to eivecs[2]
- // FIXME: this step should not be needed
- Scalar d = eivecs.col(1).dot(eivecs.col(k));
- eivecs.col(1) = (eivecs.col(1) - d * eivecs.col(k)).normalized();
+ tmp = scaledMat;
+ tmp.diagonal().array () -= eivals(l);
+
+ VectorType dummy;
+ extract_kernel(tmp, eivecs.col(l), dummy);
}
- eivecs.col(k==2 ? 0 : 2) = eivecs.col(k).cross(eivecs.col(1)).normalized();
+ // Compute last eigenvector from the other two
+ eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized();
}
}
+
// Rescale back to the original size.
eivals *= scale;
+ eivals.array() += shift;
solver.m_info = Success;
solver.m_isInitialized = true;
@@ -679,7 +656,7 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
static inline void computeRoots(const MatrixType& m, VectorType& roots)
{
using std::sqrt;
- const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*m(1,0)*m(1,0));
+ const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0)));
const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
roots(0) = t1 - t0;
roots(1) = t1 + t0;
@@ -688,6 +665,8 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
static inline void run(SolverType& solver, const MatrixType& mat, int options)
{
using std::sqrt;
+ using std::abs;
+
eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
eigen_assert((options&~(EigVecMask|GenEigMask))==0
&& (options&EigVecMask)!=EigVecMask
@@ -708,22 +687,29 @@ template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,2
// compute the eigen vectors
if(computeEigenvectors)
{
- scaledMat.diagonal().array () -= eivals(1);
- Scalar a2 = numext::abs2(scaledMat(0,0));
- Scalar c2 = numext::abs2(scaledMat(1,1));
- Scalar b2 = numext::abs2(scaledMat(1,0));
- if(a2>c2)
+ if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits<Scalar>::epsilon())
{
- eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
- eivecs.col(1) /= sqrt(a2+b2);
+ eivecs.setIdentity();
}
else
{
- eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
- eivecs.col(1) /= sqrt(c2+b2);
- }
+ scaledMat.diagonal().array () -= eivals(1);
+ Scalar a2 = numext::abs2(scaledMat(0,0));
+ Scalar c2 = numext::abs2(scaledMat(1,1));
+ Scalar b2 = numext::abs2(scaledMat(1,0));
+ if(a2>c2)
+ {
+ eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
+ eivecs.col(1) /= sqrt(a2+b2);
+ }
+ else
+ {
+ eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
+ eivecs.col(1) /= sqrt(c2+b2);
+ }
- eivecs.col(0) << eivecs.col(1).unitOrthogonal();
+ eivecs.col(0) << eivecs.col(1).unitOrthogonal();
+ }
}
// Rescale back to the original size.
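Note: the rewritten 3x3 and 2x2 paths are exercised through computeDirect(), which solves the shifted and scaled characteristic polynomial in closed form and extracts eigenvectors from rank-deficient kernels as above. A small usage sketch:

    #include <Eigen/Eigenvalues>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d A;
      A << 2, 1, 0,
           1, 2, 1,
           0, 1, 2;
      Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> es;
      es.computeDirect(A);                                  // closed-form 3x3 branch
      std::cout << es.eigenvalues().transpose() << "\n";    // sorted in increasing order
    }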
diff --git a/Eigen/src/Geometry/AlignedBox.h b/Eigen/src/Geometry/AlignedBox.h
index 8e186d57a..b226336de 100644
--- a/Eigen/src/Geometry/AlignedBox.h
+++ b/Eigen/src/Geometry/AlignedBox.h
@@ -19,10 +19,12 @@ namespace Eigen {
*
* \brief An axis aligned box
*
- * \param _Scalar the type of the scalar coefficients
- * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
+ * \tparam _Scalar the type of the scalar coefficients
+ * \tparam _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic.
*
* This class represents an axis aligned box as a pair of the minimal and maximal corners.
+ * \warning The result of most methods is undefined when applied to an empty box. You can check for empty boxes using isEmpty().
+ * \sa alignedboxtypedefs
*/
template <typename _Scalar, int _AmbientDim>
class AlignedBox
@@ -40,18 +42,21 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
/** Define constants to name the corners of a 1D, 2D or 3D axis aligned bounding box */
enum CornerType
{
- /** 1D names */
+ /** 1D names @{ */
Min=0, Max=1,
+ /** @} */
- /** Added names for 2D */
+ /** Identifier for 2D corner @{ */
BottomLeft=0, BottomRight=1,
TopLeft=2, TopRight=3,
+ /** @} */
- /** Added names for 3D */
+ /** Identifier for 3D corner @{ */
BottomLeftFloor=0, BottomRightFloor=1,
TopLeftFloor=2, TopRightFloor=3,
BottomLeftCeil=4, BottomRightCeil=5,
TopLeftCeil=6, TopRightCeil=7
+ /** @} */
};
@@ -63,34 +68,33 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
inline explicit AlignedBox(Index _dim) : m_min(_dim), m_max(_dim)
{ setEmpty(); }
- /** Constructs a box with extremities \a _min and \a _max. */
+ /** Constructs a box with extremities \a _min and \a _max.
+ * \warning If either component of \a _min is larger than the same component of \a _max, the constructed box is empty. */
template<typename OtherVectorType1, typename OtherVectorType2>
inline AlignedBox(const OtherVectorType1& _min, const OtherVectorType2& _max) : m_min(_min), m_max(_max) {}
/** Constructs a box containing a single point \a p. */
template<typename Derived>
- inline explicit AlignedBox(const MatrixBase<Derived>& a_p)
- {
- typename internal::nested<Derived,2>::type p(a_p.derived());
- m_min = p;
- m_max = p;
- }
+ inline explicit AlignedBox(const MatrixBase<Derived>& p) : m_min(p), m_max(m_min)
+ { }
~AlignedBox() {}
/** \returns the dimension in which the box holds */
inline Index dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size() : Index(AmbientDimAtCompileTime); }
- /** \deprecated use isEmpty */
+ /** \deprecated use isEmpty() */
inline bool isNull() const { return isEmpty(); }
- /** \deprecated use setEmpty */
+ /** \deprecated use setEmpty() */
inline void setNull() { setEmpty(); }
- /** \returns true if the box is empty. */
+ /** \returns true if the box is empty.
+ * \sa setEmpty */
inline bool isEmpty() const { return (m_min.array() > m_max.array()).any(); }
- /** Makes \c *this an empty box. */
+ /** Makes \c *this an empty box.
+ * \sa isEmpty */
inline void setEmpty()
{
m_min.setConstant( ScalarTraits::highest() );
@@ -175,27 +179,34 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
/** \returns true if the point \a p is inside the box \c *this. */
template<typename Derived>
- inline bool contains(const MatrixBase<Derived>& a_p) const
+ inline bool contains(const MatrixBase<Derived>& p) const
{
- typename internal::nested<Derived,2>::type p(a_p.derived());
- return (m_min.array()<=p.array()).all() && (p.array()<=m_max.array()).all();
+ typename internal::nested<Derived,2>::type p_n(p.derived());
+ return (m_min.array()<=p_n.array()).all() && (p_n.array()<=m_max.array()).all();
}
/** \returns true if the box \a b is entirely inside the box \c *this. */
inline bool contains(const AlignedBox& b) const
{ return (m_min.array()<=(b.min)().array()).all() && ((b.max)().array()<=m_max.array()).all(); }
- /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */
+ /** \returns true if the box \a b is intersecting the box \c *this.
+ * \sa intersection, clamp */
+ inline bool intersects(const AlignedBox& b) const
+ { return (m_min.array()<=(b.max)().array()).all() && ((b.min)().array()<=m_max.array()).all(); }
+
+ /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this.
+ * \sa extend(const AlignedBox&) */
template<typename Derived>
- inline AlignedBox& extend(const MatrixBase<Derived>& a_p)
+ inline AlignedBox& extend(const MatrixBase<Derived>& p)
{
- typename internal::nested<Derived,2>::type p(a_p.derived());
- m_min = m_min.cwiseMin(p);
- m_max = m_max.cwiseMax(p);
+ typename internal::nested<Derived,2>::type p_n(p.derived());
+ m_min = m_min.cwiseMin(p_n);
+ m_max = m_max.cwiseMax(p_n);
return *this;
}
- /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */
+ /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this.
+ * \sa merged, extend(const MatrixBase&) */
inline AlignedBox& extend(const AlignedBox& b)
{
m_min = m_min.cwiseMin(b.m_min);
@@ -203,7 +214,9 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
return *this;
}
- /** Clamps \c *this by the box \a b and returns a reference to \c *this. */
+ /** Clamps \c *this by the box \a b and returns a reference to \c *this.
+ * \note If the boxes don't intersect, the resulting box is empty.
+ * \sa intersection(), intersects() */
inline AlignedBox& clamp(const AlignedBox& b)
{
m_min = m_min.cwiseMax(b.m_min);
@@ -211,11 +224,15 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
return *this;
}
- /** Returns an AlignedBox that is the intersection of \a b and \c *this */
+ /** Returns an AlignedBox that is the intersection of \a b and \c *this
+ * \note If the boxes don't intersect, the resulting box is empty.
+ * \sa intersects(), clamp, contains() */
inline AlignedBox intersection(const AlignedBox& b) const
{return AlignedBox(m_min.cwiseMax(b.m_min), m_max.cwiseMin(b.m_max)); }
- /** Returns an AlignedBox that is the union of \a b and \c *this */
+ /** Returns an AlignedBox that is the union of \a b and \c *this.
+ * \note Merging with an empty box may result in a box bigger than \c *this.
+ * \sa extend(const AlignedBox&) */
inline AlignedBox merged(const AlignedBox& b) const
{ return AlignedBox(m_min.cwiseMin(b.m_min), m_max.cwiseMax(b.m_max)); }
@@ -231,20 +248,20 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
/** \returns the squared distance between the point \a p and the box \c *this,
* and zero if \a p is inside the box.
- * \sa exteriorDistance()
+ * \sa exteriorDistance(const MatrixBase&), squaredExteriorDistance(const AlignedBox&)
*/
template<typename Derived>
- inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& a_p) const;
+ inline Scalar squaredExteriorDistance(const MatrixBase<Derived>& p) const;
/** \returns the squared distance between the boxes \a b and \c *this,
* and zero if the boxes intersect.
- * \sa exteriorDistance()
+ * \sa exteriorDistance(const AlignedBox&), squaredExteriorDistance(const MatrixBase&)
*/
inline Scalar squaredExteriorDistance(const AlignedBox& b) const;
/** \returns the distance between the point \a p and the box \c *this,
* and zero if \a p is inside the box.
- * \sa squaredExteriorDistance()
+ * \sa squaredExteriorDistance(const MatrixBase&), exteriorDistance(const AlignedBox&)
*/
template<typename Derived>
inline NonInteger exteriorDistance(const MatrixBase<Derived>& p) const
@@ -252,7 +269,7 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim)
/** \returns the distance between the boxes \a b and \c *this,
* and zero if the boxes intersect.
- * \sa squaredExteriorDistance()
+ * \sa squaredExteriorDistance(const AlignedBox&), exteriorDistance(const MatrixBase&)
*/
inline NonInteger exteriorDistance(const AlignedBox& b) const
{ using std::sqrt; return sqrt(NonInteger(squaredExteriorDistance(b))); }
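Note: a brief sketch of the new intersects() predicate alongside the existing intersection():

    #include <Eigen/Geometry>
    #include <iostream>

    int main()
    {
      Eigen::AlignedBox2d a(Eigen::Vector2d(0, 0), Eigen::Vector2d(2, 2));
      Eigen::AlignedBox2d b(Eigen::Vector2d(1, 1), Eigen::Vector2d(3, 3));
      std::cout << a.intersects(b) << "\n";               // 1
      std::cout << a.intersection(b).isEmpty() << "\n";   // 0: the overlap is [1,2]x[1,2]
    }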
diff --git a/Eigen/src/Geometry/Homogeneous.h b/Eigen/src/Geometry/Homogeneous.h
index 00e71d190..372e422b9 100644
--- a/Eigen/src/Geometry/Homogeneous.h
+++ b/Eigen/src/Geometry/Homogeneous.h
@@ -79,7 +79,7 @@ template<typename MatrixType,int _Direction> class Homogeneous
{
if( (int(Direction)==Vertical && row==m_matrix.rows())
|| (int(Direction)==Horizontal && col==m_matrix.cols()))
- return 1;
+ return Scalar(1);
return m_matrix.coeff(row, col);
}
diff --git a/Eigen/src/Geometry/Quaternion.h b/Eigen/src/Geometry/Quaternion.h
index f06653f1c..93056f60d 100644
--- a/Eigen/src/Geometry/Quaternion.h
+++ b/Eigen/src/Geometry/Quaternion.h
@@ -161,7 +161,7 @@ public:
{ return coeffs().isApprox(other.coeffs(), prec); }
/** return the result vector of \a v through the rotation*/
- EIGEN_STRONG_INLINE Vector3 _transformVector(Vector3 v) const;
+ EIGEN_STRONG_INLINE Vector3 _transformVector(const Vector3& v) const;
/** \returns \c *this with scalar type casted to \a NewScalarType
*
@@ -231,7 +231,7 @@ class Quaternion : public QuaternionBase<Quaternion<_Scalar,_Options> >
public:
typedef _Scalar Scalar;
- EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Quaternion)
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Quaternion)
using Base::operator*=;
typedef typename internal::traits<Quaternion>::Coefficients Coefficients;
@@ -341,7 +341,7 @@ class Map<const Quaternion<_Scalar>, _Options >
public:
typedef _Scalar Scalar;
typedef typename internal::traits<Map>::Coefficients Coefficients;
- EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
using Base::operator*=;
/** Constructs a Mapped Quaternion object from the pointer \a coeffs
@@ -378,7 +378,7 @@ class Map<Quaternion<_Scalar>, _Options >
public:
typedef _Scalar Scalar;
typedef typename internal::traits<Map>::Coefficients Coefficients;
- EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Map)
+ EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
using Base::operator*=;
/** Constructs a Mapped Quaternion object from the pointer \a coeffs
@@ -461,7 +461,7 @@ EIGEN_STRONG_INLINE Derived& QuaternionBase<Derived>::operator*= (const Quaterni
*/
template <class Derived>
EIGEN_STRONG_INLINE typename QuaternionBase<Derived>::Vector3
-QuaternionBase<Derived>::_transformVector(Vector3 v) const
+QuaternionBase<Derived>::_transformVector(const Vector3& v) const
{
// Note that this algorithm comes from the optimization by hand
// of the conversion to a Matrix followed by a Matrix/Vector product.
@@ -637,7 +637,7 @@ inline Quaternion<typename internal::traits<Derived>::Scalar> QuaternionBase<Der
{
// FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite() ??
Scalar n2 = this->squaredNorm();
- if (n2 > 0)
+ if (n2 > Scalar(0))
return Quaternion<Scalar>(conjugate().coeffs() / n2);
else
{
@@ -667,12 +667,10 @@ template <class OtherDerived>
inline typename internal::traits<Derived>::Scalar
QuaternionBase<Derived>::angularDistance(const QuaternionBase<OtherDerived>& other) const
{
- using std::acos;
+ using std::atan2;
using std::abs;
- Scalar d = abs(this->dot(other));
- if (d>=Scalar(1))
- return Scalar(0);
- return Scalar(2) * acos(d);
+ Quaternion<Scalar> d = (*this) * other.conjugate();
+ return Scalar(2) * atan2( d.vec().norm(), abs(d.w()) );
}
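The reworked angularDistance() measures the angle of the relative rotation d = (*this) * other.conjugate() as 2*atan2(|d.vec()|, |d.w()|), which stays accurate for nearly identical quaternions, where the previous acos(dot) form rounds to zero. A small sketch (only ordinary Eigen API outside this patch):
\code
#include <Eigen/Geometry>
#include <iostream>

int main()
{
  using namespace Eigen;
  Quaterniond q1(AngleAxisd(0.1,        Vector3d::UnitZ()));
  Quaterniond q2(AngleAxisd(0.1 + 1e-8, Vector3d::UnitZ()));  // almost the same rotation
  // With the atan2 formulation the tiny angle is recovered instead of flushing to 0.
  std::cout << q1.angularDistance(q2) << "\n";                // ~1e-8
}
\endcode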
@@ -712,7 +710,7 @@ QuaternionBase<Derived>::slerp(const Scalar& t, const QuaternionBase<OtherDerive
scale0 = sin( ( Scalar(1) - t ) * theta) / sinTheta;
scale1 = sin( ( t * theta) ) / sinTheta;
}
- if(d<0) scale1 = -scale1;
+ if(d<Scalar(0)) scale1 = -scale1;
return Quaternion<Scalar>(scale0 * coeffs() + scale1 * other.coeffs());
}
diff --git a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
index dd135c21f..2625c4dc3 100644
--- a/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
+++ b/Eigen/src/IterativeLinearSolvers/BiCGSTAB.h
@@ -151,20 +151,7 @@ struct traits<BiCGSTAB<_MatrixType,_Preconditioner> >
* \endcode
*
* By default the iterations start with x=0 as an initial guess of the solution.
- * One can control the start using the solveWithGuess() method. Here is a step by
- * step execution example starting with a random guess and printing the evolution
- * of the estimated error:
- * * \code
- * x = VectorXd::Random(n);
- * solver.setMaxIterations(1);
- * int i = 0;
- * do {
- * x = solver.solveWithGuess(b,x);
- * std::cout << i << " : " << solver.error() << std::endl;
- * ++i;
- * } while (solver.info()!=Success && i<100);
- * \endcode
- * Note that such a step by step excution is slightly slower.
+ * One can control the start using the solveWithGuess() method.
*
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
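Even with the step-by-step snippet removed, warm starting still goes through solveWithGuess(); a minimal sketch (assuming the usual SparseMatrix<double>/VectorXd types, helper name illustrative):
\code
#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>

// Reuse a previous solution x0 as the initial guess.
Eigen::VectorXd warmStart(const Eigen::SparseMatrix<double>& A,
                          const Eigen::VectorXd& b,
                          const Eigen::VectorXd& x0)
{
  Eigen::BiCGSTAB<Eigen::SparseMatrix<double> > solver;
  solver.compute(A);
  return solver.solveWithGuess(b, x0);
}
\endcode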
diff --git a/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h b/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
index a74a8155e..8ba4a8dbe 100644
--- a/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
+++ b/Eigen/src/IterativeLinearSolvers/ConjugateGradient.h
@@ -112,9 +112,9 @@ struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
* This class allows to solve for A.x = b sparse linear problems using a conjugate gradient algorithm.
* The sparse matrix A must be selfadjoint. The vectors x and b can be either dense or sparse.
*
- * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix.
- * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower
- * or Upper. Default is Lower.
+ * \tparam _MatrixType the type of the matrix A, can be a dense or a sparse matrix.
+ * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower,
+ * Upper, or Lower|Upper, in which case the full matrix entries will be considered. Default is Lower.
* \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner
*
* The maximal number of iterations and tolerance value can be controlled via the setMaxIterations()
@@ -137,20 +137,7 @@ struct traits<ConjugateGradient<_MatrixType,_UpLo,_Preconditioner> >
* \endcode
*
* By default the iterations start with x=0 as an initial guess of the solution.
- * One can control the start using the solveWithGuess() method. Here is a step by
- * step execution example starting with a random guess and printing the evolution
- * of the estimated error:
- * * \code
- * x = VectorXd::Random(n);
- * cg.setMaxIterations(1);
- * int i = 0;
- * do {
- * x = cg.solveWithGuess(b,x);
- * std::cout << i << " : " << cg.error() << std::endl;
- * ++i;
- * } while (cg.info()!=Success && i<100);
- * \endcode
- * Note that such a step by step excution is slightly slower.
+ * One can control the start using the solveWithGuess() method.
*
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
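The new Lower|Upper mode avoids wrapping the matrix in a selfadjoint view when both triangles of the symmetric matrix are stored; a short sketch (helper name is illustrative only):
\code
#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>

// A stores both triangles of a symmetric matrix, so CG can use it directly.
Eigen::VectorXd solveSymmetric(const Eigen::SparseMatrix<double>& A,
                               const Eigen::VectorXd& b)
{
  Eigen::ConjugateGradient<Eigen::SparseMatrix<double>, Eigen::Lower|Eigen::Upper> cg;
  cg.compute(A);
  return cg.solve(b);
}
\endcode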
@@ -213,6 +200,10 @@ public:
template<typename Rhs,typename Dest>
void _solveWithGuess(const Rhs& b, Dest& x) const
{
+ typedef typename internal::conditional<UpLo==(Lower|Upper),
+ const MatrixType&,
+ SparseSelfAdjointView<const MatrixType, UpLo>
+ >::type MatrixWrapperType;
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
@@ -222,8 +213,7 @@ public:
m_error = Base::m_tolerance;
typename Dest::ColXpr xj(x,j);
- internal::conjugate_gradient(mp_matrix->template selfadjointView<UpLo>(), b.col(j), xj,
- Base::m_preconditioner, m_iterations, m_error);
+ internal::conjugate_gradient(MatrixWrapperType(*mp_matrix), b.col(j), xj, Base::m_preconditioner, m_iterations, m_error);
}
m_isInitialized = true;
@@ -234,7 +224,7 @@ public:
template<typename Rhs,typename Dest>
void _solve(const Rhs& b, Dest& x) const
{
- x.setOnes();
+ x.setZero();
_solveWithGuess(b,x);
}
diff --git a/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h b/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
index b55afc136..4c169aa60 100644
--- a/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
+++ b/Eigen/src/IterativeLinearSolvers/IncompleteLUT.h
@@ -150,7 +150,6 @@ class IncompleteLUT : internal::noncopyable
{
analyzePattern(amat);
factorize(amat);
- m_isInitialized = m_factorizationIsOk;
return *this;
}
@@ -235,6 +234,8 @@ void IncompleteLUT<Scalar>::analyzePattern(const _MatrixType& amat)
m_Pinv = m_P.inverse(); // ... and the inverse permutation
m_analysisIsOk = true;
+ m_factorizationIsOk = false;
+ m_isInitialized = false;
}
template <typename Scalar>
@@ -442,6 +443,7 @@ void IncompleteLUT<Scalar>::factorize(const _MatrixType& amat)
m_lu.makeCompressed();
m_factorizationIsOk = true;
+ m_isInitialized = m_factorizationIsOk;
m_info = Success;
}
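With the flag shuffle above, the initialization state now follows the two-phase API exactly: analyzePattern() invalidates any previous factorization and factorize() marks the preconditioner usable again. A small sketch (function name is illustrative):
\code
#include <Eigen/Sparse>
#include <Eigen/IterativeLinearSolvers>

void refactor(Eigen::IncompleteLUT<double>& ilut,
              const Eigen::SparseMatrix<double>& A0,
              const Eigen::SparseMatrix<double>& A1)
{
  ilut.analyzePattern(A0);  // not usable as a preconditioner yet
  ilut.factorize(A0);       // now it is
  ilut.factorize(A1);       // same sparsity pattern, new values: analysis is reused
}
\endcode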
diff --git a/Eigen/src/LU/FullPivLU.h b/Eigen/src/LU/FullPivLU.h
index 79ab6a8c8..26bc71447 100644
--- a/Eigen/src/LU/FullPivLU.h
+++ b/Eigen/src/LU/FullPivLU.h
@@ -374,6 +374,12 @@ template<typename _MatrixType> class FullPivLU
inline Index cols() const { return m_lu.cols(); }
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
+
MatrixType m_lu;
PermutationPType m_p;
PermutationQType m_q;
@@ -418,6 +424,8 @@ FullPivLU<MatrixType>::FullPivLU(const MatrixType& matrix)
template<typename MatrixType>
FullPivLU<MatrixType>& FullPivLU<MatrixType>::compute(const MatrixType& matrix)
{
+ check_template_parameters();
+
// the permutations are stored as int indices, so just to be sure:
eigen_assert(matrix.rows()<=NumTraits<int>::highest() && matrix.cols()<=NumTraits<int>::highest());
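The new check_template_parameters() hook turns a misuse that used to compile into a compile-time error: the decompositions require a non-integer scalar. A sketch mirroring the failtests added later in this patch:
\code
#include <Eigen/LU>

void ok()  { Eigen::FullPivLU<Eigen::MatrixXf> lu(Eigen::MatrixXf::Random(4,4)); }
// The next line would now trigger the static assertion when compute() is instantiated:
// void bad() { Eigen::FullPivLU<Eigen::MatrixXi> lu(Eigen::MatrixXi::Random(4,4)); }
\endcode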
diff --git a/Eigen/src/LU/PartialPivLU.h b/Eigen/src/LU/PartialPivLU.h
index 740ee694c..7d1db948c 100644
--- a/Eigen/src/LU/PartialPivLU.h
+++ b/Eigen/src/LU/PartialPivLU.h
@@ -171,6 +171,12 @@ template<typename _MatrixType> class PartialPivLU
inline Index cols() const { return m_lu.cols(); }
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
+
MatrixType m_lu;
PermutationType m_p;
TranspositionType m_rowsTranspositions;
@@ -386,6 +392,8 @@ void partial_lu_inplace(MatrixType& lu, TranspositionType& row_transpositions, t
template<typename MatrixType>
PartialPivLU<MatrixType>& PartialPivLU<MatrixType>::compute(const MatrixType& matrix)
{
+ check_template_parameters();
+
// the row permutation is stored as int indices, so just to be sure:
eigen_assert(matrix.rows()<NumTraits<int>::highest());
diff --git a/Eigen/src/OrderingMethods/Amd.h b/Eigen/src/OrderingMethods/Amd.h
index 41b4fd7e3..1c28bdf41 100644
--- a/Eigen/src/OrderingMethods/Amd.h
+++ b/Eigen/src/OrderingMethods/Amd.h
@@ -144,15 +144,23 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
/* --- Initialize degree lists ------------------------------------------ */
for(i = 0; i < n; i++)
{
+ bool has_diag = false;
+ for(p = Cp[i]; p<Cp[i+1]; ++p)
+ if(Ci[p]==i)
+ {
+ has_diag = true;
+ break;
+ }
+
d = degree[i];
- if(d == 0) /* node i is empty */
+ if(d == 1) /* node i is empty */
{
elen[i] = -2; /* element i is dead */
nel++;
Cp[i] = -1; /* i is a root of assembly tree */
w[i] = 0;
}
- else if(d > dense) /* node i is dense */
+ else if(d > dense || !has_diag) /* node i is dense or has no structural diagonal element */
{
nv[i] = 0; /* absorb i into element n */
elen[i] = -1; /* node i is dead */
@@ -168,6 +176,10 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
}
}
+ elen[n] = -2; /* n is a dead element */
+ Cp[n] = -1; /* n is a root of assembly tree */
+ w[n] = 0; /* n is a dead element */
+
while (nel < n) /* while (selecting pivots) do */
{
/* --- Select node of minimum approximate degree -------------------- */
diff --git a/Eigen/src/QR/ColPivHouseholderQR.h b/Eigen/src/QR/ColPivHouseholderQR.h
index 773d1df8f..567eab7cd 100644
--- a/Eigen/src/QR/ColPivHouseholderQR.h
+++ b/Eigen/src/QR/ColPivHouseholderQR.h
@@ -384,6 +384,12 @@ template<typename _MatrixType> class ColPivHouseholderQR
}
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
+
MatrixType m_qr;
HCoeffsType m_hCoeffs;
PermutationType m_colsPermutation;
@@ -422,6 +428,8 @@ typename MatrixType::RealScalar ColPivHouseholderQR<MatrixType>::logAbsDetermina
template<typename MatrixType>
ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
+ check_template_parameters();
+
using std::abs;
Index rows = matrix.rows();
Index cols = matrix.cols();
@@ -463,20 +471,10 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
// we store that back into our table: it can't hurt to correct our table.
m_colSqNorms.coeffRef(biggest_col_index) = biggest_col_sq_norm;
- // if the current biggest column is smaller than epsilon times the initial biggest column,
- // terminate to avoid generating nan/inf values.
- // Note that here, if we test instead for "biggest == 0", we get a failure every 1000 (or so)
- // repetitions of the unit test, with the result of solve() filled with large values of the order
- // of 1/(size*epsilon).
- if(biggest_col_sq_norm < threshold_helper * RealScalar(rows-k))
- {
+ // Track the number of meaningful pivots but do not stop the decomposition to make
+ // sure that the initial matrix is properly reproduced. See bug 941.
+ if(m_nonzero_pivots==size && biggest_col_sq_norm < threshold_helper * RealScalar(rows-k))
m_nonzero_pivots = k;
- m_hCoeffs.tail(size-k).setZero();
- m_qr.bottomRightCorner(rows-k,cols-k)
- .template triangularView<StrictlyLower>()
- .setZero();
- break;
- }
// apply the transposition to the columns
m_colsTranspositions.coeffRef(k) = biggest_col_index;
@@ -505,7 +503,7 @@ ColPivHouseholderQR<MatrixType>& ColPivHouseholderQR<MatrixType>::compute(const
}
m_colsPermutation.setIdentity(PermIndexType(cols));
- for(PermIndexType k = 0; k < m_nonzero_pivots; ++k)
+ for(PermIndexType k = 0; k < size/*m_nonzero_pivots*/; ++k)
m_colsPermutation.applyTranspositionOnTheRight(k, PermIndexType(m_colsTranspositions.coeff(k)));
m_det_pq = (number_of_transpositions%2) ? -1 : 1;
@@ -555,13 +553,15 @@ struct solve_retval<ColPivHouseholderQR<_MatrixType>, Rhs>
} // end namespace internal
-/** \returns the matrix Q as a sequence of householder transformations */
+/** \returns the matrix Q as a sequence of householder transformations.
+ * You can extract only the meaningful part by using:
+ * \code qr.householderQ().setLength(qr.nonzeroPivots()) \endcode*/
template<typename MatrixType>
typename ColPivHouseholderQR<MatrixType>::HouseholderSequenceType ColPivHouseholderQR<MatrixType>
::householderQ() const
{
eigen_assert(m_isInitialized && "ColPivHouseholderQR is not initialized.");
- return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate()).setLength(m_nonzero_pivots);
+ return HouseholderSequenceType(m_qr, m_hCoeffs.conjugate());
}
/** \return the column-pivoting Householder QR decomposition of \c *this.
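Since householderQ() no longer truncates the sequence itself, callers that only want the transformations associated with the nonzero pivots restrict it explicitly, as the updated documentation suggests; a minimal sketch:
\code
#include <Eigen/QR>

Eigen::MatrixXd meaningfulQ(const Eigen::MatrixXd& A)
{
  Eigen::ColPivHouseholderQR<Eigen::MatrixXd> qr(A);
  Eigen::MatrixXd Q = qr.householderQ().setLength(qr.nonzeroPivots());
  return Q;
}
\endcode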
diff --git a/Eigen/src/QR/FullPivHouseholderQR.h b/Eigen/src/QR/FullPivHouseholderQR.h
index 6168e7abf..0b39966e1 100644
--- a/Eigen/src/QR/FullPivHouseholderQR.h
+++ b/Eigen/src/QR/FullPivHouseholderQR.h
@@ -368,6 +368,12 @@ template<typename _MatrixType> class FullPivHouseholderQR
RealScalar maxPivot() const { return m_maxpivot; }
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
+
MatrixType m_qr;
HCoeffsType m_hCoeffs;
IntDiagSizeVectorType m_rows_transpositions;
@@ -407,6 +413,8 @@ typename MatrixType::RealScalar FullPivHouseholderQR<MatrixType>::logAbsDetermin
template<typename MatrixType>
FullPivHouseholderQR<MatrixType>& FullPivHouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
+ check_template_parameters();
+
using std::abs;
Index rows = matrix.rows();
Index cols = matrix.cols();
diff --git a/Eigen/src/QR/HouseholderQR.h b/Eigen/src/QR/HouseholderQR.h
index abc61bcbb..343a66499 100644
--- a/Eigen/src/QR/HouseholderQR.h
+++ b/Eigen/src/QR/HouseholderQR.h
@@ -189,6 +189,12 @@ template<typename _MatrixType> class HouseholderQR
const HCoeffsType& hCoeffs() const { return m_hCoeffs; }
protected:
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
+
MatrixType m_qr;
HCoeffsType m_hCoeffs;
RowVectorType m_temp;
@@ -251,56 +257,62 @@ void householder_qr_inplace_unblocked(MatrixQR& mat, HCoeffs& hCoeffs, typename
}
/** \internal */
-template<typename MatrixQR, typename HCoeffs>
-void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs,
- typename MatrixQR::Index maxBlockSize=32,
- typename MatrixQR::Scalar* tempData = 0)
+template<typename MatrixQR, typename HCoeffs,
+ typename MatrixQRScalar = typename MatrixQR::Scalar,
+ bool InnerStrideIsOne = (MatrixQR::InnerStrideAtCompileTime == 1 && HCoeffs::InnerStrideAtCompileTime == 1)>
+struct householder_qr_inplace_blocked
{
- typedef typename MatrixQR::Index Index;
- typedef typename MatrixQR::Scalar Scalar;
- typedef Block<MatrixQR,Dynamic,Dynamic> BlockType;
-
- Index rows = mat.rows();
- Index cols = mat.cols();
- Index size = (std::min)(rows, cols);
-
- typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixQR::MaxColsAtCompileTime,1> TempType;
- TempType tempVector;
- if(tempData==0)
+ // This is specialized for MKL-supported Scalar types in HouseholderQR_MKL.h
+ static void run(MatrixQR& mat, HCoeffs& hCoeffs,
+ typename MatrixQR::Index maxBlockSize=32,
+ typename MatrixQR::Scalar* tempData = 0)
{
- tempVector.resize(cols);
- tempData = tempVector.data();
- }
-
- Index blockSize = (std::min)(maxBlockSize,size);
+ typedef typename MatrixQR::Index Index;
+ typedef typename MatrixQR::Scalar Scalar;
+ typedef Block<MatrixQR,Dynamic,Dynamic> BlockType;
- Index k = 0;
- for (k = 0; k < size; k += blockSize)
- {
- Index bs = (std::min)(size-k,blockSize); // actual size of the block
- Index tcols = cols - k - bs; // trailing columns
- Index brows = rows-k; // rows of the block
+ Index rows = mat.rows();
+ Index cols = mat.cols();
+ Index size = (std::min)(rows, cols);
- // partition the matrix:
- // A00 | A01 | A02
- // mat = A10 | A11 | A12
- // A20 | A21 | A22
- // and performs the qr dec of [A11^T A12^T]^T
- // and update [A21^T A22^T]^T using level 3 operations.
- // Finally, the algorithm continue on A22
-
- BlockType A11_21 = mat.block(k,k,brows,bs);
- Block<HCoeffs,Dynamic,1> hCoeffsSegment = hCoeffs.segment(k,bs);
+ typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixQR::MaxColsAtCompileTime,1> TempType;
+ TempType tempVector;
+ if(tempData==0)
+ {
+ tempVector.resize(cols);
+ tempData = tempVector.data();
+ }
- householder_qr_inplace_unblocked(A11_21, hCoeffsSegment, tempData);
+ Index blockSize = (std::min)(maxBlockSize,size);
- if(tcols)
+ Index k = 0;
+ for (k = 0; k < size; k += blockSize)
{
- BlockType A21_22 = mat.block(k,k+bs,brows,tcols);
- apply_block_householder_on_the_left(A21_22,A11_21,hCoeffsSegment.adjoint());
+ Index bs = (std::min)(size-k,blockSize); // actual size of the block
+ Index tcols = cols - k - bs; // trailing columns
+ Index brows = rows-k; // rows of the block
+
+ // partition the matrix:
+ // A00 | A01 | A02
+ // mat = A10 | A11 | A12
+ // A20 | A21 | A22
+ // and performs the QR decomposition of [A11^T A12^T]^T
+ // and updates [A21^T A22^T]^T using level-3 operations.
+ // Finally, the algorithm continues on A22.
+
+ BlockType A11_21 = mat.block(k,k,brows,bs);
+ Block<HCoeffs,Dynamic,1> hCoeffsSegment = hCoeffs.segment(k,bs);
+
+ householder_qr_inplace_unblocked(A11_21, hCoeffsSegment, tempData);
+
+ if(tcols)
+ {
+ BlockType A21_22 = mat.block(k,k+bs,brows,tcols);
+ apply_block_householder_on_the_left(A21_22,A11_21,hCoeffsSegment.adjoint());
+ }
}
}
-}
+};
template<typename _MatrixType, typename Rhs>
struct solve_retval<HouseholderQR<_MatrixType>, Rhs>
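Turning the free function into a struct with trailing Scalar/stride parameters is what allows HouseholderQR_MKL.h below to plug in a partial specialization per MKL-supported type, something function templates cannot do. The pattern, with hypothetical names:
\code
// Generic path, selected unless a more specific partial specialization matches.
template<typename Mat, typename Scalar = typename Mat::Scalar>
struct blocked_qr_impl
{
  static void run(Mat& m) { /* generic blocked Householder code */ }
};

// Backend-specific path, e.g. forwarding to an external ?geqrf kernel for double.
template<typename Mat>
struct blocked_qr_impl<Mat, double>
{
  static void run(Mat& m) { /* call the external kernel here */ }
};
\endcode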
@@ -343,6 +355,8 @@ struct solve_retval<HouseholderQR<_MatrixType>, Rhs>
template<typename MatrixType>
HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType& matrix)
{
+ check_template_parameters();
+
Index rows = matrix.rows();
Index cols = matrix.cols();
Index size = (std::min)(rows,cols);
@@ -352,7 +366,7 @@ HouseholderQR<MatrixType>& HouseholderQR<MatrixType>::compute(const MatrixType&
m_temp.resize(cols);
- internal::householder_qr_inplace_blocked(m_qr, m_hCoeffs, 48, m_temp.data());
+ internal::householder_qr_inplace_blocked<MatrixType, HCoeffsType>::run(m_qr, m_hCoeffs, 48, m_temp.data());
m_isInitialized = true;
return *this;
diff --git a/Eigen/src/QR/HouseholderQR_MKL.h b/Eigen/src/QR/HouseholderQR_MKL.h
index 5313de604..b80f1b48d 100644
--- a/Eigen/src/QR/HouseholderQR_MKL.h
+++ b/Eigen/src/QR/HouseholderQR_MKL.h
@@ -34,28 +34,30 @@
#ifndef EIGEN_QR_MKL_H
#define EIGEN_QR_MKL_H
-#include "Eigen/src/Core/util/MKL_support.h"
+#include "../Core/util/MKL_support.h"
namespace Eigen {
-namespace internal {
+ namespace internal {
-/** \internal Specialization for the data types supported by MKL */
+ /** \internal Specialization for the data types supported by MKL */
#define EIGEN_MKL_QR_NOPIV(EIGTYPE, MKLTYPE, MKLPREFIX) \
template<typename MatrixQR, typename HCoeffs> \
-void householder_qr_inplace_blocked(MatrixQR& mat, HCoeffs& hCoeffs, \
- typename MatrixQR::Index maxBlockSize=32, \
- EIGTYPE* tempData = 0) \
+struct householder_qr_inplace_blocked<MatrixQR, HCoeffs, EIGTYPE, true> \
{ \
- lapack_int m = mat.rows(); \
- lapack_int n = mat.cols(); \
- lapack_int lda = mat.outerStride(); \
- lapack_int matrix_order = (MatrixQR::IsRowMajor) ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
- LAPACKE_##MKLPREFIX##geqrf( matrix_order, m, n, (MKLTYPE*)mat.data(), lda, (MKLTYPE*)hCoeffs.data()); \
- hCoeffs.adjointInPlace(); \
-\
-}
+ static void run(MatrixQR& mat, HCoeffs& hCoeffs, \
+ typename MatrixQR::Index = 32, \
+ typename MatrixQR::Scalar* = 0) \
+ { \
+ lapack_int m = (lapack_int) mat.rows(); \
+ lapack_int n = (lapack_int) mat.cols(); \
+ lapack_int lda = (lapack_int) mat.outerStride(); \
+ lapack_int matrix_order = (MatrixQR::IsRowMajor) ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \
+ LAPACKE_##MKLPREFIX##geqrf( matrix_order, m, n, (MKLTYPE*)mat.data(), lda, (MKLTYPE*)hCoeffs.data()); \
+ hCoeffs.adjointInPlace(); \
+ } \
+};
EIGEN_MKL_QR_NOPIV(double, double, d)
EIGEN_MKL_QR_NOPIV(float, float, s)
diff --git a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
index a2cc2a9e2..de00877de 100644
--- a/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
+++ b/Eigen/src/SPQRSupport/SuiteSparseQRSupport.h
@@ -64,19 +64,13 @@ class SPQR
typedef PermutationMatrix<Dynamic, Dynamic> PermutationType;
public:
SPQR()
- : m_isInitialized(false),
- m_ordering(SPQR_ORDERING_DEFAULT),
- m_allow_tol(SPQR_DEFAULT_TOL),
- m_tolerance (NumTraits<Scalar>::epsilon())
+ : m_isInitialized(false), m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
{
cholmod_l_start(&m_cc);
}
- SPQR(const _MatrixType& matrix)
- : m_isInitialized(false),
- m_ordering(SPQR_ORDERING_DEFAULT),
- m_allow_tol(SPQR_DEFAULT_TOL),
- m_tolerance (NumTraits<Scalar>::epsilon())
+ SPQR(const _MatrixType& matrix)
+ : m_isInitialized(false), m_ordering(SPQR_ORDERING_DEFAULT), m_allow_tol(SPQR_DEFAULT_TOL), m_tolerance (NumTraits<Scalar>::epsilon()), m_useDefaultThreshold(true)
{
cholmod_l_start(&m_cc);
compute(matrix);
@@ -101,10 +95,26 @@ class SPQR
if(m_isInitialized) SPQR_free();
MatrixType mat(matrix);
+
+ /* Compute the default threshold as in MATLAB, see:
+ * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
+ * Sparse QR Factorization," ACM Trans. on Math. Soft. 38(1), 2011, page 8:3
+ */
+ RealScalar pivotThreshold = m_tolerance;
+ if(m_useDefaultThreshold)
+ {
+ using std::max;
+ RealScalar max2Norm = 0.0;
+ for (int j = 0; j < mat.cols(); j++) max2Norm = (max)(max2Norm, mat.col(j).norm());
+ if(max2Norm==RealScalar(0))
+ max2Norm = RealScalar(1);
+ pivotThreshold = 20 * (mat.rows() + mat.cols()) * max2Norm * NumTraits<RealScalar>::epsilon();
+ }
+
cholmod_sparse A;
A = viewAsCholmod(mat);
Index col = matrix.cols();
- m_rank = SuiteSparseQR<Scalar>(m_ordering, m_tolerance, col, &A,
+ m_rank = SuiteSparseQR<Scalar>(m_ordering, pivotThreshold, col, &A,
&m_cR, &m_E, &m_H, &m_HPinv, &m_HTau, &m_cc);
if (!m_cR)
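The default tolerance now follows SuiteSparseQR's own heuristic, 20*(rows+cols)*max_j||A.col(j)||*eps, and setPivotThreshold() (changed further below) opts out of it. A short sketch, assuming SuiteSparse support is enabled:
\code
#include <Eigen/SPQRSupport>

void factorize(const Eigen::SparseMatrix<double>& A)
{
  Eigen::SPQR<Eigen::SparseMatrix<double> > spqr;
  spqr.setPivotThreshold(1e-10);  // use an explicit tolerance instead of the default heuristic
  spqr.compute(A);
}
\endcode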
@@ -120,7 +130,7 @@ class SPQR
/**
* Get the number of rows of the input matrix and the Q matrix
*/
- inline Index rows() const {return m_H->nrow; }
+ inline Index rows() const {return m_cR->nrow; }
/**
* Get the number of columns of the input matrix.
@@ -145,16 +155,25 @@ class SPQR
{
eigen_assert(m_isInitialized && " The QR factorization should be computed first, call compute()");
eigen_assert(b.cols()==1 && "This method is for vectors only");
-
+
//Compute Q^T * b
- typename Dest::PlainObject y;
+ typename Dest::PlainObject y, y2;
y = matrixQ().transpose() * b;
- // Solves with the triangular matrix R
+
+ // Solves with the triangular matrix R
Index rk = this->rank();
- y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView<Upper>().solve(y.topRows(rk));
- y.bottomRows(cols()-rk).setZero();
+ y2 = y;
+ y.resize((std::max)(cols(),Index(y.rows())),y.cols());
+ y.topRows(rk) = this->matrixR().topLeftCorner(rk, rk).template triangularView<Upper>().solve(y2.topRows(rk));
+
// Apply the column permutation
- dest.topRows(cols()) = colsPermutation() * y.topRows(cols());
+ // colsPermutation() performs a copy of the permutation,
+ // so let's apply it manually:
+ for(Index i = 0; i < rk; ++i) dest.row(m_E[i]) = y.row(i);
+ for(Index i = rk; i < cols(); ++i) dest.row(m_E[i]).setZero();
+
+// y.bottomRows(y.rows()-rk).setZero();
+// dest = colsPermutation() * y.topRows(cols());
m_info = Success;
}
@@ -197,7 +216,11 @@ class SPQR
/// Set the fill-reducing ordering method to be used
void setSPQROrdering(int ord) { m_ordering = ord;}
/// Set the tolerance tol to treat columns with 2-norm < =tol as zero
- void setPivotThreshold(const RealScalar& tol) { m_tolerance = tol; }
+ void setPivotThreshold(const RealScalar& tol)
+ {
+ m_useDefaultThreshold = false;
+ m_tolerance = tol;
+ }
/** \returns a pointer to the SPQR workspace */
cholmod_common *cholmodCommon() const { return &m_cc; }
@@ -230,6 +253,7 @@ class SPQR
mutable cholmod_dense *m_HTau; // The Householder coefficients
mutable Index m_rank; // The rank of the matrix
mutable cholmod_common m_cc; // Workspace and parameters
+ bool m_useDefaultThreshold; // Use default threshold
template<typename ,typename > friend struct SPQR_QProduct;
};
diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h
index c57f2974c..1b2977419 100644
--- a/Eigen/src/SVD/JacobiSVD.h
+++ b/Eigen/src/SVD/JacobiSVD.h
@@ -742,6 +742,11 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
private:
void allocate(Index rows, Index cols, unsigned int computationOptions);
+
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
+ }
protected:
MatrixUType m_matrixU;
@@ -818,6 +823,8 @@ template<typename MatrixType, int QRPreconditioner>
JacobiSVD<MatrixType, QRPreconditioner>&
JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
{
+ check_template_parameters();
+
using std::abs;
allocate(matrix.rows(), matrix.cols(), computationOptions);
diff --git a/Eigen/src/SparseCore/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h
index 0ede034ba..0c90bafbe 100644
--- a/Eigen/src/SparseCore/SparseBlock.h
+++ b/Eigen/src/SparseCore/SparseBlock.h
@@ -57,6 +57,16 @@ public:
inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols)
: m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
{}
+
+ inline const Scalar coeff(int row, int col) const
+ {
+ return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
+ }
+
+ inline const Scalar coeff(int index) const
+ {
+ return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
+ }
EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
@@ -226,6 +236,21 @@ public:
else
return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
}
+
+ inline Scalar& coeffRef(int row, int col)
+ {
+ return m_matrix.const_cast_derived().coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
+ }
+
+ inline const Scalar coeff(int row, int col) const
+ {
+ return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
+ }
+
+ inline const Scalar coeff(int index) const
+ {
+ return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
+ }
const Scalar& lastCoeff() const
{
@@ -313,6 +338,16 @@ public:
else
return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
}
+
+ inline const Scalar coeff(int row, int col) const
+ {
+ return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
+ }
+
+ inline const Scalar coeff(int index) const
+ {
+ return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
+ }
const Scalar& lastCoeff() const
{
@@ -355,7 +390,8 @@ const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatri
* is col-major (resp. row-major).
*/
template<typename Derived>
-Block<Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
+typename SparseMatrixBase<Derived>::InnerVectorsReturnType
+SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
{
return Block<Derived,Dynamic,Dynamic,true>(derived(),
IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
@@ -367,7 +403,8 @@ Block<Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Inde
* is col-major (resp. row-major). Read-only.
*/
template<typename Derived>
-const Block<const Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
+const typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType
+SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
{
return Block<const Derived,Dynamic,Dynamic,true>(derived(),
IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
@@ -394,8 +431,8 @@ public:
: m_matrix(xpr),
m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
- m_blockRows(xpr.rows()),
- m_blockCols(xpr.cols())
+ m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
+ m_blockCols(BlockCols==1 ? 1 : xpr.cols())
{}
/** Dynamic-size constructor
@@ -497,3 +534,4 @@ public:
} // end namespace Eigen
#endif // EIGEN_SPARSE_BLOCK_H
+
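The new coeff()/coeffRef() overloads give random read access to sparse blocks such as the ones returned by innerVectors(); a small sketch:
\code
#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(4,4);   // column-major by default
  A.insert(1,1) = 3.0;
  A.makeCompressed();
  // Columns 1..2 as a block; the block's coeff(1,0) maps back to A(1,1).
  std::cout << A.innerVectors(1,2).coeff(1,0) << "\n";  // 3
}
\endcode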
diff --git a/Eigen/src/SparseCore/SparseDenseProduct.h b/Eigen/src/SparseCore/SparseDenseProduct.h
index a9084192e..ccb6ae7b7 100644
--- a/Eigen/src/SparseCore/SparseDenseProduct.h
+++ b/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -180,7 +180,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, R
typename Res::Scalar tmp(0);
for(LhsInnerIterator it(lhs,j); it ;++it)
tmp += it.value() * rhs.coeff(it.index(),c);
- res.coeffRef(j,c) = alpha * tmp;
+ res.coeffRef(j,c) += alpha * tmp;
}
}
}
diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h
index 485e85bec..6b2169a7b 100644
--- a/Eigen/src/SparseCore/SparseMatrixBase.h
+++ b/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -404,8 +404,10 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
const ConstInnerVectorReturnType innerVector(Index outer) const;
// set of inner-vectors
- Block<Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize);
- const Block<const Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize) const;
+ typedef Block<Derived,Dynamic,Dynamic,true> InnerVectorsReturnType;
+ typedef Block<const Derived,Dynamic,Dynamic,true> ConstInnerVectorsReturnType;
+ InnerVectorsReturnType innerVectors(Index outerStart, Index outerSize);
+ const ConstInnerVectorsReturnType innerVectors(Index outerStart, Index outerSize) const;
/** \internal use operator= */
template<typename DenseDerived>
diff --git a/Eigen/src/SparseCore/TriangularSolver.h b/Eigen/src/SparseCore/TriangularSolver.h
index cb8ad82b4..ccc12af79 100644
--- a/Eigen/src/SparseCore/TriangularSolver.h
+++ b/Eigen/src/SparseCore/TriangularSolver.h
@@ -69,7 +69,7 @@ struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
for(int i=lhs.rows()-1 ; i>=0 ; --i)
{
Scalar tmp = other.coeff(i,col);
- Scalar l_ii = 0;
+ Scalar l_ii(0);
typename Lhs::InnerIterator it(lhs, i);
while(it && it.index()<i)
++it;
diff --git a/Eigen/src/SparseLU/SparseLU.h b/Eigen/src/SparseLU/SparseLU.h
index d6f5d59ca..4514cfd9c 100644
--- a/Eigen/src/SparseLU/SparseLU.h
+++ b/Eigen/src/SparseLU/SparseLU.h
@@ -268,7 +268,8 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
{
if(it.index() == j)
{
- det *= (std::abs)(it.value());
+ using std::abs;
+ det *= abs(it.value());
break;
}
}
@@ -295,7 +296,8 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
if(it.row() < j) continue;
if(it.row() == j)
{
- det += (std::log)((std::abs)(it.value()));
+ using std::log; using std::abs;
+ det += log(abs(it.value()));
break;
}
}
@@ -303,21 +305,64 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
return det;
}
- /** \returns A number representing the sign of the determinant
- *
- * \sa absDeterminant(), logAbsDeterminant()
- */
- Scalar signDeterminant()
- {
- eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
- return Scalar(m_detPermR);
- }
+ /** \returns A number representing the sign of the determinant
+ *
+ * \sa absDeterminant(), logAbsDeterminant()
+ */
+ Scalar signDeterminant()
+ {
+ eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
+ // Initialize the determinant; the permutation contributions are applied at the end
+ Index det = 1;
+ // Note that the diagonal blocks of U are stored in supernodes,
+ // which are available in the L part :)
+ for (Index j = 0; j < this->cols(); ++j)
+ {
+ for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
+ {
+ if(it.index() == j)
+ {
+ if(it.value()<0)
+ det = -det;
+ else if(it.value()==0)
+ return 0;
+ break;
+ }
+ }
+ }
+ return det * m_detPermR * m_detPermC;
+ }
+
+ /** \returns The determinant of the matrix.
+ *
+ * \sa absDeterminant(), logAbsDeterminant()
+ */
+ Scalar determinant()
+ {
+ eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
+ // Initialize the determinant; the permutation contributions are applied at the end
+ Scalar det = Scalar(1.);
+ // Note that the diagonal blocks of U are stored in supernodes,
+ // which are available in the L part :)
+ for (Index j = 0; j < this->cols(); ++j)
+ {
+ for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
+ {
+ if(it.index() == j)
+ {
+ det *= it.value();
+ break;
+ }
+ }
+ }
+ return det * Scalar(m_detPermR * m_detPermC);
+ }
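With determinant() and the corrected signDeterminant() in place, SparseLU exposes four consistent determinant queries after factorization; a quick sketch of their relationship:
\code
#include <Eigen/SparseLU>

void determinants(const Eigen::SparseMatrix<double>& A)
{
  Eigen::SparseLU<Eigen::SparseMatrix<double> > lu(A);
  double d    = lu.determinant();        // prod(diag(U)) * det(Pr) * det(Pc)
  double ad   = lu.absDeterminant();     // |det(A)|
  double lad  = lu.logAbsDeterminant();  // log|det(A)|, safe against over/underflow
  double sign = lu.signDeterminant();    // -1, 0 or +1
}
\endcode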
protected:
// Functions
void initperfvalues()
{
- m_perfv.panel_size = 1;
+ m_perfv.panel_size = 16;
m_perfv.relax = 1;
m_perfv.maxsuper = 128;
m_perfv.rowblk = 16;
@@ -345,8 +390,8 @@ class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typ
// values for performance
internal::perfvalues<Index> m_perfv;
RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot
- Index m_nnzL, m_nnzU; // Nonzeros in L and U factors
- Index m_detPermR; // Determinant of the coefficient matrix
+ Index m_nnzL, m_nnzU; // Nonzeros in L and U factors
+ Index m_detPermR, m_detPermC; // Determinants of the permutation matrices
private:
// Disable copy constructor
SparseLU (const SparseLU& );
@@ -622,7 +667,8 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
}
// Update the determinant of the row permutation matrix
- if (pivrow != jj) m_detPermR *= -1;
+ // FIXME: the following test is not correct; we should probably take iperm_c into account, and pivrow is not directly the row pivot.
+ if (pivrow != jj) m_detPermR = -m_detPermR;
// Prune columns (0:jj-1) using column jj
Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu);
@@ -637,10 +683,13 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
jcol += panel_size; // Move to the next panel
} // end for -- end elimination
+ m_detPermR = m_perm_r.determinant();
+ m_detPermC = m_perm_c.determinant();
+
// Count the number of nonzeros in factors
Base::countnz(n, m_nnzL, m_nnzU, m_glu);
// Apply permutation to the L subscripts
- Base::fixupL(n, m_perm_r.indices(), m_glu);
+ Base::fixupL(n, m_perm_r.indices(), m_glu);
// Create supernode matrix L
m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup);
diff --git a/Eigen/src/SparseLU/SparseLU_pivotL.h b/Eigen/src/SparseLU/SparseLU_pivotL.h
index ddcd4ec98..457789c78 100644
--- a/Eigen/src/SparseLU/SparseLU_pivotL.h
+++ b/Eigen/src/SparseLU/SparseLU_pivotL.h
@@ -77,7 +77,8 @@ Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar& dia
RealScalar rtemp;
Index isub, icol, itemp, k;
for (isub = nsupc; isub < nsupr; ++isub) {
- rtemp = std::abs(lu_col_ptr[isub]);
+ using std::abs;
+ rtemp = abs(lu_col_ptr[isub]);
if (rtemp > pivmax) {
pivmax = rtemp;
pivptr = isub;
@@ -101,7 +102,8 @@ Index SparseLUImpl<Scalar,Index>::pivotL(const Index jcol, const RealScalar& dia
if (diag >= 0 )
{
// Diagonal element exists
- rtemp = std::abs(lu_col_ptr[diag]);
+ using std::abs;
+ rtemp = abs(lu_col_ptr[diag]);
if (rtemp != 0.0 && rtemp >= thresh) pivptr = diag;
}
pivrow = lsub_ptr[pivptr];
diff --git a/Eigen/src/plugins/ArrayCwiseBinaryOps.h b/Eigen/src/plugins/ArrayCwiseBinaryOps.h
index 5c8c476ee..1951286f3 100644
--- a/Eigen/src/plugins/ArrayCwiseBinaryOps.h
+++ b/Eigen/src/plugins/ArrayCwiseBinaryOps.h
@@ -70,6 +70,43 @@ max
return (max)(Derived::PlainObject::Constant(rows(), cols(), other));
}
+
+#define EIGEN_MAKE_CWISE_COMP_OP(OP, COMPARATOR) \
+template<typename OtherDerived> \
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived> \
+OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
+{ \
+ return CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const OtherDerived>(derived(), other.derived()); \
+}\
+typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const Derived, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject> > Cmp ## COMPARATOR ## ReturnType; \
+typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_ ## COMPARATOR>, const CwiseNullaryOp<internal::scalar_constant_op<Scalar>, PlainObject>, const Derived > RCmp ## COMPARATOR ## ReturnType; \
+EIGEN_STRONG_INLINE const Cmp ## COMPARATOR ## ReturnType \
+OP(const Scalar& s) const { \
+ return this->OP(Derived::PlainObject::Constant(rows(), cols(), s)); \
+} \
+friend EIGEN_STRONG_INLINE const RCmp ## COMPARATOR ## ReturnType \
+OP(const Scalar& s, const Derived& d) { \
+ return Derived::PlainObject::Constant(d.rows(), d.cols(), s).OP(d); \
+}
+
+#define EIGEN_MAKE_CWISE_COMP_R_OP(OP, R_OP, RCOMPARATOR) \
+template<typename OtherDerived> \
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived> \
+OP(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const \
+{ \
+ return CwiseBinaryOp<internal::scalar_cmp_op<Scalar, internal::cmp_##RCOMPARATOR>, const OtherDerived, const Derived>(other.derived(), derived()); \
+} \
+\
+inline const RCmp ## RCOMPARATOR ## ReturnType \
+OP(const Scalar& s) const { \
+ return Derived::PlainObject::Constant(rows(), cols(), s).R_OP(*this); \
+} \
+friend inline const Cmp ## RCOMPARATOR ## ReturnType \
+OP(const Scalar& s, const Derived& d) { \
+ return d.R_OP(Derived::PlainObject::Constant(d.rows(), d.cols(), s)); \
+}
+
+
/** \returns an expression of the coefficient-wise \< operator of *this and \a other
*
* Example: \include Cwise_less.cpp
@@ -77,7 +114,7 @@ max
*
* \sa all(), any(), operator>(), operator<=()
*/
-EIGEN_MAKE_CWISE_BINARY_OP(operator<,std::less)
+EIGEN_MAKE_CWISE_COMP_OP(operator<, LT)
/** \returns an expression of the coefficient-wise \<= operator of *this and \a other
*
@@ -86,7 +123,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator<,std::less)
*
* \sa all(), any(), operator>=(), operator<()
*/
-EIGEN_MAKE_CWISE_BINARY_OP(operator<=,std::less_equal)
+EIGEN_MAKE_CWISE_COMP_OP(operator<=, LE)
/** \returns an expression of the coefficient-wise \> operator of *this and \a other
*
@@ -95,7 +132,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator<=,std::less_equal)
*
* \sa all(), any(), operator>=(), operator<()
*/
-EIGEN_MAKE_CWISE_BINARY_OP(operator>,std::greater)
+EIGEN_MAKE_CWISE_COMP_R_OP(operator>, operator<, LT)
/** \returns an expression of the coefficient-wise \>= operator of *this and \a other
*
@@ -104,7 +141,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator>,std::greater)
*
* \sa all(), any(), operator>(), operator<=()
*/
-EIGEN_MAKE_CWISE_BINARY_OP(operator>=,std::greater_equal)
+EIGEN_MAKE_CWISE_COMP_R_OP(operator>=, operator<=, LE)
/** \returns an expression of the coefficient-wise == operator of *this and \a other
*
@@ -118,7 +155,7 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator>=,std::greater_equal)
*
* \sa all(), any(), isApprox(), isMuchSmallerThan()
*/
-EIGEN_MAKE_CWISE_BINARY_OP(operator==,std::equal_to)
+EIGEN_MAKE_CWISE_COMP_OP(operator==, EQ)
/** \returns an expression of the coefficient-wise != operator of *this and \a other
*
@@ -132,7 +169,10 @@ EIGEN_MAKE_CWISE_BINARY_OP(operator==,std::equal_to)
*
* \sa all(), any(), isApprox(), isMuchSmallerThan()
*/
-EIGEN_MAKE_CWISE_BINARY_OP(operator!=,std::not_equal_to)
+EIGEN_MAKE_CWISE_COMP_OP(operator!=, NEQ)
+
+#undef EIGEN_MAKE_CWISE_COMP_OP
+#undef EIGEN_MAKE_CWISE_COMP_R_OP
// scalar addition
@@ -209,3 +249,5 @@ operator||(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_OF_BOOL);
return CwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>(derived(),other.derived());
}
+
+
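The comparison operators are now built on scalar_cmp_op and gained scalar overloads on both sides; a minimal sketch of the resulting boolean expressions:
\code
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::ArrayXf a = Eigen::ArrayXf::LinSpaced(5, 0.f, 4.f);  // 0 1 2 3 4
  std::cout << (a < 2.f).count() << "\n";   // 2, array on the left
  std::cout << (2.f < a).count() << "\n";   // 2, scalar on the left (new friend operator)
  std::cout << (a == 3.f).any() << "\n";    // 1
}
\endcode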
diff --git a/Eigen/src/plugins/ArrayCwiseUnaryOps.h b/Eigen/src/plugins/ArrayCwiseUnaryOps.h
index a59636790..1c3ed3fcd 100644
--- a/Eigen/src/plugins/ArrayCwiseUnaryOps.h
+++ b/Eigen/src/plugins/ArrayCwiseUnaryOps.h
@@ -185,19 +185,3 @@ cube() const
{
return derived();
}
-
-#define EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(METHOD_NAME,FUNCTOR) \
- inline const CwiseUnaryOp<std::binder2nd<FUNCTOR<Scalar> >, const Derived> \
- METHOD_NAME(const Scalar& s) const { \
- return CwiseUnaryOp<std::binder2nd<FUNCTOR<Scalar> >, const Derived> \
- (derived(), std::bind2nd(FUNCTOR<Scalar>(), s)); \
- }
-
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator==, std::equal_to)
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator!=, std::not_equal_to)
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator<, std::less)
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator<=, std::less_equal)
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator>, std::greater)
-EIGEN_MAKE_SCALAR_CWISE_UNARY_OP(operator>=, std::greater_equal)
-
-
diff --git a/Eigen/src/plugins/MatrixCwiseBinaryOps.h b/Eigen/src/plugins/MatrixCwiseBinaryOps.h
index 7f62149e0..c4a042b70 100644
--- a/Eigen/src/plugins/MatrixCwiseBinaryOps.h
+++ b/Eigen/src/plugins/MatrixCwiseBinaryOps.h
@@ -124,3 +124,20 @@ cwiseQuotient(const EIGEN_CURRENT_STORAGE_BASE_CLASS<OtherDerived> &other) const
{
return CwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
}
+
+typedef CwiseBinaryOp<internal::scalar_cmp_op<Scalar,internal::cmp_EQ>, const Derived, const ConstantReturnType> CwiseScalarEqualReturnType;
+
+/** \returns an expression of the coefficient-wise == operator of \c *this and a scalar \a s
+ *
+ * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
+ * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
+ * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
+ * isMuchSmallerThan().
+ *
+ * \sa cwiseEqual(const MatrixBase<OtherDerived> &) const
+ */
+inline const CwiseScalarEqualReturnType
+cwiseEqual(const Scalar& s) const
+{
+ return CwiseScalarEqualReturnType(derived(), Derived::Constant(rows(), cols(), s), internal::scalar_cmp_op<Scalar,internal::cmp_EQ>());
+}
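The relocated cwiseEqual(Scalar) keeps its exact-comparison semantics; for floating point, isApprox() remains the right tool, as the warning says. A small sketch with integer coefficients, where exact comparison is meaningful:
\code
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXi m(2,2);
  m << 1, 2,
       2, 4;
  std::cout << m.cwiseEqual(2).count() << "\n";  // 2 coefficients equal 2
}
\endcode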
diff --git a/Eigen/src/plugins/MatrixCwiseUnaryOps.h b/Eigen/src/plugins/MatrixCwiseUnaryOps.h
index 0cf0640ba..8de10935d 100644
--- a/Eigen/src/plugins/MatrixCwiseUnaryOps.h
+++ b/Eigen/src/plugins/MatrixCwiseUnaryOps.h
@@ -50,18 +50,3 @@ cwiseSqrt() const { return derived(); }
inline const CwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived>
cwiseInverse() const { return derived(); }
-/** \returns an expression of the coefficient-wise == operator of \c *this and a scalar \a s
- *
- * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
- * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
- * generally a far better idea to use a fuzzy comparison as provided by isApprox() and
- * isMuchSmallerThan().
- *
- * \sa cwiseEqual(const MatrixBase<OtherDerived> &) const
- */
-inline const CwiseUnaryOp<std::binder1st<std::equal_to<Scalar> >, const Derived>
-cwiseEqual(const Scalar& s) const
-{
- return CwiseUnaryOp<std::binder1st<std::equal_to<Scalar> >,const Derived>
- (derived(), std::bind1st(std::equal_to<Scalar>(), s));
-}
diff --git a/blas/xerbla.cpp b/blas/xerbla.cpp
index 0d57710fe..dd39a5244 100644
--- a/blas/xerbla.cpp
+++ b/blas/xerbla.cpp
@@ -1,7 +1,7 @@
#include <iostream>
-#if (defined __GNUC__)
+#if (defined __GNUC__) && (!defined __MINGW32__) && (!defined __CYGWIN__)
#define EIGEN_WEAK_LINKING __attribute__ ((weak))
#else
#define EIGEN_WEAK_LINKING
diff --git a/cmake/EigenTesting.cmake b/cmake/EigenTesting.cmake
index 80b2841df..f4796ca41 100644
--- a/cmake/EigenTesting.cmake
+++ b/cmake/EigenTesting.cmake
@@ -322,22 +322,29 @@ macro(ei_get_compilerver VAR)
endif()
else()
# on all other system we rely on ${CMAKE_CXX_COMPILER}
- # supporting a "--version" flag
+ # supporting a "--version" or "/version" flag
+
+ if(WIN32 AND NOT CYGWIN)
+ set(EIGEN_CXX_FLAG_VERSION "/version")
+ else()
+ set(EIGEN_CXX_FLAG_VERSION "--version")
+ endif()
# check whether the head command exists
find_program(HEAD_EXE head NO_CMAKE_ENVIRONMENT_PATH NO_CMAKE_PATH NO_CMAKE_SYSTEM_PATH)
if(HEAD_EXE)
- execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version
+ execute_process(COMMAND ${CMAKE_CXX_COMPILER} ${EIGEN_CXX_FLAG_VERSION}
COMMAND head -n 1
OUTPUT_VARIABLE eigen_cxx_compiler_version_string OUTPUT_STRIP_TRAILING_WHITESPACE)
else()
- execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version
+ execute_process(COMMAND ${CMAKE_CXX_COMPILER} ${EIGEN_CXX_FLAG_VERSION}
OUTPUT_VARIABLE eigen_cxx_compiler_version_string OUTPUT_STRIP_TRAILING_WHITESPACE)
string(REGEX REPLACE "[\n\r].*" "" eigen_cxx_compiler_version_string ${eigen_cxx_compiler_version_string})
endif()
ei_get_compilerver_from_cxx_version_string("${eigen_cxx_compiler_version_string}" CNAME CVER)
set(${VAR} "${CNAME}-${CVER}")
+
endif()
endmacro(ei_get_compilerver)
diff --git a/cmake/FindMetis.cmake b/cmake/FindMetis.cmake
index e0040d320..6a0ce790c 100644
--- a/cmake/FindMetis.cmake
+++ b/cmake/FindMetis.cmake
@@ -26,7 +26,7 @@ macro(_metis_check_version)
string(REGEX MATCH "define[ \t]+METIS_VER_SUBMINOR[ \t]+([0-9]+)" _metis_subminor_version_match "${_metis_version_header}")
set(METIS_SUBMINOR_VERSION "${CMAKE_MATCH_1}")
if(NOT METIS_MAJOR_VERSION)
- message(WARNING "Could not determine Metis version. Assuming version 4.0.0")
+ message(STATUS "Could not determine Metis version. Assuming version 4.0.0")
set(METIS_VERSION 4.0.0)
else()
set(METIS_VERSION ${METIS_MAJOR_VERSION}.${METIS_MINOR_VERSION}.${METIS_SUBMINOR_VERSION})
diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
index 8a493031c..2fc2a0dfc 100644
--- a/doc/CMakeLists.txt
+++ b/doc/CMakeLists.txt
@@ -91,7 +91,8 @@ add_custom_target(doc ALL
COMMAND doxygen Doxyfile-unsupported
COMMAND ${CMAKE_COMMAND} -E rename html eigen-doc
COMMAND ${CMAKE_COMMAND} -E remove eigen-doc/eigen-doc.tgz
- COMMAND ${CMAKE_COMMAND} -E tar cfz eigen-doc/eigen-doc.tgz eigen-doc
+ COMMAND ${CMAKE_COMMAND} -E tar cfz eigen-doc.tgz eigen-doc
+ COMMAND ${CMAKE_COMMAND} -E rename eigen-doc.tgz eigen-doc/eigen-doc.tgz
COMMAND ${CMAKE_COMMAND} -E rename eigen-doc html
WORKING_DIRECTORY ${Eigen_BINARY_DIR}/doc)
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index 1a2603b04..696dd2af1 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -222,7 +222,7 @@ ALIASES = "only_for_vectors=This is only for vectors (either row-
"note_about_using_kernel_to_study_multiple_solutions=If you need a complete analysis of the space of solutions, take the one solution obtained by this method and add to it elements of the kernel, as determined by kernel()." \
"note_about_checking_solutions=This method just tries to find as good a solution as possible. If you want to check whether a solution exists or if it is accurate, just call this function to get a result and then compute the error of this result, or use MatrixBase::isApprox() directly, for instance like this: \code bool a_solution_exists = (A*result).isApprox(b, precision); \endcode This method avoids dividing by zero, so that the non-existence of a solution doesn't by itself mean that you'll get \c inf or \c nan values." \
"note_try_to_help_rvo=This function returns the result by value. In order to make that efficient, it is implemented as just a return statement using a special constructor, hopefully allowing the compiler to perform a RVO (return value optimization)." \
- "nonstableyet=\warning This is not considered to be part of the stable public API yet. Changes may happen in future releases. See \ref Experimental \"Experimental parts of Eigen\"
+ "nonstableyet=\warning This is not considered to be part of the stable public API yet. Changes may happen in future releases. See \ref Experimental \"Experimental parts of Eigen\""
ALIASES += "eigenAutoToc= "
ALIASES += "eigenManualPage=\defgroup"
@@ -315,7 +315,7 @@ IDL_PROPERTY_SUPPORT = YES
# member in the group (if any) for the other members of the group. By default
# all members of a group must be documented explicitly.
-DISTRIBUTE_GROUP_DOC = NO
+DISTRIBUTE_GROUP_DOC = YES
# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
# the same type (for instance a group of public functions) to be put as a
@@ -365,7 +365,7 @@ TYPEDEF_HIDES_STRUCT = NO
# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
# corresponding to a cache size of 2^16 = 65536 symbols.
-SYMBOL_CACHE_SIZE = 0
+# SYMBOL_CACHE_SIZE = 0
# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be
# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given
@@ -562,7 +562,7 @@ GENERATE_BUGLIST = NO
# disable (NO) the deprecated list. This list is created by putting
# \deprecated commands in the documentation.
-GENERATE_DEPRECATEDLIST= NO
+GENERATE_DEPRECATEDLIST= YES
# The ENABLED_SECTIONS tag can be used to enable conditional
# documentation sections, marked by \if sectionname ... \endif.
@@ -1465,13 +1465,13 @@ XML_OUTPUT = xml
# which can be used by a validating XML parser to check the
# syntax of the XML files.
-XML_SCHEMA =
+# XML_SCHEMA =
# The XML_DTD tag can be used to specify an XML DTD,
# which can be used by a validating XML parser to check the
# syntax of the XML files.
-XML_DTD =
+# XML_DTD =
# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
# dump the program listings (including syntax highlighting
@@ -1699,7 +1699,7 @@ DOT_NUM_THREADS = 0
# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
# directory containing the font.
-DOT_FONTNAME = FreeSans
+DOT_FONTNAME =
# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
# The default size is 10pt.
diff --git a/doc/TopicMultithreading.dox b/doc/TopicMultithreading.dox
index f7d082668..7db2b0e07 100644
--- a/doc/TopicMultithreading.dox
+++ b/doc/TopicMultithreading.dox
@@ -17,7 +17,7 @@ You can control the number of thread that will be used using either the OpenMP A
Unless setNbThreads has been called, Eigen uses the number of threads specified by OpenMP. You can restore this behavior by calling \code setNbThreads(0); \endcode
You can query the number of threads that will be used with:
\code
-n = Eigen::nbThreads(n);
+n = Eigen::nbThreads( );
\endcode
You can disable Eigen's multi threading at compile time by defining the EIGEN_DONT_PARALLELIZE preprocessor token.
diff --git a/doc/special_examples/CMakeLists.txt b/doc/special_examples/CMakeLists.txt
index f8a0148c8..3ab75dbfe 100644
--- a/doc/special_examples/CMakeLists.txt
+++ b/doc/special_examples/CMakeLists.txt
@@ -10,9 +10,10 @@ if(QT4_FOUND)
target_link_libraries(Tutorial_sparse_example ${EIGEN_STANDARD_LIBRARIES_TO_LINK_TO} ${QT_QTCORE_LIBRARY} ${QT_QTGUI_LIBRARY})
add_custom_command(
- TARGET Tutorial_sparse_example
- POST_BUILD
- COMMAND Tutorial_sparse_example ARGS ${CMAKE_CURRENT_BINARY_DIR}/../html/Tutorial_sparse_example.jpeg
+ TARGET Tutorial_sparse_example
+ POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/../html/
+ COMMAND Tutorial_sparse_example ARGS ${CMAKE_CURRENT_BINARY_DIR}/../html/Tutorial_sparse_example.jpeg
)
add_dependencies(all_examples Tutorial_sparse_example)
diff --git a/failtest/CMakeLists.txt b/failtest/CMakeLists.txt
index 5c4860237..cadc6a255 100644
--- a/failtest/CMakeLists.txt
+++ b/failtest/CMakeLists.txt
@@ -32,6 +32,17 @@ ei_add_failtest("ref_3")
ei_add_failtest("ref_4")
ei_add_failtest("ref_5")
+ei_add_failtest("partialpivlu_int")
+ei_add_failtest("fullpivlu_int")
+ei_add_failtest("llt_int")
+ei_add_failtest("ldlt_int")
+ei_add_failtest("qr_int")
+ei_add_failtest("colpivqr_int")
+ei_add_failtest("fullpivqr_int")
+ei_add_failtest("jacobisvd_int")
+ei_add_failtest("eigensolver_int")
+ei_add_failtest("eigensolver_cplx")
+
if (EIGEN_FAILTEST_FAILURE_COUNT)
message(FATAL_ERROR
"${EIGEN_FAILTEST_FAILURE_COUNT} out of ${EIGEN_FAILTEST_COUNT} failtests FAILED. "
diff --git a/failtest/colpivqr_int.cpp b/failtest/colpivqr_int.cpp
new file mode 100644
index 000000000..db11910d4
--- /dev/null
+++ b/failtest/colpivqr_int.cpp
@@ -0,0 +1,14 @@
+#include "../Eigen/QR"
+
+#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
+#define SCALAR int
+#else
+#define SCALAR float
+#endif
+
+using namespace Eigen;
+
+int main()
+{
+ ColPivHouseholderQR<Matrix<SCALAR,Dynamic,Dynamic> > qr(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
+}
diff --git a/failtest/eigensolver_cplx.cpp b/failtest/eigensolver_cplx.cpp
new file mode 100644
index 000000000..c2e21e189
--- /dev/null
+++ b/failtest/eigensolver_cplx.cpp
@@ -0,0 +1,14 @@
+#include "../Eigen/Eigenvalues"
+
+#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
+#define SCALAR std::complex<double>
+#else
+#define SCALAR float
+#endif
+
+using namespace Eigen;
+
+int main()
+{
+ EigenSolver<Matrix<SCALAR,Dynamic,Dynamic> > eig(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
+}
diff --git a/failtest/eigensolver_int.cpp b/failtest/eigensolver_int.cpp
new file mode 100644
index 000000000..eda8dc20b
--- /dev/null
+++ b/failtest/eigensolver_int.cpp
@@ -0,0 +1,14 @@
+#include "../Eigen/Eigenvalues"
+
+#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
+#define SCALAR int
+#else
+#define SCALAR float
+#endif
+
+using namespace Eigen;
+
+int main()
+{
+ EigenSolver<Matrix<SCALAR,Dynamic,Dynamic> > eig(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
+}
diff --git a/failtest/fullpivlu_int.cpp b/failtest/fullpivlu_int.cpp
new file mode 100644
index 000000000..e9d2c6eb3
--- /dev/null
+++ b/failtest/fullpivlu_int.cpp
@@ -0,0 +1,14 @@
+#include "../Eigen/LU"
+
+#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
+#define SCALAR int
+#else
+#define SCALAR float
+#endif
+
+using namespace Eigen;
+
+int main()
+{
+ FullPivLU<Matrix<SCALAR,Dynamic,Dynamic> > lu(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
+}
diff --git a/failtest/fullpivqr_int.cpp b/failtest/fullpivqr_int.cpp
new file mode 100644
index 000000000..d182a7b6b
--- /dev/null
+++ b/failtest/fullpivqr_int.cpp
@@ -0,0 +1,14 @@
+#include "../Eigen/QR"
+
+#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
+#define SCALAR int
+#else
+#define SCALAR float
+#endif
+
+using namespace Eigen;
+
+int main()
+{
+ FullPivHouseholderQR<Matrix<SCALAR,Dynamic,Dynamic> > qr(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
+}
diff --git a/failtest/jacobisvd_int.cpp b/failtest/jacobisvd_int.cpp
new file mode 100644
index 000000000..12790aef1
--- /dev/null
+++ b/failtest/jacobisvd_int.cpp
@@ -0,0 +1,14 @@
+#include "../Eigen/SVD"
+
+#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
+#define SCALAR int
+#else
+#define SCALAR float
+#endif
+
+using namespace Eigen;
+
+int main()
+{
+ JacobiSVD<Matrix<SCALAR,Dynamic,Dynamic> > qr(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
+}
diff --git a/failtest/ldlt_int.cpp b/failtest/ldlt_int.cpp
new file mode 100644
index 000000000..243e45746
--- /dev/null
+++ b/failtest/ldlt_int.cpp
@@ -0,0 +1,14 @@
+#include "../Eigen/Cholesky"
+
+#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
+#define SCALAR int
+#else
+#define SCALAR float
+#endif
+
+using namespace Eigen;
+
+int main()
+{
+ LDLT<Matrix<SCALAR,Dynamic,Dynamic> > ldlt(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
+}
diff --git a/failtest/llt_int.cpp b/failtest/llt_int.cpp
new file mode 100644
index 000000000..cb020650d
--- /dev/null
+++ b/failtest/llt_int.cpp
@@ -0,0 +1,14 @@
+#include "../Eigen/Cholesky"
+
+#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
+#define SCALAR int
+#else
+#define SCALAR float
+#endif
+
+using namespace Eigen;
+
+int main()
+{
+ LLT<Matrix<SCALAR,Dynamic,Dynamic> > llt(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
+}
diff --git a/failtest/partialpivlu_int.cpp b/failtest/partialpivlu_int.cpp
new file mode 100644
index 000000000..98ef282ea
--- /dev/null
+++ b/failtest/partialpivlu_int.cpp
@@ -0,0 +1,14 @@
+#include "../Eigen/LU"
+
+#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
+#define SCALAR int
+#else
+#define SCALAR float
+#endif
+
+using namespace Eigen;
+
+int main()
+{
+ PartialPivLU<Matrix<SCALAR,Dynamic,Dynamic> > lu(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
+}
diff --git a/failtest/qr_int.cpp b/failtest/qr_int.cpp
new file mode 100644
index 000000000..ce200e818
--- /dev/null
+++ b/failtest/qr_int.cpp
@@ -0,0 +1,14 @@
+#include "../Eigen/QR"
+
+#ifdef EIGEN_SHOULD_FAIL_TO_BUILD
+#define SCALAR int
+#else
+#define SCALAR float
+#endif
+
+using namespace Eigen;
+
+int main()
+{
+ HouseholderQR<Matrix<SCALAR,Dynamic,Dynamic> > qr(Matrix<SCALAR,Dynamic,Dynamic>::Random(10,10));
+}
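
The ten new failtests above all follow the same pattern: SCALAR is float in the regular build and an unsupported type (int, or std::complex<double> for eigensolver_cplx) when EIGEN_SHOULD_FAIL_TO_BUILD is defined, so the second compilation is expected to fail, typically via one of Eigen's static assertions. Seen from user code, the guarded situation looks roughly like this (a sketch; the cast-based workaround is mine, not part of the patch):

    #include <Eigen/LU>

    using namespace Eigen;

    int main()
    {
      MatrixXi Ai = MatrixXi::Random(4,4);
      // PartialPivLU<MatrixXi> lu_bad(Ai);            // expected to be rejected at compile time
      PartialPivLU<MatrixXd> lu(Ai.cast<double>());    // work on a floating-point copy instead
      return 0;
    }
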
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index d32451df6..f5f208a37 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -283,3 +283,9 @@ mark_as_advanced(EIGEN_TEST_EIGEN2)
if(EIGEN_TEST_EIGEN2)
add_subdirectory(eigen2)
endif()
+
+
+option(EIGEN_TEST_BUILD_DOCUMENTATION "Test building the doxygen documentation" OFF)
+IF(EIGEN_TEST_BUILD_DOCUMENTATION)
+ add_dependencies(buildtests doc)
+ENDIF()
diff --git a/test/array.cpp b/test/array.cpp
index c607da631..68f6b3d7a 100644
--- a/test/array.cpp
+++ b/test/array.cpp
@@ -109,6 +109,8 @@ template<typename ArrayType> void comparisons(const ArrayType& m)
VERIFY(! (m1 < m3).all() );
VERIFY(! (m1 > m3).all() );
}
+ VERIFY(!(m1 > m2 && m1 < m2).any());
+ VERIFY((m1 <= m2 || m1 >= m2).all());
// comparisons to scalar
VERIFY( (m1 != (m1(r,c)+1) ).any() );
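
The two added checks rely on Eigen's coefficient-wise boolean operators on array expressions: (m1 > m2) is itself an array of booleans, && and || combine such arrays element by element, and any()/all() reduce them. A minimal self-contained sketch of the same idea (names are mine):

    #include <Eigen/Core>
    #include <iostream>

    using namespace Eigen;

    int main()
    {
      ArrayXXf a = ArrayXXf::Random(3,3), b = ArrayXXf::Random(3,3);
      // No coefficient can be both strictly smaller and strictly larger than its
      // counterpart, and every coefficient compares <= or >= to it.
      std::cout << ((a > b && a < b).any() ? "inconsistent" : "ok") << "\n";
      std::cout << ((a <= b || a >= b).all() ? "ok" : "inconsistent") << "\n";
      return 0;
    }
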
diff --git a/test/array_for_matrix.cpp b/test/array_for_matrix.cpp
index 9a50f99ab..9667e1f14 100644
--- a/test/array_for_matrix.cpp
+++ b/test/array_for_matrix.cpp
@@ -102,6 +102,7 @@ template<typename MatrixType> void comparisons(const MatrixType& m)
VERIFY( (m1.array() > (m1(r,c)-1) ).any() );
VERIFY( (m1.array() < (m1(r,c)+1) ).any() );
VERIFY( (m1.array() == m1(r,c) ).any() );
+ VERIFY( m1.cwiseEqual(m1(r,c)).any() );
// test Select
VERIFY_IS_APPROX( (m1.array()<m2.array()).select(m1,m2), m1.cwiseMin(m2) );
diff --git a/test/conjugate_gradient.cpp b/test/conjugate_gradient.cpp
index 869051b31..019cc4d64 100644
--- a/test/conjugate_gradient.cpp
+++ b/test/conjugate_gradient.cpp
@@ -12,13 +12,15 @@
template<typename T> void test_conjugate_gradient_T()
{
- ConjugateGradient<SparseMatrix<T>, Lower> cg_colmajor_lower_diag;
- ConjugateGradient<SparseMatrix<T>, Upper> cg_colmajor_upper_diag;
+ ConjugateGradient<SparseMatrix<T>, Lower > cg_colmajor_lower_diag;
+ ConjugateGradient<SparseMatrix<T>, Upper > cg_colmajor_upper_diag;
+ ConjugateGradient<SparseMatrix<T>, Lower|Upper> cg_colmajor_loup_diag;
ConjugateGradient<SparseMatrix<T>, Lower, IdentityPreconditioner> cg_colmajor_lower_I;
ConjugateGradient<SparseMatrix<T>, Upper, IdentityPreconditioner> cg_colmajor_upper_I;
CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_lower_diag) );
CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_upper_diag) );
+ CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_loup_diag) );
CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_lower_I) );
CALL_SUBTEST( check_sparse_spd_solving(cg_colmajor_upper_I) );
}
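
The new Lower|Upper instantiation tells ConjugateGradient that both triangles of the symmetric matrix are stored, so the matrix can be used directly instead of through a selfadjoint view of a single triangle (the sparse_solver.h hunk further down builds the test matrix accordingly). A hedged usage sketch, with names of my choosing:

    #include <Eigen/Sparse>
    #include <Eigen/IterativeLinearSolvers>

    using namespace Eigen;

    VectorXd solve_spd(const SparseMatrix<double>& A, const VectorXd& b)
    {
      // A is assumed symmetric positive definite with both triangles stored.
      ConjugateGradient<SparseMatrix<double>, Lower|Upper> cg;
      cg.compute(A);
      return cg.solve(b);
    }
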
diff --git a/test/lu.cpp b/test/lu.cpp
index 25f86755a..374652694 100644
--- a/test/lu.cpp
+++ b/test/lu.cpp
@@ -100,7 +100,9 @@ template<typename MatrixType> void lu_invertible()
LU.h
*/
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
- int size = internal::random<int>(1,EIGEN_TEST_MAX_SIZE);
+ DenseIndex size = MatrixType::RowsAtCompileTime;
+ if( size==Dynamic)
+ size = internal::random<DenseIndex>(1,EIGEN_TEST_MAX_SIZE);
MatrixType m1(size, size), m2(size, size), m3(size, size);
FullPivLU<MatrixType> lu;
@@ -122,6 +124,10 @@ template<typename MatrixType> void lu_invertible()
m2 = lu.solve(m3);
VERIFY_IS_APPROX(m3, m1*m2);
VERIFY_IS_APPROX(m2, lu.inverse()*m3);
+
+ // Regression test for Bug 302
+ MatrixType m4 = MatrixType::Random(size,size);
+ VERIFY_IS_APPROX(lu.solve(m3*m4), lu.solve(m3)*m4);
}
template<typename MatrixType> void lu_partial_piv()
@@ -171,6 +177,7 @@ void test_lu()
{
for(int i = 0; i < g_repeat; i++) {
CALL_SUBTEST_1( lu_non_invertible<Matrix3f>() );
+ CALL_SUBTEST_1( lu_invertible<Matrix3f>() );
CALL_SUBTEST_1( lu_verify_assert<Matrix3f>() );
CALL_SUBTEST_2( (lu_non_invertible<Matrix<double, 4, 6> >()) );
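
The bug-302 regression added above checks that solve() applied to a product expression matches solving first and multiplying afterwards, i.e. A^-1 (B C) == (A^-1 B) C. A standalone version of the same check (a sketch; variable names are mine):

    #include <Eigen/LU>

    using namespace Eigen;

    bool solve_of_product_is_consistent(int n)
    {
      MatrixXd A = MatrixXd::Random(n,n), B = MatrixXd::Random(n,n), C = MatrixXd::Random(n,n);
      FullPivLU<MatrixXd> lu(A);
      MatrixXd r1 = lu.solve(B*C);   // solve against the product expression
      MatrixXd r2 = lu.solve(B)*C;   // solve, then multiply
      return r1.isApprox(r2);
    }
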
diff --git a/test/mapped_matrix.cpp b/test/mapped_matrix.cpp
index de9dbbde3..58904fa37 100644
--- a/test/mapped_matrix.cpp
+++ b/test/mapped_matrix.cpp
@@ -114,6 +114,28 @@ template<typename PlainObjectType> void check_const_correctness(const PlainObjec
VERIFY( !(Map<ConstPlainObjectType, Aligned>::Flags & LvalueBit) );
}
+template<typename Scalar>
+void map_not_aligned_on_scalar()
+{
+ typedef Matrix<Scalar,Dynamic,Dynamic> MatrixType;
+ typedef typename MatrixType::Index Index;
+ Index size = 11;
+ Scalar* array1 = internal::aligned_new<Scalar>((size+1)*(size+1)+1);
+ Scalar* array2 = reinterpret_cast<Scalar*>(sizeof(Scalar)/2+std::size_t(array1));
+ Map<MatrixType,0,OuterStride<> > map2(array2, size, size, OuterStride<>(size+1));
+ MatrixType m2 = MatrixType::Random(size,size);
+ map2 = m2;
+ VERIFY_IS_EQUAL(m2, map2);
+
+ typedef Matrix<Scalar,Dynamic,1> VectorType;
+ Map<VectorType> map3(array2, size);
+ MatrixType v3 = VectorType::Random(size);
+ map3 = v3;
+ VERIFY_IS_EQUAL(v3, map3);
+
+ internal::aligned_delete(array1, (size+1)*(size+1)+1);
+}
+
void test_mapped_matrix()
{
for(int i = 0; i < g_repeat; i++) {
@@ -137,5 +159,7 @@ void test_mapped_matrix()
CALL_SUBTEST_8( map_static_methods(RowVector3d()) );
CALL_SUBTEST_9( map_static_methods(VectorXcd(8)) );
CALL_SUBTEST_10( map_static_methods(VectorXf(12)) );
+
+ CALL_SUBTEST_11( map_not_aligned_on_scalar<double>() );
}
}
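
map_not_aligned_on_scalar() above writes through a Map whose base pointer is offset by half a scalar, so the data is not even aligned to sizeof(Scalar) and the unaligned load/store paths must be taken. For reference, a plain (aligned) Map over external storage with an explicit outer stride looks like this (a sketch, not the test itself):

    #include <Eigen/Core>
    #include <vector>

    using namespace Eigen;

    int main()
    {
      const int n = 11;
      std::vector<double> buf((n+1)*n);    // column-major storage, each column padded by one element
      Map<MatrixXd, 0, OuterStride<> > m(buf.data(), n, n, OuterStride<>(n+1));
      m.setRandom();                       // writes land in buf through the map
      return 0;
    }
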
diff --git a/test/product_extra.cpp b/test/product_extra.cpp
index 744a1ef7f..ea2486937 100644
--- a/test/product_extra.cpp
+++ b/test/product_extra.cpp
@@ -109,8 +109,67 @@ void mat_mat_scalar_scalar_product()
double det = 6.0, wt = 0.5;
VERIFY_IS_APPROX(dNdxy.transpose()*dNdxy*det*wt, det*wt*dNdxy.transpose()*dNdxy);
}
+
+template <typename MatrixType>
+void zero_sized_objects(const MatrixType& m)
+{
+ typedef typename MatrixType::Scalar Scalar;
+ const int PacketSize = internal::packet_traits<Scalar>::size;
+ const int PacketSize1 = PacketSize>1 ? PacketSize-1 : 1;
+ DenseIndex rows = m.rows();
+ DenseIndex cols = m.cols();
+
+ {
+ MatrixType res, a(rows,0), b(0,cols);
+ VERIFY_IS_APPROX( (res=a*b), MatrixType::Zero(rows,cols) );
+ VERIFY_IS_APPROX( (res=a*a.transpose()), MatrixType::Zero(rows,rows) );
+ VERIFY_IS_APPROX( (res=b.transpose()*b), MatrixType::Zero(cols,cols) );
+ VERIFY_IS_APPROX( (res=b.transpose()*a.transpose()), MatrixType::Zero(cols,rows) );
+ }
+
+ {
+ MatrixType res, a(rows,cols), b(cols,0);
+ res = a*b;
+ VERIFY(res.rows()==rows && res.cols()==0);
+ b.resize(0,rows);
+ res = b*a;
+ VERIFY(res.rows()==0 && res.cols()==cols);
+ }
+
+ {
+ Matrix<Scalar,PacketSize,0> a;
+ Matrix<Scalar,0,1> b;
+ Matrix<Scalar,PacketSize,1> res;
+ VERIFY_IS_APPROX( (res=a*b), MatrixType::Zero(PacketSize,1) );
+ VERIFY_IS_APPROX( (res=a.lazyProduct(b)), MatrixType::Zero(PacketSize,1) );
+ }
+
+ {
+ Matrix<Scalar,PacketSize1,0> a;
+ Matrix<Scalar,0,1> b;
+ Matrix<Scalar,PacketSize1,1> res;
+ VERIFY_IS_APPROX( (res=a*b), MatrixType::Zero(PacketSize1,1) );
+ VERIFY_IS_APPROX( (res=a.lazyProduct(b)), MatrixType::Zero(PacketSize1,1) );
+ }
-void zero_sized_objects()
+ {
+ Matrix<Scalar,PacketSize,Dynamic> a(PacketSize,0);
+ Matrix<Scalar,Dynamic,1> b(0,1);
+ Matrix<Scalar,PacketSize,1> res;
+ VERIFY_IS_APPROX( (res=a*b), MatrixType::Zero(PacketSize,1) );
+ VERIFY_IS_APPROX( (res=a.lazyProduct(b)), MatrixType::Zero(PacketSize,1) );
+ }
+
+ {
+ Matrix<Scalar,PacketSize1,Dynamic> a(PacketSize1,0);
+ Matrix<Scalar,Dynamic,1> b(0,1);
+ Matrix<Scalar,PacketSize1,1> res;
+ VERIFY_IS_APPROX( (res=a*b), MatrixType::Zero(PacketSize1,1) );
+ VERIFY_IS_APPROX( (res=a.lazyProduct(b)), MatrixType::Zero(PacketSize1,1) );
+ }
+}
+
+void bug_127()
{
// Bug 127
//
@@ -171,7 +230,8 @@ void test_product_extra()
CALL_SUBTEST_2( mat_mat_scalar_scalar_product() );
CALL_SUBTEST_3( product_extra(MatrixXcf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2), internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2))) );
CALL_SUBTEST_4( product_extra(MatrixXcd(internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2), internal::random<int>(1,EIGEN_TEST_MAX_SIZE/2))) );
+ CALL_SUBTEST_1( zero_sized_objects(MatrixXf(internal::random<int>(1,EIGEN_TEST_MAX_SIZE), internal::random<int>(1,EIGEN_TEST_MAX_SIZE))) );
}
- CALL_SUBTEST_5( zero_sized_objects() );
+ CALL_SUBTEST_5( bug_127() );
CALL_SUBTEST_6( unaligned_objects() );
}
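
zero_sized_objects(m) above covers products in which one dimension is zero, for both fixed and dynamic sizes and for both the general product and lazyProduct(). Such a product is an empty sum, so the expected result is a correctly sized, all-zero (or empty) matrix; for instance:

    #include <Eigen/Core>
    #include <cassert>

    using namespace Eigen;

    int main()
    {
      MatrixXf a(3,0), b(0,4);      // inner dimension is zero
      MatrixXf r = a * b;           // well defined: a 3x4 matrix of zeros
      assert(r.rows() == 3 && r.cols() == 4 && r.isZero());
      return 0;
    }
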
diff --git a/test/product_mmtr.cpp b/test/product_mmtr.cpp
index 7d6746800..aeba009f4 100644
--- a/test/product_mmtr.cpp
+++ b/test/product_mmtr.cpp
@@ -24,8 +24,8 @@ template<typename Scalar> void mmtr(int size)
DenseIndex othersize = internal::random<DenseIndex>(1,200);
- MatrixColMaj matc(size, size);
- MatrixRowMaj matr(size, size);
+ MatrixColMaj matc = MatrixColMaj::Zero(size, size);
+ MatrixRowMaj matr = MatrixRowMaj::Zero(size, size);
MatrixColMaj ref1(size, size), ref2(size, size);
MatrixColMaj soc(size,othersize); soc.setRandom();
diff --git a/test/real_qz.cpp b/test/real_qz.cpp
index 7d743a734..a1766c6d9 100644
--- a/test/real_qz.cpp
+++ b/test/real_qz.cpp
@@ -25,6 +25,22 @@ template<typename MatrixType> void real_qz(const MatrixType& m)
MatrixType A = MatrixType::Random(dim,dim),
B = MatrixType::Random(dim,dim);
+
+ // Regression test for bug 985: Randomly set rows or columns to zero
+ Index k=internal::random<Index>(0, dim-1);
+ switch(internal::random<int>(0,10)) {
+ case 0:
+ A.row(k).setZero(); break;
+ case 1:
+ A.col(k).setZero(); break;
+ case 2:
+ B.row(k).setZero(); break;
+ case 3:
+ B.col(k).setZero(); break;
+ default:
+ break;
+ }
+
RealQZ<MatrixType> qz(A,B);
VERIFY_IS_EQUAL(qz.info(), Success);
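
The bug-985 regression zeroes a random row or column of A or B before factorizing, which stresses the QZ iteration on (near-)singular pencils. For reference, RealQZ produces orthogonal Q and Z with A = Q S Z and B = Q T Z, S quasi-upper-triangular and T upper triangular; a hedged usage sketch (function name is mine):

    #include <Eigen/Eigenvalues>

    using namespace Eigen;

    double qz_residual(const MatrixXd& A, const MatrixXd& B)
    {
      RealQZ<MatrixXd> qz(A, B);
      if (qz.info() != Success) return -1.0;
      // A == Q*S*Z and B == Q*T*Z should hold up to roundoff.
      double resA = (qz.matrixQ()*qz.matrixS()*qz.matrixZ() - A).norm();
      double resB = (qz.matrixQ()*qz.matrixT()*qz.matrixZ() - B).norm();
      return resA > resB ? resA : resB;
    }
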
diff --git a/test/ref.cpp b/test/ref.cpp
index 32eb31048..44bbd3bf1 100644
--- a/test/ref.cpp
+++ b/test/ref.cpp
@@ -229,6 +229,28 @@ void call_ref()
VERIFY_EVALUATION_COUNT( call_ref_7(c,c), 0);
}
+typedef Matrix<double,Dynamic,Dynamic,RowMajor> RowMatrixXd;
+int test_ref_overload_fun1(Ref<MatrixXd> ) { return 1; }
+int test_ref_overload_fun1(Ref<RowMatrixXd> ) { return 2; }
+int test_ref_overload_fun1(Ref<MatrixXf> ) { return 3; }
+
+int test_ref_overload_fun2(Ref<const MatrixXd> ) { return 4; }
+int test_ref_overload_fun2(Ref<const MatrixXf> ) { return 5; }
+
+// See also bug 969
+void test_ref_overloads()
+{
+ MatrixXd Ad, Bd;
+ RowMatrixXd rAd, rBd;
+ VERIFY( test_ref_overload_fun1(Ad)==1 );
+ VERIFY( test_ref_overload_fun1(rAd)==2 );
+
+ MatrixXf Af, Bf;
+ VERIFY( test_ref_overload_fun2(Ad)==4 );
+ VERIFY( test_ref_overload_fun2(Ad+Bd)==4 );
+ VERIFY( test_ref_overload_fun2(Af+Bf)==5 );
+}
+
void test_ref()
{
for(int i = 0; i < g_repeat; i++) {
@@ -249,4 +271,6 @@ void test_ref()
CALL_SUBTEST_5( ref_matrix(MatrixXi(internal::random<int>(1,10),internal::random<int>(1,10))) );
CALL_SUBTEST_6( call_ref() );
}
+
+ CALL_SUBTEST_7( test_ref_overloads() );
}
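
The overload tests added above (see also bug 969) pin down which Ref<> signature is selected: a plain column-major MatrixXd picks Ref<MatrixXd>, a row-major matrix picks Ref<RowMatrixXd>, and an expression such as Ad+Bd can only bind to a Ref<const ...>, which is allowed to evaluate into a temporary. A sketch of that writable-versus-const distinction (functions here are mine, not the test's):

    #include <Eigen/Core>

    using namespace Eigen;

    void scale_in_place(Ref<MatrixXd> m)        { m *= 2.0; }        // needs directly writable storage
    double sum_of(const Ref<const MatrixXd>& m) { return m.sum(); }  // may copy an expression into a temporary

    int main()
    {
      MatrixXd A = MatrixXd::Random(4,4), B = MatrixXd::Random(4,4);
      scale_in_place(A);           // OK: A is a plain, writable MatrixXd
      double s = sum_of(A + B);    // OK: the expression is evaluated into a hidden temporary
      (void)s;
      return 0;
    }
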
diff --git a/test/sparse_basic.cpp b/test/sparse_basic.cpp
index 498ecfe29..ce41d713c 100644
--- a/test/sparse_basic.cpp
+++ b/test/sparse_basic.cpp
@@ -24,6 +24,7 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
double density = (std::max)(8./(rows*cols), 0.01);
typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
typedef Matrix<Scalar,Dynamic,1> DenseVector;
+ typedef Matrix<Scalar,1,Dynamic> RowDenseVector;
Scalar eps = 1e-6;
Scalar s1 = internal::random<Scalar>();
@@ -52,7 +53,7 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
refMat.coeffRef(nonzeroCoords[0].x(), nonzeroCoords[0].y()) = Scalar(5);
VERIFY_IS_APPROX(m, refMat);
- /*
+
// test InnerIterators and Block expressions
for (int t=0; t<10; ++t)
{
@@ -61,23 +62,54 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
int w = internal::random<int>(1,cols-j-1);
int h = internal::random<int>(1,rows-i-1);
- // VERIFY_IS_APPROX(m.block(i,j,h,w), refMat.block(i,j,h,w));
+ VERIFY_IS_APPROX(m.block(i,j,h,w), refMat.block(i,j,h,w));
for(int c=0; c<w; c++)
{
VERIFY_IS_APPROX(m.block(i,j,h,w).col(c), refMat.block(i,j,h,w).col(c));
for(int r=0; r<h; r++)
{
- // VERIFY_IS_APPROX(m.block(i,j,h,w).col(c).coeff(r), refMat.block(i,j,h,w).col(c).coeff(r));
+ VERIFY_IS_APPROX(m.block(i,j,h,w).col(c).coeff(r), refMat.block(i,j,h,w).col(c).coeff(r));
+ VERIFY_IS_APPROX(m.block(i,j,h,w).coeff(r,c), refMat.block(i,j,h,w).coeff(r,c));
+ }
+ }
+ for(int r=0; r<h; r++)
+ {
+ VERIFY_IS_APPROX(m.block(i,j,h,w).row(r), refMat.block(i,j,h,w).row(r));
+ for(int c=0; c<w; c++)
+ {
+ VERIFY_IS_APPROX(m.block(i,j,h,w).row(r).coeff(c), refMat.block(i,j,h,w).row(r).coeff(c));
+ VERIFY_IS_APPROX(m.block(i,j,h,w).coeff(r,c), refMat.block(i,j,h,w).coeff(r,c));
}
}
- // for(int r=0; r<h; r++)
- // {
- // VERIFY_IS_APPROX(m.block(i,j,h,w).row(r), refMat.block(i,j,h,w).row(r));
- // for(int c=0; c<w; c++)
- // {
- // VERIFY_IS_APPROX(m.block(i,j,h,w).row(r).coeff(c), refMat.block(i,j,h,w).row(r).coeff(c));
- // }
- // }
+
+ VERIFY_IS_APPROX(m.middleCols(j,w), refMat.middleCols(j,w));
+ VERIFY_IS_APPROX(m.middleRows(i,h), refMat.middleRows(i,h));
+ for(int r=0; r<h; r++)
+ {
+ VERIFY_IS_APPROX(m.middleCols(j,w).row(r), refMat.middleCols(j,w).row(r));
+ VERIFY_IS_APPROX(m.middleRows(i,h).row(r), refMat.middleRows(i,h).row(r));
+ for(int c=0; c<w; c++)
+ {
+ VERIFY_IS_APPROX(m.col(c).coeff(r), refMat.col(c).coeff(r));
+ VERIFY_IS_APPROX(m.row(r).coeff(c), refMat.row(r).coeff(c));
+
+ VERIFY_IS_APPROX(m.middleCols(j,w).coeff(r,c), refMat.middleCols(j,w).coeff(r,c));
+ VERIFY_IS_APPROX(m.middleRows(i,h).coeff(r,c), refMat.middleRows(i,h).coeff(r,c));
+ if(m.middleCols(j,w).coeff(r,c) != Scalar(0))
+ {
+ VERIFY_IS_APPROX(m.middleCols(j,w).coeffRef(r,c), refMat.middleCols(j,w).coeff(r,c));
+ }
+ if(m.middleRows(i,h).coeff(r,c) != Scalar(0))
+ {
+ VERIFY_IS_APPROX(m.middleRows(i,h).coeff(r,c), refMat.middleRows(i,h).coeff(r,c));
+ }
+ }
+ }
+ for(int c=0; c<w; c++)
+ {
+ VERIFY_IS_APPROX(m.middleCols(j,w).col(c), refMat.middleCols(j,w).col(c));
+ VERIFY_IS_APPROX(m.middleRows(i,h).col(c), refMat.middleRows(i,h).col(c));
+ }
}
for(int c=0; c<cols; c++)
@@ -91,7 +123,7 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
VERIFY_IS_APPROX(m.row(r) + m.row(r), (m + m).row(r));
VERIFY_IS_APPROX(m.row(r) + m.row(r), refMat.row(r) + refMat.row(r));
}
- */
+
// test assertion
VERIFY_RAISES_ASSERT( m.coeffRef(-1,1) = 0 );
@@ -326,6 +358,15 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
refMat2.col(i) = refMat2.col(i) * s1;
VERIFY_IS_APPROX(m2,refMat2);
}
+
+ VERIFY_IS_APPROX(DenseVector(m2.col(j0)), refMat2.col(j0));
+ VERIFY_IS_APPROX(m2.col(j0), refMat2.col(j0));
+
+ VERIFY_IS_APPROX(RowDenseVector(m2.row(j0)), refMat2.row(j0));
+ VERIFY_IS_APPROX(m2.row(j0), refMat2.row(j0));
+
+ VERIFY_IS_APPROX(m2.block(j0,j1,n0,n0), refMat2.block(j0,j1,n0,n0));
+ VERIFY_IS_APPROX((2*m2).block(j0,j1,n0,n0), (2*refMat2).block(j0,j1,n0,n0));
}
// test prune
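
The re-enabled block tests read sparse sub-blocks coefficient by coefficient through block(), middleRows()/middleCols(), row() and col(), which is fine for checking against the dense reference; for traversing only the stored entries, an InnerIterator over the corresponding outer vectors is the usual tool. A small sketch (function name is mine):

    #include <Eigen/Sparse>
    #include <iostream>

    using namespace Eigen;

    // Print the stored entries of columns [j, j+w) of a column-major sparse matrix,
    // i.e. the non-zeros that m.middleCols(j,w) would expose.
    void print_middle_cols(const SparseMatrix<double>& m, int j, int w)
    {
      for (int c = 0; c < w; ++c)
        for (SparseMatrix<double>::InnerIterator it(m, j + c); it; ++it)
          std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";
    }
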
diff --git a/test/sparse_solver.h b/test/sparse_solver.h
index 244e81c1b..59d77daa2 100644
--- a/test/sparse_solver.h
+++ b/test/sparse_solver.h
@@ -161,7 +161,10 @@ int generate_sparse_spd_problem(Solver& , typename Solver::MatrixType& A, typena
dA = dM * dM.adjoint();
halfA.resize(size,size);
- halfA.template selfadjointView<Solver::UpLo>().rankUpdate(M);
+ if(Solver::UpLo==(Lower|Upper))
+ halfA = A;
+ else
+ halfA.template selfadjointView<Solver::UpLo>().rankUpdate(M);
return size;
}
diff --git a/test/sparselu.cpp b/test/sparselu.cpp
index 52371cb12..37eb069a9 100644
--- a/test/sparselu.cpp
+++ b/test/sparselu.cpp
@@ -47,6 +47,9 @@ template<typename T> void test_sparselu_T()
check_sparse_square_abs_determinant(sparselu_colamd);
check_sparse_square_abs_determinant(sparselu_amd);
+
+ check_sparse_square_determinant(sparselu_colamd);
+ check_sparse_square_determinant(sparselu_amd);
}
void test_sparselu()
diff --git a/unsupported/Eigen/CMakeLists.txt b/unsupported/Eigen/CMakeLists.txt
index e06f1238b..e1fbf97e2 100644
--- a/unsupported/Eigen/CMakeLists.txt
+++ b/unsupported/Eigen/CMakeLists.txt
@@ -1,6 +1,6 @@
-set(Eigen_HEADERS AdolcForward BVH IterativeSolvers MatrixFunctions MoreVectorization AutoDiff AlignedVector3 Polynomials
- FFT NonLinearOptimization SparseExtra IterativeSolvers
- NumericalDiff Skyline MPRealSupport OpenGLSupport KroneckerProduct Splines LevenbergMarquardt
+set(Eigen_HEADERS AdolcForward AlignedVector3 ArpackSupport AutoDiff BVH FFT IterativeSolvers KroneckerProduct LevenbergMarquardt
+ MatrixFunctions MoreVectorization MPRealSupport NonLinearOptimization NumericalDiff OpenGLSupport Polynomials
+ Skyline SparseExtra Splines
)
install(FILES
diff --git a/unsupported/Eigen/src/CMakeLists.txt b/unsupported/Eigen/src/CMakeLists.txt
index f3180b52b..125c43fdf 100644
--- a/unsupported/Eigen/src/CMakeLists.txt
+++ b/unsupported/Eigen/src/CMakeLists.txt
@@ -2,6 +2,8 @@ ADD_SUBDIRECTORY(AutoDiff)
ADD_SUBDIRECTORY(BVH)
ADD_SUBDIRECTORY(FFT)
ADD_SUBDIRECTORY(IterativeSolvers)
+ADD_SUBDIRECTORY(KroneckerProduct)
+ADD_SUBDIRECTORY(LevenbergMarquardt)
ADD_SUBDIRECTORY(MatrixFunctions)
ADD_SUBDIRECTORY(MoreVectorization)
ADD_SUBDIRECTORY(NonLinearOptimization)
@@ -9,5 +11,4 @@ ADD_SUBDIRECTORY(NumericalDiff)
ADD_SUBDIRECTORY(Polynomials)
ADD_SUBDIRECTORY(Skyline)
ADD_SUBDIRECTORY(SparseExtra)
-ADD_SUBDIRECTORY(KroneckerProduct)
ADD_SUBDIRECTORY(Splines)
diff --git a/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
index c8c84069e..7ba13afd2 100644
--- a/unsupported/Eigen/src/IterativeSolvers/GMRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/GMRES.h
@@ -246,20 +246,7 @@ struct traits<GMRES<_MatrixType,_Preconditioner> >
* \endcode
*
* By default the iterations start with x=0 as an initial guess of the solution.
- * One can control the start using the solveWithGuess() method. Here is a step by
- * step execution example starting with a random guess and printing the evolution
- * of the estimated error:
- * * \code
- * x = VectorXd::Random(n);
- * solver.setMaxIterations(1);
- * int i = 0;
- * do {
- * x = solver.solveWithGuess(b,x);
- * std::cout << i << " : " << solver.error() << std::endl;
- * ++i;
- * } while (solver.info()!=Success && i<100);
- * \endcode
- * Note that such a step by step excution is slightly slower.
+ * One can control the start using the solveWithGuess() method.
*
* \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
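
With the inline step-by-step example removed from the documentation, only the pointer to solveWithGuess() remains; for reference, the basic warm-start pattern looks roughly like this (a sketch, assuming the unsupported IterativeSolvers module header and the default preconditioner):

    #include <Eigen/Sparse>
    #include <unsupported/Eigen/IterativeSolvers>

    using namespace Eigen;

    VectorXd refine(const SparseMatrix<double>& A, const VectorXd& b, const VectorXd& x0)
    {
      GMRES<SparseMatrix<double> > solver(A);
      return solver.solveWithGuess(b, x0);   // iterations start from x0 instead of x = 0
    }
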
diff --git a/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
index 0e56342a8..30f26aa50 100644
--- a/unsupported/Eigen/src/IterativeSolvers/MINRES.h
+++ b/unsupported/Eigen/src/IterativeSolvers/MINRES.h
@@ -37,22 +37,31 @@ namespace Eigen {
typedef typename Dest::Scalar Scalar;
typedef Matrix<Scalar,Dynamic,1> VectorType;
+ // Check for zero rhs
+ const RealScalar rhsNorm2(rhs.squaredNorm());
+ if(rhsNorm2 == 0)
+ {
+ x.setZero();
+ iters = 0;
+ tol_error = 0;
+ return;
+ }
+
// initialize
const int maxIters(iters); // initialize maxIters to iters
const int N(mat.cols()); // the size of the matrix
- const RealScalar rhsNorm2(rhs.squaredNorm());
const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2)
// Initialize preconditioned Lanczos
-// VectorType v_old(N); // will be initialized inside loop
+ VectorType v_old(N); // will be initialized inside loop
VectorType v( VectorType::Zero(N) ); //initialize v
VectorType v_new(rhs-mat*x); //initialize v_new
RealScalar residualNorm2(v_new.squaredNorm());
-// VectorType w(N); // will be initialized inside loop
+ VectorType w(N); // will be initialized inside loop
VectorType w_new(precond.solve(v_new)); // initialize w_new
// RealScalar beta; // will be initialized inside loop
RealScalar beta_new2(v_new.dot(w_new));
- eigen_assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
+ eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
RealScalar beta_new(sqrt(beta_new2));
const RealScalar beta_one(beta_new);
v_new /= beta_new;
@@ -62,14 +71,14 @@ namespace Eigen {
RealScalar c_old(1.0);
RealScalar s(0.0); // the sine of the Givens rotation
RealScalar s_old(0.0); // the sine of the Givens rotation
-// VectorType p_oold(N); // will be initialized in loop
+ VectorType p_oold(N); // will be initialized in loop
VectorType p_old(VectorType::Zero(N)); // initialize p_old=0
VectorType p(p_old); // initialize p=0
RealScalar eta(1.0);
iters = 0; // reset iters
- while ( iters < maxIters ){
-
+ while ( iters < maxIters )
+ {
// Preconditioned Lanczos
/* Note that there are 4 variants on the Lanczos algorithm. These are
* described in Paige, C. C. (1972). Computational variants of
@@ -81,17 +90,17 @@ namespace Eigen {
* A. Greenbaum, Iterative Methods for Solving Linear Systems, SIAM (1987).
*/
const RealScalar beta(beta_new);
-// v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter
- const VectorType v_old(v); // NOT SURE IF CREATING v_old EVERY ITERATION IS EFFICIENT
+ v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter
+// const VectorType v_old(v); // NOT SURE IF CREATING v_old EVERY ITERATION IS EFFICIENT
v = v_new; // update
-// w = w_new; // update
- const VectorType w(w_new); // NOT SURE IF CREATING w EVERY ITERATION IS EFFICIENT
+ w = w_new; // update
+// const VectorType w(w_new); // NOT SURE IF CREATING w EVERY ITERATION IS EFFICIENT
v_new.noalias() = mat*w - beta*v_old; // compute v_new
const RealScalar alpha = v_new.dot(w);
v_new -= alpha*v; // overwrite v_new
w_new = precond.solve(v_new); // overwrite w_new
beta_new2 = v_new.dot(w_new); // compute beta_new
- eigen_assert(beta_new2 >= 0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
+ eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE");
beta_new = sqrt(beta_new2); // compute beta_new
v_new /= beta_new; // overwrite v_new for next iteration
w_new /= beta_new; // overwrite w_new for next iteration
@@ -107,21 +116,28 @@ namespace Eigen {
s=beta_new/r1; // new sine
// Update solution
-// p_oold = p_old;
- const VectorType p_oold(p_old); // NOT SURE IF CREATING p_oold EVERY ITERATION IS EFFICIENT
+ p_oold = p_old;
+// const VectorType p_oold(p_old); // NOT SURE IF CREATING p_oold EVERY ITERATION IS EFFICIENT
p_old = p;
p.noalias()=(w-r2*p_old-r3*p_oold) /r1; // IS NOALIAS REQUIRED?
x += beta_one*c*eta*p;
+
+ /* Update the squared residual. Note that this is the estimated residual.
+ The real residual |Ax-b|^2 may be slightly larger */
residualNorm2 *= s*s;
- if ( residualNorm2 < threshold2){
+ if ( residualNorm2 < threshold2)
+ {
break;
}
eta=-s*eta; // update eta
iters++; // increment iteration number (for output purposes)
}
- tol_error = std::sqrt(residualNorm2 / rhsNorm2); // return error. Note that this is the estimated error. The real error |Ax-b|/|b| may be slightly larger
+
+ /* Compute error. Note that this is the estimated error. The real
+ error |Ax-b|/|b| may be slightly larger */
+ tol_error = std::sqrt(residualNorm2 / rhsNorm2);
}
}
@@ -174,20 +190,7 @@ namespace Eigen {
* \endcode
*
* By default the iterations start with x=0 as an initial guess of the solution.
- * One can control the start using the solveWithGuess() method. Here is a step by
- * step execution example starting with a random guess and printing the evolution
- * of the estimated error:
- * * \code
- * x = VectorXd::Random(n);
- * mr.setMaxIterations(1);
- * int i = 0;
- * do {
- * x = mr.solveWithGuess(b,x);
- * std::cout << i << " : " << mr.error() << std::endl;
- * ++i;
- * } while (mr.info()!=Success && i<100);
- * \endcode
- * Note that such a step by step excution is slightly slower.
+ * One can control the start using the solveWithGuess() method.
*
* \sa class ConjugateGradient, BiCGSTAB, SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner
*/
@@ -250,6 +253,11 @@ namespace Eigen {
template<typename Rhs,typename Dest>
void _solveWithGuess(const Rhs& b, Dest& x) const
{
+ typedef typename internal::conditional<UpLo==(Lower|Upper),
+ const MatrixType&,
+ SparseSelfAdjointView<const MatrixType, UpLo>
+ >::type MatrixWrapperType;
+
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
@@ -259,7 +267,7 @@ namespace Eigen {
m_error = Base::m_tolerance;
typename Dest::ColXpr xj(x,j);
- internal::minres(mp_matrix->template selfadjointView<UpLo>(), b.col(j), xj,
+ internal::minres(MatrixWrapperType(*mp_matrix), b.col(j), xj,
Base::m_preconditioner, m_iterations, m_error);
}
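
The _solveWithGuess() change selects the matrix wrapper at compile time: when UpLo is Lower|Upper the matrix is passed through as a plain reference, otherwise it is wrapped in a SparseSelfAdjointView so that products see the full self-adjoint matrix. The same selection idiom in isolation (a sketch; the function and its matrix-vector product are mine):

    #include <Eigen/Sparse>

    using namespace Eigen;

    template<int UpLo>
    void selfadjoint_product(const SparseMatrix<double>& A, const VectorXd& x, VectorXd& y)
    {
      // Pass A through unchanged when both triangles are stored, otherwise wrap it.
      typedef typename internal::conditional<UpLo==(Lower|Upper),
                const SparseMatrix<double>&,
                SparseSelfAdjointView<const SparseMatrix<double>, UpLo>
              >::type MatrixWrapperType;
      y = MatrixWrapperType(A) * x;
    }

For instance, selfadjoint_product<Lower|Upper>(A, x, y) multiplies with A directly, while selfadjoint_product<Lower>(A, x, y) goes through the self-adjoint view of the stored triangle.
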
diff --git a/unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt b/unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt
index 8513803ce..d9690854d 100644
--- a/unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt
+++ b/unsupported/Eigen/src/LevenbergMarquardt/CMakeLists.txt
@@ -2,5 +2,5 @@ FILE(GLOB Eigen_LevenbergMarquardt_SRCS "*.h")
INSTALL(FILES
${Eigen_LevenbergMarquardt_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/LevenbergMarquardt COMPONENT Devel
+ DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/LevenbergMarquardt COMPONENT Devel
)
diff --git a/unsupported/test/minres.cpp b/unsupported/test/minres.cpp
index fd12da548..509ebe09a 100644
--- a/unsupported/test/minres.cpp
+++ b/unsupported/test/minres.cpp
@@ -14,15 +14,32 @@
template<typename T> void test_minres_T()
{
- MINRES<SparseMatrix<T>, Lower, DiagonalPreconditioner<T> > minres_colmajor_diag;
- MINRES<SparseMatrix<T>, Lower, IdentityPreconditioner > minres_colmajor_I;
+ MINRES<SparseMatrix<T>, Lower|Upper, DiagonalPreconditioner<T> > minres_colmajor_diag;
+ MINRES<SparseMatrix<T>, Lower, IdentityPreconditioner > minres_colmajor_lower_I;
+ MINRES<SparseMatrix<T>, Upper, IdentityPreconditioner > minres_colmajor_upper_I;
// MINRES<SparseMatrix<T>, Lower, IncompleteLUT<T> > minres_colmajor_ilut;
//minres<SparseMatrix<T>, SSORPreconditioner<T> > minres_colmajor_ssor;
- CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_diag) );
- CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_I) );
+
+// CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_diag) );
// CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_ilut) );
//CALL_SUBTEST( check_sparse_square_solving(minres_colmajor_ssor) );
+
+ // Diagonal preconditioner
+ MINRES<SparseMatrix<T>, Lower, DiagonalPreconditioner<T> > minres_colmajor_lower_diag;
+ MINRES<SparseMatrix<T>, Upper, DiagonalPreconditioner<T> > minres_colmajor_upper_diag;
+ MINRES<SparseMatrix<T>, Upper|Lower, DiagonalPreconditioner<T> > minres_colmajor_uplo_diag;
+
+ // call tests for SPD matrix
+ CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_lower_I) );
+ CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_upper_I) );
+
+ CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_lower_diag) );
+ CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_upper_diag) );
+// CALL_SUBTEST( check_sparse_spd_solving(minres_colmajor_uplo_diag) );
+
+ // TO DO: symmetric semi-definite matrix
+ // TO DO: symmetric indefinite matrix
}
void test_minres()
diff --git a/unsupported/test/mpreal/mpreal.h b/unsupported/test/mpreal/mpreal.h
index dddda7dd9..7d6f4e79f 100644
--- a/unsupported/test/mpreal/mpreal.h
+++ b/unsupported/test/mpreal/mpreal.h
@@ -57,7 +57,8 @@
#include <limits>
// Options
-#define MPREAL_HAVE_INT64_SUPPORT // Enable int64_t support if possible. Available only for MSVC 2010 & GCC.
+// FIXME HAVE_INT64_SUPPORT leads to clashes with long int and int64_t on some systems.
+//#define MPREAL_HAVE_INT64_SUPPORT // Enable int64_t support if possible. Available only for MSVC 2010 & GCC.
#define MPREAL_HAVE_MSVC_DEBUGVIEW // Enable Debugger Visualizer for "Debug" builds in MSVC.
#define MPREAL_HAVE_DYNAMIC_STD_NUMERIC_LIMITS // Enable extended std::numeric_limits<mpfr::mpreal> specialization.
// Meaning that "digits", "round_style" and similar members are defined as functions, not constants.