Diffstat (limited to 'Eigen/src/SVD/JacobiSVD.h')
-rw-r--r--  Eigen/src/SVD/JacobiSVD.h  182
1 file changed, 138 insertions(+), 44 deletions(-)
diff --git a/Eigen/src/SVD/JacobiSVD.h b/Eigen/src/SVD/JacobiSVD.h
index a7dbf0737..dff9e44eb 100644
--- a/Eigen/src/SVD/JacobiSVD.h
+++ b/Eigen/src/SVD/JacobiSVD.h
@@ -78,7 +78,8 @@ public:
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
- m_qr = FullPivHouseholderQR<MatrixType>(svd.rows(), svd.cols());
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
}
@@ -96,7 +97,8 @@ public:
return false;
}
private:
- FullPivHouseholderQR<MatrixType> m_qr;
+ typedef FullPivHouseholderQR<MatrixType> QRType;
+ QRType m_qr;
WorkspaceType m_workspace;
};
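
For reference: throughout this file, the assignment m_qr = QRType(rows, cols) is replaced by an explicit destructor call followed by placement new, so the QR preconditioner member is rebuilt in place when the problem size changes instead of going through operator=. A minimal standalone sketch of that pattern (the QRWorkspace wrapper and its reallocate() method are illustrative names, not part of the patch):

#include <new>        // placement new
#include <Eigen/QR>

template<typename MatrixType>
struct QRWorkspace
{
  typedef Eigen::FullPivHouseholderQR<MatrixType> QRType;

  QRWorkspace(int rows, int cols) : m_qr(rows, cols) {}

  // Rebuild m_qr with new dimensions without relying on operator=,
  // mirroring the destroy + placement-new pattern used in the patch above.
  void reallocate(int rows, int cols)
  {
    if (rows != m_qr.rows() || cols != m_qr.cols())
    {
      m_qr.~QRType();                    // end the lifetime of the current object
      ::new (&m_qr) QRType(rows, cols);  // construct a fresh one at the same address
    }
  }

  QRType m_qr;
};
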
@@ -121,7 +123,8 @@ public:
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
- m_qr = FullPivHouseholderQR<TransposeTypeWithSameStorageOrder>(svd.cols(), svd.rows());
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
}
m_adjoint.resize(svd.cols(), svd.rows());
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
@@ -141,7 +144,8 @@ public:
else return false;
}
private:
- FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> m_qr;
+ typedef FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
@@ -158,7 +162,8 @@ public:
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
- m_qr = ColPivHouseholderQR<MatrixType>(svd.rows(), svd.cols());
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
@@ -183,7 +188,8 @@ public:
}
private:
- ColPivHouseholderQR<MatrixType> m_qr;
+ typedef ColPivHouseholderQR<MatrixType> QRType;
+ QRType m_qr;
typename internal::plain_col_type<MatrixType>::type m_workspace;
};
@@ -209,7 +215,8 @@ public:
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
- m_qr = ColPivHouseholderQR<TransposeTypeWithSameStorageOrder>(svd.cols(), svd.rows());
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
}
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
@@ -237,7 +244,8 @@ public:
}
private:
- ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> m_qr;
+ typedef ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
@@ -254,7 +262,8 @@ public:
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
- m_qr = HouseholderQR<MatrixType>(svd.rows(), svd.cols());
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
@@ -278,7 +287,8 @@ public:
return false;
}
private:
- HouseholderQR<MatrixType> m_qr;
+ typedef HouseholderQR<MatrixType> QRType;
+ QRType m_qr;
typename internal::plain_col_type<MatrixType>::type m_workspace;
};
@@ -304,7 +314,8 @@ public:
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
- m_qr = HouseholderQR<TransposeTypeWithSameStorageOrder>(svd.cols(), svd.rows());
+ m_qr.~QRType();
+ ::new (&m_qr) QRType(svd.cols(), svd.rows());
}
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
@@ -332,7 +343,8 @@ public:
}
private:
- HouseholderQR<TransposeTypeWithSameStorageOrder> m_qr;
+ typedef HouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
+ QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
@@ -359,17 +371,23 @@ struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
typedef typename SVD::Index Index;
static void run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q)
{
+ using std::sqrt;
Scalar z;
JacobiRotation<Scalar> rot;
- RealScalar n = sqrt(abs2(work_matrix.coeff(p,p)) + abs2(work_matrix.coeff(q,p)));
+ RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
+
if(n==0)
{
z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
work_matrix.row(p) *= z;
if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
- z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
- work_matrix.row(q) *= z;
- if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+ if(work_matrix.coeff(q,q)!=Scalar(0))
+ {
+ z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
+ work_matrix.row(q) *= z;
+ if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
+ }
+ // otherwise the second row is already zero, so we have nothing to do.
}
else
{
@@ -398,9 +416,11 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
JacobiRotation<RealScalar> *j_left,
JacobiRotation<RealScalar> *j_right)
{
+ using std::sqrt;
+ using std::abs;
Matrix<RealScalar,2,2> m;
- m << real(matrix.coeff(p,p)), real(matrix.coeff(p,q)),
- real(matrix.coeff(q,p)), real(matrix.coeff(q,q));
+ m << numext::real(matrix.coeff(p,p)), numext::real(matrix.coeff(p,q)),
+ numext::real(matrix.coeff(q,p)), numext::real(matrix.coeff(q,q));
JacobiRotation<RealScalar> rot1;
RealScalar t = m.coeff(0,0) + m.coeff(1,1);
RealScalar d = m.coeff(1,0) - m.coeff(0,1);
@@ -411,9 +431,11 @@ void real_2x2_jacobi_svd(const MatrixType& matrix, Index p, Index q,
}
else
{
- RealScalar u = d / t;
- rot1.c() = RealScalar(1) / sqrt(RealScalar(1) + abs2(u));
- rot1.s() = rot1.c() * u;
+ RealScalar t2d2 = numext::hypot(t,d);
+ rot1.c() = abs(t)/t2d2;
+ rot1.s() = d/t2d2;
+ if(t<RealScalar(0))
+ rot1.s() = -rot1.s();
}
m.applyOnTheLeft(0,1,rot1);
j_right->makeJacobi(m,0,1);
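
The new else branch computes the rotation from t and d with numext::hypot instead of forming u = d/t and 1/sqrt(1+u^2), which stays accurate when t is tiny and avoids overflow in the intermediate squares. A plain-C++ sketch of the same computation (robust_rotation and its parameter names are illustrative, not from the patch):

#include <cmath>

// Given t = m(0,0) + m(1,1) and d = m(1,0) - m(0,1), compute the cosine/sine
// of the rotation that symmetrizes the 2x2 block without ever forming d/t.
void robust_rotation(double t, double d, double& c, double& s)
{
  double h = std::hypot(t, d);                  // sqrt(t*t + d*d) without intermediate overflow
  if (h == 0.0) { c = 1.0; s = 0.0; return; }   // degenerate all-zero case
  c = std::abs(t) / h;
  s = d / h;
  if (t < 0.0) s = -s;                          // same sign convention as the old s = c * (d/t)
}
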
@@ -514,8 +536,9 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
JacobiSVD()
: m_isInitialized(false),
m_isAllocated(false),
+ m_usePrescribedThreshold(false),
m_computationOptions(0),
- m_rows(-1), m_cols(-1)
+ m_rows(-1), m_cols(-1), m_diagSize(0)
{}
@@ -528,6 +551,7 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
: m_isInitialized(false),
m_isAllocated(false),
+ m_usePrescribedThreshold(false),
m_computationOptions(0),
m_rows(-1), m_cols(-1)
{
@@ -547,6 +571,7 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
: m_isInitialized(false),
m_isAllocated(false),
+ m_usePrescribedThreshold(false),
m_computationOptions(0),
m_rows(-1), m_cols(-1)
{
@@ -648,6 +673,69 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
return m_nonzeroSingularValues;
}
+
+ /** \returns the rank of the matrix of which \c *this is the SVD.
+ *
+ * \note This method has to determine which singular values should be considered nonzero.
+ * For that, it uses the threshold value that you can control by calling
+ * setThreshold(const RealScalar&).
+ */
+ inline Index rank() const
+ {
+ using std::abs;
+ eigen_assert(m_isInitialized && "JacobiSVD is not initialized.");
+ if(m_singularValues.size()==0) return 0;
+ RealScalar premultiplied_threshold = m_singularValues.coeff(0) * threshold();
+ Index i = m_nonzeroSingularValues-1;
+ while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
+ return i+1;
+ }
+
+ /** Allows prescribing a threshold to be used by certain methods, such as rank() and solve(),
+ * which need to determine when singular values are to be considered nonzero.
+ * This is not used for the SVD decomposition itself.
+ *
+ * When it needs to get the threshold value, Eigen calls threshold().
+ * The default is \c NumTraits<Scalar>::epsilon() times the diagonal size of the matrix; see threshold().
+ *
+ * \param threshold The new value to use as the threshold.
+ *
+ * A singular value will be considered nonzero if it is strictly greater than
+ * \f$ threshold \times \vert max singular value \vert \f$.
+ *
+ * If you want to come back to the default behavior, call setThreshold(Default_t)
+ */
+ JacobiSVD& setThreshold(const RealScalar& threshold)
+ {
+ m_usePrescribedThreshold = true;
+ m_prescribedThreshold = threshold;
+ return *this;
+ }
+
+ /** Allows returning to the default behavior, letting Eigen use its default formula for
+ * determining the threshold.
+ *
+ * You should pass the special object Eigen::Default as parameter here.
+ * \code svd.setThreshold(Eigen::Default); \endcode
+ *
+ * See the documentation of setThreshold(const RealScalar&).
+ */
+ JacobiSVD& setThreshold(Default_t)
+ {
+ m_usePrescribedThreshold = false;
+ return *this;
+ }
+
+ /** Returns the threshold that will be used by certain methods such as rank().
+ *
+ * See the documentation of setThreshold(const RealScalar&).
+ */
+ RealScalar threshold() const
+ {
+ eigen_assert(m_isInitialized || m_usePrescribedThreshold);
+ return m_usePrescribedThreshold ? m_prescribedThreshold
+ : (std::max<Index>)(1,m_diagSize)*NumTraits<Scalar>::epsilon();
+ }
inline Index rows() const { return m_rows; }
inline Index cols() const { return m_cols; }
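
A brief usage sketch of the rank()/setThreshold()/threshold() API added above (the matrix values and the 1e-4 threshold are made up for illustration):

#include <Eigen/SVD>
#include <iostream>

int main()
{
  Eigen::MatrixXd A(4, 3);
  A << 1, 2, 3.0001,
       2, 4, 6,
       0, 1, 1,
       1, 0, 1;   // third column is (almost) the sum of the first two

  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A);   // singular values only; enough for rank()

  // Default threshold: diagSize * machine epsilon, relative to the largest
  // singular value. The tiny third direction still counts, so this prints 3.
  std::cout << "rank: " << svd.rank() << "\n";

  // Prescribe a coarser threshold: singular values below 1e-4 * sigma_max
  // are now treated as zero, so this prints 2.
  svd.setThreshold(1e-4);
  std::cout << "rank: " << svd.rank() << "\n";

  // Restore the default formula.
  svd.setThreshold(Eigen::Default);
}
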
@@ -660,11 +748,12 @@ template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
MatrixVType m_matrixV;
SingularValuesType m_singularValues;
WorkMatrixType m_workMatrix;
- bool m_isInitialized, m_isAllocated;
+ bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold;
bool m_computeFullU, m_computeThinU;
bool m_computeFullV, m_computeThinV;
unsigned int m_computationOptions;
Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
+ RealScalar m_prescribedThreshold;
template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
friend struct internal::svd_precondition_2x2_block_to_be_real;
@@ -709,12 +798,14 @@ void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Index rows, Index cols, u
}
m_diagSize = (std::min)(m_rows, m_cols);
m_singularValues.resize(m_diagSize);
- m_matrixU.resize(m_rows, m_computeFullU ? m_rows
- : m_computeThinU ? m_diagSize
- : 0);
- m_matrixV.resize(m_cols, m_computeFullV ? m_cols
- : m_computeThinV ? m_diagSize
- : 0);
+ if(RowsAtCompileTime==Dynamic)
+ m_matrixU.resize(m_rows, m_computeFullU ? m_rows
+ : m_computeThinU ? m_diagSize
+ : 0);
+ if(ColsAtCompileTime==Dynamic)
+ m_matrixV.resize(m_cols, m_computeFullV ? m_cols
+ : m_computeThinV ? m_diagSize
+ : 0);
m_workMatrix.resize(m_diagSize, m_diagSize);
if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
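
The allocate() change above skips the resize of m_matrixU/m_matrixV when the corresponding dimension is fixed at compile time: a fixed-size matrix already has the right dimensions, and resize() asserts on any other request. A generic sketch of that guard (allocate_like is a made-up helper, not from the patch):

#include <Eigen/Core>

// Only resize a matrix whose type has a dynamic dimension; for fully
// fixed-size types the dimensions are already determined by the type.
template<typename MatrixType>
void allocate_like(MatrixType& m, int rows, int cols)
{
  if (MatrixType::RowsAtCompileTime == Eigen::Dynamic ||
      MatrixType::ColsAtCompileTime == Eigen::Dynamic)
  {
    m.resize(rows, cols);
  }
}
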
@@ -725,6 +816,7 @@ template<typename MatrixType, int QRPreconditioner>
JacobiSVD<MatrixType, QRPreconditioner>&
JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
{
+ using std::abs;
allocate(matrix.rows(), matrix.cols(), computationOptions);
// currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
@@ -744,6 +836,11 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols);
if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize);
}
+
+ // Scaling factor to reduce over/under-flows
+ RealScalar scale = m_workMatrix.cwiseAbs().maxCoeff();
+ if(scale==RealScalar(0)) scale = RealScalar(1);
+ m_workMatrix /= scale;
/*** step 2. The main Jacobi SVD iteration. ***/
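
The added lines divide the work matrix by its largest absolute coefficient before the Jacobi iterations and, together with the m_singularValues *= scale; line added further down, multiply the factor back into the singular values, so that squaring coefficients inside the rotations cannot overflow or underflow. A tiny stand-alone illustration of the idea (robust_frobenius_norm is just a stand-in, not from the patch):

#include <Eigen/Core>

// Factor out the magnitude, run the numerics on a matrix whose largest entry
// has absolute value 1, then fold the factor back into the output.
double robust_frobenius_norm(const Eigen::MatrixXd& m)
{
  double scale = m.cwiseAbs().maxCoeff();
  if (scale == 0.0) return 0.0;        // all-zero matrix

  Eigen::MatrixXd work = m / scale;    // entries now lie in [-1, 1]
  double n = work.norm();              // the sum of squares cannot overflow here
  return n * scale;                    // undo the scaling on the result
}
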
@@ -762,9 +859,9 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
// notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
// keep us iterating forever. Similarly, small denormal numbers are considered zero.
using std::max;
- RealScalar threshold = (max)(considerAsZero, precision * (max)(internal::abs(m_workMatrix.coeff(p,p)),
- internal::abs(m_workMatrix.coeff(q,q))));
- if((max)(internal::abs(m_workMatrix.coeff(p,q)),internal::abs(m_workMatrix.coeff(q,p))) > threshold)
+ RealScalar threshold = (max)(considerAsZero, precision * (max)(abs(m_workMatrix.coeff(p,p)),
+ abs(m_workMatrix.coeff(q,q))));
+ if((max)(abs(m_workMatrix.coeff(p,q)),abs(m_workMatrix.coeff(q,p))) > threshold)
{
finished = false;
@@ -788,7 +885,7 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
for(Index i = 0; i < m_diagSize; ++i)
{
- RealScalar a = internal::abs(m_workMatrix.coeff(i,i));
+ RealScalar a = abs(m_workMatrix.coeff(i,i));
m_singularValues.coeffRef(i) = a;
if(computeU() && (a!=RealScalar(0))) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;
}
@@ -813,6 +910,8 @@ JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsig
if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i));
}
}
+
+ m_singularValues *= scale;
m_isInitialized = true;
return *this;
@@ -833,17 +932,12 @@ struct solve_retval<JacobiSVD<_MatrixType, QRPreconditioner>, Rhs>
// A = U S V^*
// So A^{-1} = V S^{-1} U^*
- Index diagSize = (std::min)(dec().rows(), dec().cols());
- typename JacobiSVDType::SingularValuesType invertedSingVals(diagSize);
-
- Index nonzeroSingVals = dec().nonzeroSingularValues();
- invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
- invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
-
- dst = dec().matrixV().leftCols(diagSize)
- * invertedSingVals.asDiagonal()
- * dec().matrixU().leftCols(diagSize).adjoint()
- * rhs();
+ Matrix<Scalar, Dynamic, Rhs::ColsAtCompileTime, 0, _MatrixType::MaxRowsAtCompileTime, Rhs::MaxColsAtCompileTime> tmp;
+ Index rank = dec().rank();
+
+ tmp.noalias() = dec().matrixU().leftCols(rank).adjoint() * rhs();
+ tmp = dec().singularValues().head(rank).asDiagonal().inverse() * tmp;
+ dst = dec().matrixV().leftCols(rank) * tmp;
}
};
} // end namespace internal
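
The rewritten solver applies U^* to the right-hand side, scales by the inverse of the first rank() singular values, and maps the result back through V, so columns of U and V beyond the numerical rank never enter the product. A user-level sketch that mirrors this (assumes an Eigen build containing this patch, so that JacobiSVD::rank() is available):

#include <Eigen/SVD>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);
  Eigen::VectorXd b = Eigen::VectorXd::Random(5);

  // Thin U/V are enough for least-squares solving.
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  Eigen::VectorXd x = svd.solve(b);

  // What solve() now does internally, restricted to the numerical rank.
  Eigen::MatrixXd::Index r = svd.rank();
  Eigen::VectorXd tmp = svd.matrixU().leftCols(r).adjoint() * b;
  tmp = svd.singularValues().head(r).asDiagonal().inverse() * tmp;
  Eigen::VectorXd x2 = svd.matrixV().leftCols(r) * tmp;

  std::cout << (x - x2).norm() << "\n";   // should be ~0
}
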