author    Carlos Hernandez <chernand@google.com>  2014-08-05 17:53:32 -0700
committer Carlos Hernandez <chernand@google.com>  2014-08-05 17:54:05 -0700
commit    7faaa9f3f0df9d23790277834d426c3d992ac3ba (patch)
tree      b788ae3b96daf9f5a79d8ec434e1e9edd56b3a72 /Eigen/src/SparseCore/SparseMatrix.h
parent    810535bb0c575a003b32076e5352ab8fd3f23a1c (diff)
download  eigen-7faaa9f3f0df9d23790277834d426c3d992ac3ba.tar.gz
Update Eigen to the latest stable release, 3.2.2
./Eigen/src/Core/util/NonMPL2.h is left untouched, so that usage of non-MPL2 code is disabled. Change-Id: I86fc9257b3c30d0ca15b268d4ef07bf038bba7ca
Diffstat (limited to 'Eigen/src/SparseCore/SparseMatrix.h')
-rw-r--r--  Eigen/src/SparseCore/SparseMatrix.h  617
1 file changed, 380 insertions(+), 237 deletions(-)
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h
index efb774f03..ba5e3a9b6 100644
--- a/Eigen/src/SparseCore/SparseMatrix.h
+++ b/Eigen/src/SparseCore/SparseMatrix.h
@@ -31,7 +31,7 @@ namespace Eigen {
*
* \tparam _Scalar the scalar type, i.e. the type of the coefficients
* \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
- * is RowMajor. The default is 0 which means column-major.
+ * is ColMajor or RowMajor. The default is 0 which means column-major.
* \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
*
* This class can be extended with the help of the plugin mechanism described on the page
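[For orientation, not part of the patch: a minimal sketch of how these template parameters are typically chosen. The typedef names are illustrative only.]

    #include <Eigen/SparseCore>
    #include <cstddef>

    typedef Eigen::SparseMatrix<double> SpMatColMajor;                              // Options == 0: column-major (default)
    typedef Eigen::SparseMatrix<double, Eigen::RowMajor> SpMatRowMajor;             // row-major storage
    typedef Eigen::SparseMatrix<float, Eigen::ColMajor, std::ptrdiff_t> SpMatLong;  // explicit signed index type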
@@ -170,6 +170,8 @@ class SparseMatrix
* This function returns Scalar(0) if the element is an explicit \em zero */
inline Scalar coeff(Index row, Index col) const
{
+ eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
@@ -186,6 +188,8 @@ class SparseMatrix
*/
inline Scalar& coeffRef(Index row, Index col)
{
+ eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
@@ -213,11 +217,13 @@ class SparseMatrix
* inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
*
*/
- EIGEN_DONT_INLINE Scalar& insert(Index row, Index col)
+ Scalar& insert(Index row, Index col)
{
+ eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+
if(isCompressed())
{
- reserve(VectorXi::Constant(outerSize(), 2));
+ reserve(Matrix<Index,Dynamic,1>::Constant(outerSize(), 2));
}
return insertUncompressed(row,col);
}
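[Usage sketch for the insertion path above, illustrative and not part of the patch: reserve an estimate per inner vector, insert in increasing inner-index order, then compress.]

    #include <Eigen/SparseCore>
    #include <algorithm>

    // Build a small tridiagonal matrix with the reserve-then-insert pattern.
    Eigen::SparseMatrix<double> buildTridiagonal(int n)
    {
      Eigen::SparseMatrix<double> A(n, n);            // column-major by default
      A.reserve(Eigen::VectorXi::Constant(n, 3));     // estimated non-zeros per column
      for (int j = 0; j < n; ++j)
        for (int i = std::max(0, j - 1); i <= std::min(n - 1, j + 1); ++i)
          A.insert(i, j) = 1.0;                       // increasing inner (row) index within each column
      A.makeCompressed();                             // squeeze back into compressed storage
      return A;
    }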
@@ -281,12 +287,12 @@ class SparseMatrix
template<class SizesType>
inline void reserveInnerVectors(const SizesType& reserveSizes)
{
-
if(isCompressed())
{
std::size_t totalReserveSize = 0;
// turn the matrix into non-compressed mode
- m_innerNonZeros = new Index[m_outerSize];
+ m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
+ if (!m_innerNonZeros) internal::throw_std_bad_alloc();
// temporarily use m_innerSizes to hold the new starting points.
Index* newOuterIndex = m_innerNonZeros;
@@ -299,11 +305,11 @@ class SparseMatrix
totalReserveSize += reserveSizes[j];
}
m_data.reserve(totalReserveSize);
- std::ptrdiff_t previousOuterIndex = m_outerIndex[m_outerSize];
- for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
+ Index previousOuterIndex = m_outerIndex[m_outerSize];
+ for(Index j=m_outerSize-1; j>=0; --j)
{
- ptrdiff_t innerNNZ = previousOuterIndex - m_outerIndex[j];
- for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
+ Index innerNNZ = previousOuterIndex - m_outerIndex[j];
+ for(Index i=innerNNZ-1; i>=0; --i)
{
m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
@@ -318,25 +324,27 @@ class SparseMatrix
}
else
{
- Index* newOuterIndex = new Index[m_outerSize+1];
+ Index* newOuterIndex = static_cast<Index*>(std::malloc((m_outerSize+1)*sizeof(Index)));
+ if (!newOuterIndex) internal::throw_std_bad_alloc();
+
Index count = 0;
for(Index j=0; j<m_outerSize; ++j)
{
newOuterIndex[j] = count;
Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
- Index toReserve = std::max<std::ptrdiff_t>(reserveSizes[j], alreadyReserved);
+ Index toReserve = std::max<Index>(reserveSizes[j], alreadyReserved);
count += toReserve + m_innerNonZeros[j];
}
newOuterIndex[m_outerSize] = count;
m_data.resize(count);
- for(ptrdiff_t j=m_outerSize-1; j>=0; --j)
+ for(Index j=m_outerSize-1; j>=0; --j)
{
- std::ptrdiff_t offset = newOuterIndex[j] - m_outerIndex[j];
+ Index offset = newOuterIndex[j] - m_outerIndex[j];
if(offset>0)
{
- std::ptrdiff_t innerNNZ = m_innerNonZeros[j];
- for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
+ Index innerNNZ = m_innerNonZeros[j];
+ for(Index i=innerNNZ-1; i>=0; --i)
{
m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
@@ -345,7 +353,7 @@ class SparseMatrix
}
std::swap(m_outerIndex, newOuterIndex);
- delete[] newOuterIndex;
+ std::free(newOuterIndex);
}
}
@@ -394,7 +402,7 @@ class SparseMatrix
* \sa insertBack, insertBackByOuterInner */
inline void startVec(Index outer)
{
- eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
+ eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
m_outerIndex[outer+1] = m_outerIndex[outer];
}
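[For context: a sketch of the sequential low-level filling protocol that startVec() belongs to. It assumes the companion insertBack()/finalize() members, which are not shown in this hunk.]

    #include <Eigen/SparseCore>

    // Low-level sequential filling around startVec().
    Eigen::SparseMatrix<double> diagonalByLowLevelApi()
    {
      Eigen::SparseMatrix<double> M(4, 4);   // column-major
      for (int j = 0; j < 4; ++j)
      {
        M.startVec(j);                       // once per inner vector, in storage order
        M.insertBack(j, j) = 2.0;            // inner indices appended in increasing order
      }
      M.finalize();
      return M;
    }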
@@ -431,7 +439,7 @@ class SparseMatrix
/** \internal
* same as insert(Index,Index) except that the indices are given relative to the storage order */
- EIGEN_DONT_INLINE Scalar& insertByOuterInner(Index j, Index i)
+ Scalar& insertByOuterInner(Index j, Index i)
{
return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
}
@@ -448,7 +456,7 @@ class SparseMatrix
for(Index j=1; j<m_outerSize; ++j)
{
Index nextOldStart = m_outerIndex[j+1];
- std::ptrdiff_t offset = oldStart - m_outerIndex[j];
+ Index offset = oldStart - m_outerIndex[j];
if(offset>0)
{
for(Index k=0; k<m_innerNonZeros[j]; ++k)
@@ -460,14 +468,26 @@ class SparseMatrix
m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
oldStart = nextOldStart;
}
- delete[] m_innerNonZeros;
+ std::free(m_innerNonZeros);
m_innerNonZeros = 0;
m_data.resize(m_outerIndex[m_outerSize]);
m_data.squeeze();
}
+ /** Turns the matrix into the uncompressed mode */
+ void uncompress()
+ {
+ if(m_innerNonZeros != 0)
+ return;
+ m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
+ for (Index i = 0; i < m_outerSize; i++)
+ {
+ m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
+ }
+ }
+
/** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerence \a epsilon */
- void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
prune(default_prunning_func(reference,epsilon));
}
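[Usage sketch combining the storage-mode members around this hunk: makeCompressed(), the new uncompress(), and prune(). Values are illustrative.]

    #include <Eigen/SparseCore>

    void storageModeExample()
    {
      Eigen::SparseMatrix<double> A(100, 100);
      A.insert(3, 7)  = 1e-12;     // insert() leaves A in uncompressed mode
      A.insert(10, 7) = 2.0;
      A.makeCompressed();          // squeeze out per-column free space (standard CCS layout)
      A.prune(1.0);                // drop entries much smaller than the reference value 1.0
      A.uncompress();              // the new member above: switch back to uncompressed mode
    }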
@@ -506,6 +526,70 @@ class SparseMatrix
m_data.resize(k,0);
}
+ /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
+ * \sa resizeNonZeros(Index), reserve(), setZero()
+ */
+ void conservativeResize(Index rows, Index cols)
+ {
+ // No change
+ if (this->rows() == rows && this->cols() == cols) return;
+
+ // If one dimension is null, then there is nothing to be preserved
+ if(rows==0 || cols==0) return resize(rows,cols);
+
+ Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
+ Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
+ Index newInnerSize = IsRowMajor ? cols : rows;
+
+ // Deals with inner non zeros
+ if (m_innerNonZeros)
+ {
+ // Resize m_innerNonZeros
+ Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index)));
+ if (!newInnerNonZeros) internal::throw_std_bad_alloc();
+ m_innerNonZeros = newInnerNonZeros;
+
+ for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
+ m_innerNonZeros[i] = 0;
+ }
+ else if (innerChange < 0)
+ {
+ // Inner size decreased: allocate a new m_innerNonZeros
+ m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index)));
+ if (!m_innerNonZeros) internal::throw_std_bad_alloc();
+ for(Index i = 0; i < m_outerSize; i++)
+ m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
+ }
+
+ // Change the m_innerNonZeros in case of a decrease of inner size
+ if (m_innerNonZeros && innerChange < 0)
+ {
+ for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
+ {
+ Index &n = m_innerNonZeros[i];
+ Index start = m_outerIndex[i];
+ while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
+ }
+ }
+
+ m_innerSize = newInnerSize;
+
+ // Re-allocate outer index structure if necessary
+ if (outerChange == 0)
+ return;
+
+ Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index)));
+ if (!newOuterIndex) internal::throw_std_bad_alloc();
+ m_outerIndex = newOuterIndex;
+ if (outerChange > 0)
+ {
+ Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
+ for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
+ m_outerIndex[i] = last;
+ }
+ m_outerSize += outerChange;
+ }
+
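[A short sketch of the new conservativeResize(), with illustrative dimensions: non-zeros inside the new bounds are preserved, whereas resize() would discard all of them.]

    #include <Eigen/SparseCore>

    void conservativeResizeExample()
    {
      Eigen::SparseMatrix<double> A(5, 5);
      A.insert(1, 1) = 3.0;
      A.insert(4, 4) = 7.0;
      A.conservativeResize(3, 6);  // (1,1) survives; (4,4) now lies outside the bounds and is dropped
    }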
/** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
* \sa resizeNonZeros(Index), reserve(), setZero()
*/
@@ -516,13 +600,15 @@ class SparseMatrix
m_data.clear();
if (m_outerSize != outerSize || m_outerSize==0)
{
- delete[] m_outerIndex;
- m_outerIndex = new Index [outerSize+1];
+ std::free(m_outerIndex);
+ m_outerIndex = static_cast<Index*>(std::malloc((outerSize + 1) * sizeof(Index)));
+ if (!m_outerIndex) internal::throw_std_bad_alloc();
+
m_outerSize = outerSize;
}
if(m_innerNonZeros)
{
- delete[] m_innerNonZeros;
+ std::free(m_innerNonZeros);
m_innerNonZeros = 0;
}
memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
@@ -560,9 +646,20 @@ class SparseMatrix
inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
: m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
check_template_parameters();
*this = other.derived();
}
+
+ /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
+ template<typename OtherDerived, unsigned int UpLo>
+ inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+ {
+ check_template_parameters();
+ *this = other;
+ }
/** Copy constructor (it performs a deep copy) */
inline SparseMatrix(const SparseMatrix& other)
@@ -572,6 +669,16 @@ class SparseMatrix
*this = other.derived();
}
+ /** \brief Copy constructor with in-place evaluation */
+ template<typename OtherDerived>
+ SparseMatrix(const ReturnByValue<OtherDerived>& other)
+ : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+ {
+ check_template_parameters();
+ initAssignment(other);
+ other.evalTo(*this);
+ }
+
/** Swaps the content of two sparse matrices of the same type.
* This is a fast operation that simply swaps the underlying pointers and parameters. */
inline void swap(SparseMatrix& other)
@@ -584,13 +691,22 @@ class SparseMatrix
m_data.swap(other.m_data);
}
+ /** Sets *this to the identity matrix */
+ inline void setIdentity()
+ {
+ eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
+ this->m_data.resize(rows());
+ Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
+ Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes();
+ Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
+ }
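[Usage sketch for the new setIdentity(): it requires a square matrix and overwrites any existing structure.]

    #include <Eigen/SparseCore>

    void identityExample()
    {
      Eigen::SparseMatrix<double> I(4, 4);
      I.setIdentity();             // four unit entries, one per column
    }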
inline SparseMatrix& operator=(const SparseMatrix& other)
{
if (other.isRValue())
{
swap(other.const_cast_derived());
}
- else
+ else if(this!=&other)
{
initAssignment(other);
if(other.isCompressed())
@@ -613,7 +729,10 @@ class SparseMatrix
template<typename OtherDerived>
inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
- { return Base::operator=(other.derived()); }
+ {
+ initAssignment(other);
+ return Base::operator=(other.derived());
+ }
template<typename OtherDerived>
inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
@@ -621,58 +740,7 @@ class SparseMatrix
#endif
template<typename OtherDerived>
- EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
- {
- initAssignment(other.derived());
- const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
- if (needToTranspose)
- {
- // two passes algorithm:
- // 1 - compute the number of coeffs per dest inner vector
- // 2 - do the actual copy/eval
- // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
- typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
- typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
- OtherCopy otherCopy(other.derived());
-
- Eigen::Map<Matrix<Index, Dynamic, 1> > (m_outerIndex,outerSize()).setZero();
- // pass 1
- // FIXME the above copy could be merged with that pass
- for (Index j=0; j<otherCopy.outerSize(); ++j)
- for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
- ++m_outerIndex[it.index()];
-
- // prefix sum
- Index count = 0;
- VectorXi positions(outerSize());
- for (Index j=0; j<outerSize(); ++j)
- {
- Index tmp = m_outerIndex[j];
- m_outerIndex[j] = count;
- positions[j] = count;
- count += tmp;
- }
- m_outerIndex[outerSize()] = count;
- // alloc
- m_data.resize(count);
- // pass 2
- for (Index j=0; j<otherCopy.outerSize(); ++j)
- {
- for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
- {
- Index pos = positions[it.index()]++;
- m_data.index(pos) = j;
- m_data.value(pos) = it.value();
- }
- }
- return *this;
- }
- else
- {
- // there is no special optimization
- return Base::operator=(other.derived());
- }
- }
+ EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
{
@@ -684,8 +752,8 @@ class SparseMatrix
else
for (Index i=0; i<m.outerSize(); ++i)
{
- int p = m.m_outerIndex[i];
- int pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
+ Index p = m.m_outerIndex[i];
+ Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
Index k=p;
for (; k<pe; ++k)
s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
@@ -714,8 +782,8 @@ class SparseMatrix
/** Destructor */
inline ~SparseMatrix()
{
- delete[] m_outerIndex;
- delete[] m_innerNonZeros;
+ std::free(m_outerIndex);
+ std::free(m_innerNonZeros);
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
@@ -735,118 +803,14 @@ protected:
resize(other.rows(), other.cols());
if(m_innerNonZeros)
{
- delete[] m_innerNonZeros;
+ std::free(m_innerNonZeros);
m_innerNonZeros = 0;
}
}
/** \internal
* \sa insert(Index,Index) */
- EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col)
- {
- eigen_assert(isCompressed());
-
- const Index outer = IsRowMajor ? row : col;
- const Index inner = IsRowMajor ? col : row;
-
- Index previousOuter = outer;
- if (m_outerIndex[outer+1]==0)
- {
- // we start a new inner vector
- while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
- {
- m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
- --previousOuter;
- }
- m_outerIndex[outer+1] = m_outerIndex[outer];
- }
-
- // here we have to handle the tricky case where the outerIndex array
- // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g.,
- // the 2nd inner vector...
- bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
- && (size_t(m_outerIndex[outer+1]) == m_data.size());
-
- size_t startId = m_outerIndex[outer];
- // FIXME let's make sure sizeof(long int) == sizeof(size_t)
- size_t p = m_outerIndex[outer+1];
- ++m_outerIndex[outer+1];
-
- float reallocRatio = 1;
- if (m_data.allocatedSize()<=m_data.size())
- {
- // if there is no preallocated memory, let's reserve a minimum of 32 elements
- if (m_data.size()==0)
- {
- m_data.reserve(32);
- }
- else
- {
- // we need to reallocate the data, to reduce multiple reallocations
- // we use a smart resize algorithm based on the current filling ratio
- // in addition, we use float to avoid integers overflows
- float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
- reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
- // furthermore we bound the realloc ratio to:
- // 1) reduce multiple minor realloc when the matrix is almost filled
- // 2) avoid to allocate too much memory when the matrix is almost empty
- reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
- }
- }
- m_data.resize(m_data.size()+1,reallocRatio);
-
- if (!isLastVec)
- {
- if (previousOuter==-1)
- {
- // oops wrong guess.
- // let's correct the outer offsets
- for (Index k=0; k<=(outer+1); ++k)
- m_outerIndex[k] = 0;
- Index k=outer+1;
- while(m_outerIndex[k]==0)
- m_outerIndex[k++] = 1;
- while (k<=m_outerSize && m_outerIndex[k]!=0)
- m_outerIndex[k++]++;
- p = 0;
- --k;
- k = m_outerIndex[k]-1;
- while (k>0)
- {
- m_data.index(k) = m_data.index(k-1);
- m_data.value(k) = m_data.value(k-1);
- k--;
- }
- }
- else
- {
- // we are not inserting into the last inner vec
- // update outer indices:
- Index j = outer+2;
- while (j<=m_outerSize && m_outerIndex[j]!=0)
- m_outerIndex[j++]++;
- --j;
- // shift data of last vecs:
- Index k = m_outerIndex[j]-1;
- while (k>=Index(p))
- {
- m_data.index(k) = m_data.index(k-1);
- m_data.value(k) = m_data.value(k-1);
- k--;
- }
- }
- }
-
- while ( (p > startId) && (m_data.index(p-1) > inner) )
- {
- m_data.index(p) = m_data.index(p-1);
- m_data.value(p) = m_data.value(p-1);
- --p;
- }
-
- m_data.index(p) = inner;
- return (m_data.value(p) = 0);
- }
+ EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
/** \internal
* A vector object that is equal to 0 everywhere but v at the position i */
@@ -865,40 +829,12 @@ protected:
/** \internal
* \sa insert(Index,Index) */
- EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col)
- {
- eigen_assert(!isCompressed());
-
- const Index outer = IsRowMajor ? row : col;
- const Index inner = IsRowMajor ? col : row;
-
- std::ptrdiff_t room = m_outerIndex[outer+1] - m_outerIndex[outer];
- std::ptrdiff_t innerNNZ = m_innerNonZeros[outer];
- if(innerNNZ>=room)
- {
- // this inner vector is full, we need to reallocate the whole buffer :(
- reserve(SingletonVector(outer,std::max<std::ptrdiff_t>(2,innerNNZ)));
- }
-
- Index startId = m_outerIndex[outer];
- Index p = startId + m_innerNonZeros[outer];
- while ( (p > startId) && (m_data.index(p-1) > inner) )
- {
- m_data.index(p) = m_data.index(p-1);
- m_data.value(p) = m_data.value(p-1);
- --p;
- }
-
- m_innerNonZeros[outer]++;
-
- m_data.index(p) = inner;
- return (m_data.value(p) = 0);
- }
+ EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
public:
/** \internal
* \sa insert(Index,Index) */
- inline Scalar& insertBackUncompressed(Index row, Index col)
+ EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
{
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
@@ -906,8 +842,7 @@ public:
eigen_assert(!isCompressed());
eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
- Index p = m_outerIndex[outer] + m_innerNonZeros[outer];
- m_innerNonZeros[outer]++;
+ Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
m_data.index(p) = inner;
return (m_data.value(p) = 0);
}
@@ -916,10 +851,11 @@ private:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+ EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
}
struct default_prunning_func {
- default_prunning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {}
+ default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
inline bool operator() (const Index&, const Index&, const Scalar& value) const
{
return !internal::isMuchSmallerThan(value, reference, epsilon);
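[default_prunning_func is the predicate behind prune(reference, epsilon). A custom predicate can be passed to the templated prune(keep) overload, which this patch is assumed to leave unchanged; the functor name below is illustrative.]

    #include <cmath>

    // C++03-style keep-predicate: retain only entries with |value| >= 1e-8.
    struct KeepLargeEnough
    {
      bool operator()(const int& /*row*/, const int& /*col*/, const double& value) const
      {
        return std::abs(value) >= 1e-8;
      }
    };
    // usage: A.prune(KeepLargeEnough());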
@@ -1004,21 +940,27 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
enum { IsRowMajor = SparseMatrixType::IsRowMajor };
typedef typename SparseMatrixType::Scalar Scalar;
typedef typename SparseMatrixType::Index Index;
- SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols());
+ SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,Index> trMat(mat.rows(),mat.cols());
- // pass 1: count the nnz per inner-vector
- VectorXi wi(trMat.outerSize());
- wi.setZero();
- for(InputIterator it(begin); it!=end; ++it)
- wi(IsRowMajor ? it->col() : it->row())++;
+ if(begin!=end)
+ {
+ // pass 1: count the nnz per inner-vector
+ Matrix<Index,Dynamic,1> wi(trMat.outerSize());
+ wi.setZero();
+ for(InputIterator it(begin); it!=end; ++it)
+ {
+ eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
+ wi(IsRowMajor ? it->col() : it->row())++;
+ }
- // pass 2: insert all the elements into trMat
- trMat.reserve(wi);
- for(InputIterator it(begin); it!=end; ++it)
- trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
+ // pass 2: insert all the elements into trMat
+ trMat.reserve(wi);
+ for(InputIterator it(begin); it!=end; ++it)
+ trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
- // pass 3:
- trMat.sumupDuplicates();
+ // pass 3:
+ trMat.sumupDuplicates();
+ }
// pass 4: transposed copy -> implicit sorting
mat = trMat;
@@ -1027,7 +969,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
}
-/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \b.
+/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
*
* A \em triplet is a tuple (i,j,value) defining a non-zero element.
* The input list of triplets does not have to be sorted, and can contains duplicated elements.
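[The usual entry point into this helper is SparseMatrix::setFromTriplets(); a small usage sketch with illustrative values.]

    #include <vector>
    #include <Eigen/SparseCore>

    void fromTripletsExample()
    {
      std::vector<Eigen::Triplet<double> > triplets;
      triplets.push_back(Eigen::Triplet<double>(0, 1, 2.5));
      triplets.push_back(Eigen::Triplet<double>(2, 0, -1.0));
      triplets.push_back(Eigen::Triplet<double>(0, 1, 0.5));  // duplicate: summed up to 3.0

      Eigen::SparseMatrix<double> A(3, 3);
      A.setFromTriplets(triplets.begin(), triplets.end());    // unsorted input; result is compressed
    }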
@@ -1077,11 +1019,11 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
{
eigen_assert(!isCompressed());
// TODO, in practice we should be able to use m_innerNonZeros for that task
- VectorXi wi(innerSize());
+ Matrix<Index,Dynamic,1> wi(innerSize());
wi.fill(-1);
Index count = 0;
// for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
- for(int j=0; j<outerSize(); ++j)
+ for(Index j=0; j<outerSize(); ++j)
{
Index start = count;
Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
@@ -1106,11 +1048,212 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
m_outerIndex[m_outerSize] = count;
// turn the matrix into compressed form
- delete[] m_innerNonZeros;
+ std::free(m_innerNonZeros);
m_innerNonZeros = 0;
m_data.resize(m_outerIndex[m_outerSize]);
}
+template<typename Scalar, int _Options, typename _Index>
+template<typename OtherDerived>
+EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
+{
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ if (needToTranspose)
+ {
+ // two passes algorithm:
+ // 1 - compute the number of coeffs per dest inner vector
+ // 2 - do the actual copy/eval
+ // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
+ typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
+ typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
+ OtherCopy otherCopy(other.derived());
+
+ SparseMatrix dest(other.rows(),other.cols());
+ Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();
+
+ // pass 1
+ // FIXME the above copy could be merged with that pass
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
+ for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ ++dest.m_outerIndex[it.index()];
+
+ // prefix sum
+ Index count = 0;
+ Matrix<Index,Dynamic,1> positions(dest.outerSize());
+ for (Index j=0; j<dest.outerSize(); ++j)
+ {
+ Index tmp = dest.m_outerIndex[j];
+ dest.m_outerIndex[j] = count;
+ positions[j] = count;
+ count += tmp;
+ }
+ dest.m_outerIndex[dest.outerSize()] = count;
+ // alloc
+ dest.m_data.resize(count);
+ // pass 2
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
+ {
+ for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ {
+ Index pos = positions[it.index()]++;
+ dest.m_data.index(pos) = j;
+ dest.m_data.value(pos) = it.value();
+ }
+ }
+ this->swap(dest);
+ return *this;
+ }
+ else
+ {
+ if(other.isRValue())
+ initAssignment(other.derived());
+ // there is no special optimization
+ return Base::operator=(other.derived());
+ }
+}
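[The transposing branch above is a counting sort: pass 1 histograms the entries per destination inner vector, a prefix sum converts the counts into start offsets, and pass 2 scatters each entry. A simplified stand-alone sketch of that scheme, ours rather than Eigen's code:]

    #include <vector>
    #include <cstddef>

    void prefixSumOffsets(const std::vector<int>& destVec,   // destination inner vector of each entry
                          std::vector<int>& positions)       // pre-sized to outerSize, zero-initialized
    {
      // pass 1: histogram of entries per destination inner vector
      for (std::size_t k = 0; k < destVec.size(); ++k)
        ++positions[destVec[k]];
      // prefix sum: turn counts into starting write offsets
      int count = 0;
      for (std::size_t j = 0; j < positions.size(); ++j)
      {
        int tmp = positions[j];
        positions[j] = count;
        count += tmp;
      }
      // pass 2 (in the real code): entry k is written to slot positions[destVec[k]]++
    }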
+
+template<typename _Scalar, int _Options, typename _Index>
+EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
+{
+ eigen_assert(!isCompressed());
+
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
+ Index innerNNZ = m_innerNonZeros[outer];
+ if(innerNNZ>=room)
+ {
+ // this inner vector is full, we need to reallocate the whole buffer :(
+ reserve(SingletonVector(outer,std::max<Index>(2,innerNNZ)));
+ }
+
+ Index startId = m_outerIndex[outer];
+ Index p = startId + m_innerNonZeros[outer];
+ while ( (p > startId) && (m_data.index(p-1) > inner) )
+ {
+ m_data.index(p) = m_data.index(p-1);
+ m_data.value(p) = m_data.value(p-1);
+ --p;
+ }
+ eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exist, you must call coeffRef to this end");
+
+ m_innerNonZeros[outer]++;
+
+ m_data.index(p) = inner;
+ return (m_data.value(p) = 0);
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
+{
+ eigen_assert(isCompressed());
+
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index previousOuter = outer;
+ if (m_outerIndex[outer+1]==0)
+ {
+ // we start a new inner vector
+ while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
+ {
+ m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
+ --previousOuter;
+ }
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
+
+ // here we have to handle the tricky case where the outerIndex array
+ // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g.,
+ // the 2nd inner vector...
+ bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
+ && (size_t(m_outerIndex[outer+1]) == m_data.size());
+
+ size_t startId = m_outerIndex[outer];
+ // FIXME let's make sure sizeof(long int) == sizeof(size_t)
+ size_t p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+
+ double reallocRatio = 1;
+ if (m_data.allocatedSize()<=m_data.size())
+ {
+ // if there is no preallocated memory, let's reserve a minimum of 32 elements
+ if (m_data.size()==0)
+ {
+ m_data.reserve(32);
+ }
+ else
+ {
+ // we need to reallocate the data, to reduce multiple reallocations
+ // we use a smart resize algorithm based on the current filling ratio
+ // in addition, we use double to avoid integers overflows
+ double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
+ reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
+ // furthermore we bound the realloc ratio to:
+ // 1) reduce multiple minor realloc when the matrix is almost filled
+ // 2) avoid to allocate too much memory when the matrix is almost empty
+ reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
+ }
+ }
+ m_data.resize(m_data.size()+1,reallocRatio);
+
+ if (!isLastVec)
+ {
+ if (previousOuter==-1)
+ {
+ // oops wrong guess.
+ // let's correct the outer offsets
+ for (Index k=0; k<=(outer+1); ++k)
+ m_outerIndex[k] = 0;
+ Index k=outer+1;
+ while(m_outerIndex[k]==0)
+ m_outerIndex[k++] = 1;
+ while (k<=m_outerSize && m_outerIndex[k]!=0)
+ m_outerIndex[k++]++;
+ p = 0;
+ --k;
+ k = m_outerIndex[k]-1;
+ while (k>0)
+ {
+ m_data.index(k) = m_data.index(k-1);
+ m_data.value(k) = m_data.value(k-1);
+ k--;
+ }
+ }
+ else
+ {
+ // we are not inserting into the last inner vec
+ // update outer indices:
+ Index j = outer+2;
+ while (j<=m_outerSize && m_outerIndex[j]!=0)
+ m_outerIndex[j++]++;
+ --j;
+ // shift data of last vecs:
+ Index k = m_outerIndex[j]-1;
+ while (k>=Index(p))
+ {
+ m_data.index(k) = m_data.index(k-1);
+ m_data.value(k) = m_data.value(k-1);
+ k--;
+ }
+ }
+ }
+
+ while ( (p > startId) && (m_data.index(p-1) > inner) )
+ {
+ m_data.index(p) = m_data.index(p-1);
+ m_data.value(p) = m_data.value(p-1);
+ --p;
+ }
+
+ m_data.index(p) = inner;
+ return (m_data.value(p) = 0);
+}
+
} // end namespace Eigen
#endif // EIGEN_SPARSEMATRIX_H