Diffstat (limited to 'Eigen/src/SparseCore/SparseMatrix.h')
-rw-r--r--  Eigen/src/SparseCore/SparseMatrix.h | 143
1 file changed, 129 insertions, 14 deletions
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h
index 323c2323b..616b4a0c2 100644
--- a/Eigen/src/SparseCore/SparseMatrix.h
+++ b/Eigen/src/SparseCore/SparseMatrix.h
@@ -21,7 +21,7 @@ namespace Eigen {
* This class implements a more versatile variant of the common \em compressed row/column storage format.
* Each column's (resp. row's) non-zeros are stored as a pair of value and associated row (resp. column) index.
* All the non-zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
- * space inbetween the nonzeros of two successive colmuns (resp. rows) such that insertion of new non-zero
+ * space in between the non-zeros of two successive columns (resp. rows) such that insertion of a new non-zero
* can be done with limited memory reallocation and copies.
*
* A call to the function makeCompressed() turns the matrix into the standard \em compressed format
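
A quick sketch of the storage scheme described above (sizes and values are purely illustrative): entries inserted after a reserve() live in the uncompressed format with per-column slack, and makeCompressed() squeezes them into the standard compressed layout.

    #include <Eigen/SparseCore>

    int main()
    {
      Eigen::SparseMatrix<double> mat(1000, 1000);       // column-major by default
      mat.reserve(Eigen::VectorXi::Constant(1000, 6));   // leave room for ~6 non-zeros per column
      mat.insert(3, 7) = 1.5;                            // cheap: fits in the reserved slack
      mat.insert(9, 7) = 2.0;
      mat.makeCompressed();                              // drop the slack -> standard compressed format
      return 0;
    }
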
@@ -99,6 +99,8 @@ class SparseMatrix
typedef SparseCompressedBase<SparseMatrix> Base;
using Base::convert_index;
friend class SparseVector<_Scalar,0,_StorageIndex>;
+ template<typename, typename, typename, typename, typename>
+ friend struct internal::Assignment;
public:
using Base::isCompressed;
using Base::nonZeros;
@@ -327,7 +329,8 @@ class SparseMatrix
m_outerIndex[j] = newOuterIndex[j];
m_innerNonZeros[j] = innerNNZ;
}
- m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
+ if(m_outerSize>0)
+ m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
m_data.resize(m_outerIndex[m_outerSize]);
}
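
The added guard only matters when the outer size is zero; a hedged sketch of the per-inner-vector reserve() path it protects (sizes are illustrative, <Eigen/SparseCore> assumed included):

    Eigen::SparseMatrix<double> A(100, 50);
    A.reserve(Eigen::VectorXi::Constant(50, 4));   // one reserve size per column (outer vector)

    Eigen::SparseMatrix<double> E(0, 0);
    E.reserve(Eigen::VectorXi());                  // zero outer size: the guarded line is skipped
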
@@ -502,8 +505,8 @@ class SparseMatrix
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
}
}
-
- /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerence \a epsilon */
+
+ /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
prune(default_prunning_func(reference,epsilon));
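
A minimal usage sketch of this prune() overload (reference value and tolerance chosen arbitrarily; roughly, stored coefficients with |value| <= epsilon * |reference| are removed):

    void dropTinyEntries(Eigen::SparseMatrix<double>& M)
    {
      // remove stored coefficients that are much smaller than 1.0 under the given tolerance
      M.prune(1.0, 1e-12);
    }
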
@@ -576,10 +579,12 @@ class SparseMatrix
else if (innerChange < 0)
{
// Inner size decreased: allocate a new m_innerNonZeros
- m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
if (!m_innerNonZeros) internal::throw_std_bad_alloc();
- for(Index i = 0; i < m_outerSize; i++)
+ for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
+ for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
+ m_innerNonZeros[i] = 0;
}
// Change the m_innerNonZeros in case of a decrease of inner size
@@ -604,9 +609,9 @@ class SparseMatrix
m_outerIndex = newOuterIndex;
if (outerChange > 0)
{
- StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
+ StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
- m_outerIndex[i] = last;
+ m_outerIndex[i] = lastIdx;
}
m_outerSize += outerChange;
}
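
The changes above adjust how conservativeResize() books the inner-vector sizes when the matrix grows or shrinks; a small sketch of the call it affects (dimensions are illustrative, <Eigen/SparseCore> assumed included):

    Eigen::SparseMatrix<double> A(4, 4);
    A.insert(1, 1) = 3.0;
    A.conservativeResize(6, 3);   // grow rows, shrink columns; entries inside the new bounds are kept
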
@@ -780,6 +785,9 @@ class SparseMatrix
template<typename OtherDerived>
inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
{ return Base::operator=(other.derived()); }
+
+ template<typename Lhs, typename Rhs>
+ inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
#endif // EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
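
The operator= overload declared above is selected when a product expression is assigned straight into a SparseMatrix; a minimal sketch of the kind of expression it targets (<Eigen/SparseCore> assumed included):

    Eigen::SparseMatrix<double> A(3, 3), B(3, 3), C;
    A.insert(0, 0) = 1.0;
    B.insert(0, 2) = 2.0;
    C = A * B;   // sparse * sparse product evaluated directly into C
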
@@ -893,7 +901,114 @@ public:
Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
m_data.index(p) = convert_index(inner);
- return (m_data.value(p) = 0);
+ return (m_data.value(p) = Scalar(0));
+ }
+protected:
+ struct IndexPosPair {
+ IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
+ Index i;
+ Index p;
+ };
+
+ /** \internal assign \a diagXpr to the diagonal of \c *this
+ * There are different strategies:
+ * 1 - if *this is overwritten (Func==assign_op) or *this is empty, then we can treat *this as a dense vector expression.
+ * 2 - otherwise, for each diagonal coeff,
+ * 2.a - if it already exists, then we update it,
+ * 2.b - otherwise, if *this is uncompressed and the current inner-vector has room for at least 1 element, then we perform an in-place insertion.
+ * 2.c - otherwise, we'll have to reallocate and copy everything, so instead of doing so for each new element, the element is recorded in a std::vector.
+ * 3 - at the end, if some entries failed to be inserted in-place, then we allocate a new buffer, copy each chunk to the right position, and insert the new elements.
+ *
+ * TODO: some piece of code could be isolated and reused for a general in-place update strategy.
+ * TODO: if we start to defer the insertion of some elements (i.e., case 2.c executed once),
+ * then it *might* be better to disable case 2.b since they will have to be copied anyway.
+ */
+ template<typename DiagXpr, typename Func>
+ void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
+ {
+ Index n = diagXpr.size();
+
+ const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
+ if(overwrite)
+ {
+ if((this->rows()!=n) || (this->cols()!=n))
+ this->resize(n, n);
+ }
+
+ if(m_data.size()==0 || overwrite)
+ {
+ typedef Array<StorageIndex,Dynamic,1> ArrayXI;
+ this->makeCompressed();
+ this->resizeNonZeros(n);
+ Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
+ Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
+ Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
+ values.setZero();
+ internal::call_assignment_no_alias(values, diagXpr, assignFunc);
+ }
+ else
+ {
+ bool isComp = isCompressed();
+ internal::evaluator<DiagXpr> diaEval(diagXpr);
+ std::vector<IndexPosPair> newEntries;
+
+ // 1 - try in-place update and record insertion failures
+ for(Index i = 0; i<n; ++i)
+ {
+ internal::LowerBoundIndex lb = this->lower_bound(i,i);
+ Index p = lb.value;
+ if(lb.found)
+ {
+ // the coeff already exists
+ assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
+ }
+ else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
+ {
+ // non compressed mode with local room for inserting one element
+ m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
+ m_innerNonZeros[i]++;
+ m_data.value(p) = Scalar(0);
+ m_data.index(p) = StorageIndex(i);
+ assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
+ }
+ else
+ {
+ // defer insertion
+ newEntries.push_back(IndexPosPair(i,p));
+ }
+ }
+ // 2 - insert deferred entries
+ Index n_entries = Index(newEntries.size());
+ if(n_entries>0)
+ {
+ Storage newData(m_data.size()+n_entries);
+ Index prev_p = 0;
+ Index prev_i = 0;
+ for(Index k=0; k<n_entries;++k)
+ {
+ Index i = newEntries[k].i;
+ Index p = newEntries[k].p;
+ internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
+ internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
+ for(Index j=prev_i;j<i;++j)
+ m_outerIndex[j+1] += k;
+ if(!isComp)
+ m_innerNonZeros[i]++;
+ prev_p = p;
+ prev_i = i;
+ newData.value(p+k) = Scalar(0);
+ newData.index(p+k) = StorageIndex(i);
+ assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
+ }
+ {
+ internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
+ internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
+ for(Index j=prev_i+1;j<=m_outerSize;++j)
+ m_outerIndex[j] += n_entries;
+ }
+ m_data.swap(newData);
+ }
+ }
}
private:
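
assignDiagonal() above is reached through assignments to the matrix diagonal; a hedged sketch of the user-level expressions that exercise the two strategies (assuming <Eigen/Core> and <Eigen/SparseCore> are included, and the writable sparse diagonal wired up together with this code):

    Eigen::SparseMatrix<double> A(5, 5);
    Eigen::VectorXd d = Eigen::VectorXd::Constant(5, 2.0);
    A.diagonal() = d;    // overwrite: dense-like fast path (case 1)
    A.diagonal() += d;   // update: in-place insertion or deferred reallocation (cases 2 and 3)
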
@@ -973,7 +1088,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
* \code
typedef Triplet<double> T;
std::vector<T> tripletList;
- triplets.reserve(estimation_of_entries);
+ tripletList.reserve(estimation_of_entries);
for(...)
{
// ...
@@ -986,7 +1101,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
*
* \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
* an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
- * be explicitely stored into a std::vector for instance.
+ * be explicitly stored into a std::vector for instance.
*/
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>
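
A fuller, compilable version of the triplet example documented above (sizes and values are illustrative; assumes <Eigen/SparseCore> and <vector> are included):

    std::vector<Eigen::Triplet<double>> tripletList;
    tripletList.reserve(3);                 // the reserve call fixed in the docs above
    tripletList.emplace_back(0, 0, 1.0);
    tripletList.emplace_back(1, 2, 2.0);
    tripletList.emplace_back(0, 0, 0.5);    // duplicate entries are summed up by default
    Eigen::SparseMatrix<double> S(4, 4);
    S.setFromTriplets(tripletList.begin(), tripletList.end());
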
@@ -1232,7 +1347,7 @@ typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Sca
}
m_data.index(p) = convert_index(inner);
- return (m_data.value(p) = 0);
+ return (m_data.value(p) = Scalar(0));
}
if(m_data.size() != m_data.allocatedSize())
@@ -1274,7 +1389,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar&
m_innerNonZeros[outer]++;
m_data.index(p) = inner;
- return (m_data.value(p) = 0);
+ return (m_data.value(p) = Scalar(0));
}
template<typename _Scalar, int _Options, typename _StorageIndex>
@@ -1381,7 +1496,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar&
}
m_data.index(p) = inner;
- return (m_data.value(p) = 0);
+ return (m_data.value(p) = Scalar(0));
}
namespace internal {