Diffstat (limited to 'Eigen/src/SparseCore')
-rw-r--r--   Eigen/src/SparseCore/AmbiVector.h                      |   7
-rw-r--r--   Eigen/src/SparseCore/CompressedStorage.h               |  16
-rw-r--r--   Eigen/src/SparseCore/ConservativeSparseSparseProduct.h |  79
-rw-r--r--   Eigen/src/SparseCore/SparseAssign.h                    | 108
-rw-r--r--   Eigen/src/SparseCore/SparseBlock.h                     |  82
-rw-r--r--   Eigen/src/SparseCore/SparseCompressedBase.h            |  51
-rw-r--r--   Eigen/src/SparseCore/SparseCwiseBinaryOp.h             |  26
-rw-r--r--   Eigen/src/SparseCore/SparseCwiseUnaryOp.h              |   6
-rw-r--r--   Eigen/src/SparseCore/SparseDenseProduct.h              |  38
-rw-r--r--   Eigen/src/SparseCore/SparseMatrix.h                    | 143
-rw-r--r--   Eigen/src/SparseCore/SparseMatrixBase.h                |  17
-rw-r--r--   Eigen/src/SparseCore/SparseProduct.h                   |  14
-rw-r--r--   Eigen/src/SparseCore/SparseRef.h                       |  14
-rw-r--r--   Eigen/src/SparseCore/SparseSelfAdjointView.h           |  15
-rw-r--r--   Eigen/src/SparseCore/SparseSparseProductWithPruning.h  |  22
-rw-r--r--   Eigen/src/SparseCore/SparseUtil.h                      |   8
-rw-r--r--   Eigen/src/SparseCore/SparseVector.h                    |   2
-rw-r--r--   Eigen/src/SparseCore/SparseView.h                      |   1
18 files changed, 438 insertions, 211 deletions
diff --git a/Eigen/src/SparseCore/AmbiVector.h b/Eigen/src/SparseCore/AmbiVector.h
index 8a5cc91f2..2cb7747cc 100644
--- a/Eigen/src/SparseCore/AmbiVector.h
+++ b/Eigen/src/SparseCore/AmbiVector.h
@@ -28,7 +28,7 @@ class AmbiVector
typedef typename NumTraits<Scalar>::Real RealScalar;
explicit AmbiVector(Index size)
- : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
+ : m_buffer(0), m_zero(0), m_size(0), m_end(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
{
resize(size);
}
@@ -94,7 +94,7 @@ class AmbiVector
Index allocSize = m_allocatedElements * sizeof(ListEl);
allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar);
Scalar* newBuffer = new Scalar[allocSize];
- memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
+ std::memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
delete[] m_buffer;
m_buffer = newBuffer;
}
@@ -147,7 +147,8 @@ template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::init(int mode)
{
m_mode = mode;
- if (m_mode==IsSparse)
+ // This is only necessary in sparse mode, but we set these unconditionally to avoid some maybe-uninitialized warnings
+ // if (m_mode==IsSparse)
{
m_llSize = 0;
m_llStart = -1;
diff --git a/Eigen/src/SparseCore/CompressedStorage.h b/Eigen/src/SparseCore/CompressedStorage.h
index d89fa0dae..acd986fab 100644
--- a/Eigen/src/SparseCore/CompressedStorage.h
+++ b/Eigen/src/SparseCore/CompressedStorage.h
@@ -207,6 +207,22 @@ class CompressedStorage
return m_values[id];
}
+ void moveChunk(Index from, Index to, Index chunkSize)
+ {
+ eigen_internal_assert(to+chunkSize <= m_size);
+ if(to>from && from+chunkSize>to)
+ {
+ // move backward
+ internal::smart_memmove(m_values+from, m_values+from+chunkSize, m_values+to);
+ internal::smart_memmove(m_indices+from, m_indices+from+chunkSize, m_indices+to);
+ }
+ else
+ {
+ internal::smart_copy(m_values+from, m_values+from+chunkSize, m_values+to);
+ internal::smart_copy(m_indices+from, m_indices+from+chunkSize, m_indices+to);
+ }
+ }
+
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
Index k = 0;
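The new moveChunk() helper chooses between overlapping and non-overlapping copies depending on whether the destination range starts inside the source range. Below is a minimal stand-alone sketch of that rule for a trivially copyable scalar; the function name move_chunk and the std::vector container are illustrative only, not Eigen API:

    #include <algorithm>
    #include <cstring>
    #include <vector>

    // Move chunkSize elements from position 'from' to position 'to'.
    // If the move goes forward and the ranges overlap, a naive forward copy
    // would read elements that were already overwritten, so fall back to
    // memmove semantics in that case.
    void move_chunk(std::vector<double>& v, std::size_t from, std::size_t to, std::size_t chunkSize)
    {
      if (to > from && from + chunkSize > to)
        std::memmove(v.data() + to, v.data() + from, chunkSize * sizeof(double)); // overlapping forward move
      else
        std::copy(v.data() + from, v.data() + from + chunkSize, v.data() + to);   // disjoint (or backward) move
    }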
diff --git a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
index 492eb0a29..948650253 100644
--- a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
+++ b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
@@ -10,29 +10,31 @@
#ifndef EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
#define EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
-namespace Eigen {
+namespace Eigen {
namespace internal {
template<typename Lhs, typename Rhs, typename ResultType>
static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, bool sortedInsertion = false)
{
- typedef typename remove_all<Lhs>::type::Scalar Scalar;
+ typedef typename remove_all<Lhs>::type::Scalar LhsScalar;
+ typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
+ typedef typename remove_all<ResultType>::type::Scalar ResScalar;
// make sure to call innerSize/outerSize since we fake the storage order.
Index rows = lhs.innerSize();
Index cols = rhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
-
+
ei_declare_aligned_stack_constructed_variable(bool, mask, rows, 0);
- ei_declare_aligned_stack_constructed_variable(Scalar, values, rows, 0);
+ ei_declare_aligned_stack_constructed_variable(ResScalar, values, rows, 0);
ei_declare_aligned_stack_constructed_variable(Index, indices, rows, 0);
-
+
std::memset(mask,0,sizeof(bool)*rows);
evaluator<Lhs> lhsEval(lhs);
evaluator<Rhs> rhsEval(rhs);
-
+
// estimate the number of non zero entries
// given a rhs column containing Y non zeros, we assume that the respective Y columns
// of the lhs differs in average of one non zeros, thus the number of non zeros for
@@ -51,12 +53,12 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
Index nnz = 0;
for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
{
- Scalar y = rhsIt.value();
+ RhsScalar y = rhsIt.value();
Index k = rhsIt.index();
for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
{
Index i = lhsIt.index();
- Scalar x = lhsIt.value();
+ LhsScalar x = lhsIt.value();
if(!mask[i])
{
mask[i] = true;
@@ -139,7 +141,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrixAux;
typedef typename sparse_eval<ColMajorMatrixAux,ResultType::RowsAtCompileTime,ResultType::ColsAtCompileTime,ColMajorMatrixAux::Flags>::type ColMajorMatrix;
-
+
// If the result is tall and thin (in the extreme case a column vector)
// then it is faster to sort the coefficients inplace instead of transposing twice.
// FIXME, the following heuristic is probably not very good.
@@ -153,7 +155,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
else
{
ColMajorMatrixAux resCol(lhs.rows(),rhs.cols());
- // ressort to transpose to sort the entries
+ // resort to transpose to sort the entries
internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrixAux>(lhs, rhs, resCol, false);
RowMajorMatrix resRow(resCol);
res = resRow.markAsRValue();
@@ -166,11 +168,12 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
- RowMajorMatrix rhsRow = rhs;
- RowMajorMatrix resRow(lhs.rows(), rhs.cols());
- internal::conservative_sparse_sparse_product_impl<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);
- res = resRow;
+ typedef SparseMatrix<typename Rhs::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRhs;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRes;
+ RowMajorRhs rhsRow = rhs;
+ RowMajorRes resRow(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<RowMajorRhs,Lhs,RowMajorRes>(rhsRow, lhs, resRow);
+ res = resRow;
}
};
@@ -179,10 +182,11 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
- RowMajorMatrix lhsRow = lhs;
- RowMajorMatrix resRow(lhs.rows(), rhs.cols());
- internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);
+ typedef SparseMatrix<typename Lhs::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorLhs;
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRes;
+ RowMajorLhs lhsRow = lhs;
+ RowMajorRes resRow(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorLhs,RowMajorRes>(rhs, lhsRow, resRow);
res = resRow;
}
};
@@ -219,10 +223,11 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
- ColMajorMatrix lhsCol = lhs;
- ColMajorMatrix resCol(lhs.rows(), rhs.cols());
- internal::conservative_sparse_sparse_product_impl<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);
+ typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorLhs;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRes;
+ ColMajorLhs lhsCol = lhs;
+ ColMajorRes resCol(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<ColMajorLhs,Rhs,ColMajorRes>(lhsCol, rhs, resCol);
res = resCol;
}
};
@@ -232,10 +237,11 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
- ColMajorMatrix rhsCol = rhs;
- ColMajorMatrix resCol(lhs.rows(), rhs.cols());
- internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);
+ typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRhs;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRes;
+ ColMajorRhs rhsCol = rhs;
+ ColMajorRes resCol(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorRhs,ColMajorRes>(lhs, rhsCol, resCol);
res = resCol;
}
};
@@ -263,7 +269,8 @@ namespace internal {
template<typename Lhs, typename Rhs, typename ResultType>
static void sparse_sparse_to_dense_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef typename remove_all<Lhs>::type::Scalar Scalar;
+ typedef typename remove_all<Lhs>::type::Scalar LhsScalar;
+ typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
Index cols = rhs.outerSize();
eigen_assert(lhs.outerSize() == rhs.innerSize());
@@ -274,12 +281,12 @@ static void sparse_sparse_to_dense_product_impl(const Lhs& lhs, const Rhs& rhs,
{
for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
{
- Scalar y = rhsIt.value();
+ RhsScalar y = rhsIt.value();
Index k = rhsIt.index();
for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
{
Index i = lhsIt.index();
- Scalar x = lhsIt.value();
+ LhsScalar x = lhsIt.value();
res.coeffRef(i,j) += x * y;
}
}
@@ -310,9 +317,9 @@ struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMa
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
- ColMajorMatrix lhsCol(lhs);
- internal::sparse_sparse_to_dense_product_impl<ColMajorMatrix,Rhs,ResultType>(lhsCol, rhs, res);
+ typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorLhs;
+ ColMajorLhs lhsCol(lhs);
+ internal::sparse_sparse_to_dense_product_impl<ColMajorLhs,Rhs,ResultType>(lhsCol, rhs, res);
}
};
@@ -321,9 +328,9 @@ struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMa
{
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
- ColMajorMatrix rhsCol(rhs);
- internal::sparse_sparse_to_dense_product_impl<Lhs,ColMajorMatrix,ResultType>(lhs, rhsCol, res);
+ typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRhs;
+ ColMajorRhs rhsCol(rhs);
+ internal::sparse_sparse_to_dense_product_impl<Lhs,ColMajorRhs,ResultType>(lhs, rhsCol, res);
}
};
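Splitting the single Scalar typedef into LhsScalar, RhsScalar and ResScalar lets the two operands carry different but compatible scalar types, with the result taking the promoted type. A hedged usage sketch of the kind of mixed-scalar product this enables, assuming an Eigen build that contains this change and the usual real-times-complex promotion rules:

    #include <complex>
    #include <Eigen/Sparse>

    void mixed_scalar_product()
    {
      Eigen::SparseMatrix<double> A(3, 3);                 // real lhs
      Eigen::SparseMatrix<std::complex<double> > B(3, 3);  // complex rhs
      A.insert(0, 0) = 2.0;
      B.insert(0, 1) = std::complex<double>(1.0, -1.0);

      // The result scalar is the promoted type, complex<double> here.
      Eigen::SparseMatrix<std::complex<double> > C = A * B;
    }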
diff --git a/Eigen/src/SparseCore/SparseAssign.h b/Eigen/src/SparseCore/SparseAssign.h
index 18352a847..905485c88 100644
--- a/Eigen/src/SparseCore/SparseAssign.h
+++ b/Eigen/src/SparseCore/SparseAssign.h
@@ -83,7 +83,7 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
// eval without temporary
dst.resize(src.rows(), src.cols());
dst.setZero();
- dst.reserve((std::max)(src.rows(),src.cols())*2);
+ dst.reserve((std::min)(src.rows()*src.cols(), (std::max)(src.rows(),src.cols())*2));
for (Index j=0; j<outerEvaluationSize; ++j)
{
dst.startVec(j);
@@ -107,7 +107,7 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
DstXprType temp(src.rows(), src.cols());
- temp.reserve((std::max)(src.rows(),src.cols())*2);
+ temp.reserve((std::min)(src.rows()*src.cols(), (std::max)(src.rows(),src.cols())*2));
for (Index j=0; j<outerEvaluationSize; ++j)
{
temp.startVec(j);
@@ -134,8 +134,8 @@ struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse>
};
// Generic Sparse to Dense assignment
-template< typename DstXprType, typename SrcXprType, typename Functor>
-struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense>
+template< typename DstXprType, typename SrcXprType, typename Functor, typename Weak>
+struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense, Weak>
{
static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
{
@@ -153,6 +153,73 @@ struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense>
}
};
+// Specialization for dense ?= dense +/- sparse and dense ?= sparse +/- dense
+template<typename DstXprType, typename Func1, typename Func2>
+struct assignment_from_dense_op_sparse
+{
+ template<typename SrcXprType, typename InitialFunc>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ void run(DstXprType &dst, const SrcXprType &src, const InitialFunc& /*func*/)
+ {
+ #ifdef EIGEN_SPARSE_ASSIGNMENT_FROM_DENSE_OP_SPARSE_PLUGIN
+ EIGEN_SPARSE_ASSIGNMENT_FROM_DENSE_OP_SPARSE_PLUGIN
+ #endif
+
+ call_assignment_no_alias(dst, src.lhs(), Func1());
+ call_assignment_no_alias(dst, src.rhs(), Func2());
+ }
+
+ // Specialization for dense1 = sparse + dense2; -> dense1 = dense2; dense1 += sparse;
+ template<typename Lhs, typename Rhs, typename Scalar>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ typename internal::enable_if<internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>::type
+ run(DstXprType &dst, const CwiseBinaryOp<internal::scalar_sum_op<Scalar,Scalar>, const Lhs, const Rhs> &src,
+ const internal::assign_op<typename DstXprType::Scalar,Scalar>& /*func*/)
+ {
+ #ifdef EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_ADD_DENSE_PLUGIN
+ EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_ADD_DENSE_PLUGIN
+ #endif
+
+ // Apply the dense matrix first, then the sparse one.
+ call_assignment_no_alias(dst, src.rhs(), Func1());
+ call_assignment_no_alias(dst, src.lhs(), Func2());
+ }
+
+ // Specialization for dense1 = sparse - dense2; -> dense1 = -dense2; dense1 += sparse;
+ template<typename Lhs, typename Rhs, typename Scalar>
+ static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+ typename internal::enable_if<internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>::type
+ run(DstXprType &dst, const CwiseBinaryOp<internal::scalar_difference_op<Scalar,Scalar>, const Lhs, const Rhs> &src,
+ const internal::assign_op<typename DstXprType::Scalar,Scalar>& /*func*/)
+ {
+ #ifdef EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_SUB_DENSE_PLUGIN
+ EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_SUB_DENSE_PLUGIN
+ #endif
+
+ // Apply the dense matrix first, then the sparse one.
+ call_assignment_no_alias(dst, -src.rhs(), Func1());
+ call_assignment_no_alias(dst, src.lhs(), add_assign_op<typename DstXprType::Scalar,typename Lhs::Scalar>());
+ }
+};
+
+#define EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(ASSIGN_OP,BINOP,ASSIGN_OP2) \
+ template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar> \
+ struct Assignment<DstXprType, CwiseBinaryOp<internal::BINOP<Scalar,Scalar>, const Lhs, const Rhs>, internal::ASSIGN_OP<typename DstXprType::Scalar,Scalar>, \
+ Sparse2Dense, \
+ typename internal::enable_if< internal::is_same<typename internal::evaluator_traits<Lhs>::Shape,DenseShape>::value \
+ || internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>::type> \
+ : assignment_from_dense_op_sparse<DstXprType, internal::ASSIGN_OP<typename DstXprType::Scalar,typename Lhs::Scalar>, internal::ASSIGN_OP2<typename DstXprType::Scalar,typename Rhs::Scalar> > \
+ {}
+
+EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(assign_op, scalar_sum_op,add_assign_op);
+EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(add_assign_op,scalar_sum_op,add_assign_op);
+EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(sub_assign_op,scalar_sum_op,sub_assign_op);
+
+EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(assign_op, scalar_difference_op,sub_assign_op);
+EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(add_assign_op,scalar_difference_op,sub_assign_op);
+EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(sub_assign_op,scalar_difference_op,add_assign_op);
+
+
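These specializations catch sums and differences mixing one dense and one sparse operand with a dense destination, and evaluate them without building a sparse temporary: the dense operand is assigned (or negated) first, then the sparse one is folded in coefficient by coefficient. A usage sketch of the expressions they route; the function name and operand sizes are illustrative, and D and S are assumed to have the same dimensions:

    #include <Eigen/Dense>
    #include <Eigen/Sparse>

    void dense_op_sparse(const Eigen::MatrixXd& D, const Eigen::SparseMatrix<double>& S)
    {
      Eigen::MatrixXd R;
      R  = D + S;   // dense = dense + sparse : R = D; R += S;
      R  = S - D;   // dense = sparse - dense : R = -D; R += S;
      R += S;       // only the stored coefficients of S are visited
      R -= S;
    }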
// Specialization for "dst = dec.solve(rhs)"
// NOTE we need to specialize it for Sparse2Sparse to avoid ambiguous specialization error
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
@@ -179,35 +246,22 @@ struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse>
{
typedef typename DstXprType::StorageIndex StorageIndex;
typedef typename DstXprType::Scalar Scalar;
- typedef Array<StorageIndex,Dynamic,1> ArrayXI;
- typedef Array<Scalar,Dynamic,1> ArrayXS;
- template<int Options>
- static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
- {
- Index dstRows = src.rows();
- Index dstCols = src.cols();
- if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
- dst.resize(dstRows, dstCols);
- Index size = src.diagonal().size();
- dst.makeCompressed();
- dst.resizeNonZeros(size);
- Map<ArrayXI>(dst.innerIndexPtr(), size).setLinSpaced(0,StorageIndex(size)-1);
- Map<ArrayXI>(dst.outerIndexPtr(), size+1).setLinSpaced(0,StorageIndex(size));
- Map<ArrayXS>(dst.valuePtr(), size) = src.diagonal();
- }
+ template<int Options, typename AssignFunc>
+ static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const AssignFunc &func)
+ { dst.assignDiagonal(src.diagonal(), func); }
template<typename DstDerived>
static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
- {
- dst.diagonal() = src.diagonal();
- }
+ { dst.derived().diagonal() = src.diagonal(); }
- static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
- { dst.diagonal() += src.diagonal(); }
+ template<typename DstDerived>
+ static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ { dst.derived().diagonal() += src.diagonal(); }
- static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
- { dst.diagonal() -= src.diagonal(); }
+ template<typename DstDerived>
+ static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
+ { dst.derived().diagonal() -= src.diagonal(); }
};
} // end namespace internal
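At the user level, this specialization handles assigning a diagonal expression to a sparse matrix; with the change above, the add/sub variants are forwarded to SparseMatrix::assignDiagonal(), which updates existing diagonal entries in place and inserts the missing ones (see the SparseMatrix.h hunk further down). A short usage sketch, assuming this patch is present:

    #include <Eigen/Dense>
    #include <Eigen/Sparse>

    void diagonal_into_sparse()
    {
      Eigen::VectorXd d = Eigen::VectorXd::Constant(4, 2.0);
      Eigen::SparseMatrix<double> S(4, 4);

      S  = d.asDiagonal();   // overwrite: builds a compressed diagonal matrix directly
      S += d.asDiagonal();   // update path: existing diagonal coefficients are modified in place
      S -= d.asDiagonal();   // missing entries, if any, are inserted with minimal copying
    }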
diff --git a/Eigen/src/SparseCore/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h
index 511e92b2f..5b4f6cc9f 100644
--- a/Eigen/src/SparseCore/SparseBlock.h
+++ b/Eigen/src/SparseCore/SparseBlock.h
@@ -164,7 +164,7 @@ public:
}
else
{
- if(m_matrix.isCompressed())
+ if(m_matrix.isCompressed() && nnz!=block_size)
{
// no need to realloc, simply copy the tail at its respective position and insert tmp
matrix.data().resize(start + nnz + tail_size);
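The touched code is the assignment operator of an inner-panel block of a SparseMatrix; with the added nnz!=block_size condition, overwriting a block with an expression holding exactly as many nonzeros no longer takes the tail-copy branch. A usage sketch of the operation involved, with hypothetical sizes and values:

    #include <Eigen/Sparse>

    void overwrite_column_in_place()
    {
      Eigen::SparseMatrix<double> A(8, 8);   // column-major by default
      A.insert(0, 2) = 1.0;
      A.insert(3, 2) = 2.0;
      A.makeCompressed();

      Eigen::SparseVector<double> v(8);
      v.insert(5) = 4.0;
      v.insert(7) = 5.0;

      // Column 2 currently holds two nonzeros and v also holds two,
      // so the assignment can reuse the existing storage slot by slot.
      A.col(2) = v;
    }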
@@ -326,46 +326,6 @@ private:
//----------
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
- * is col-major (resp. row-major).
- */
-template<typename Derived>
-typename SparseMatrixBase<Derived>::InnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer)
-{ return InnerVectorReturnType(derived(), outer); }
-
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
- * is col-major (resp. row-major). Read-only.
- */
-template<typename Derived>
-const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer) const
-{ return ConstInnerVectorReturnType(derived(), outer); }
-
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
- * is col-major (resp. row-major).
- */
-template<typename Derived>
-typename SparseMatrixBase<Derived>::InnerVectorsReturnType
-SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
-{
- return Block<Derived,Dynamic,Dynamic,true>(derived(),
- IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
- IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
-
-}
-
-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
- * is col-major (resp. row-major). Read-only.
- */
-template<typename Derived>
-const typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType
-SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
-{
- return Block<const Derived,Dynamic,Dynamic,true>(derived(),
- IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
- IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
-
-}
-
/** Generic implementation of sparse Block expression.
* Real-only.
*/
@@ -486,9 +446,13 @@ struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBa
{}
inline Index nonZerosEstimate() const {
- Index nnz = m_block.nonZeros();
- if(nnz<0)
- return m_argImpl.nonZerosEstimate() * m_block.size() / m_block.nestedExpression().size();
+ const Index nnz = m_block.nonZeros();
+ if(nnz < 0) {
+ // Scale the non-zero estimate for the underlying expression linearly with block size.
+ // Return zero if the underlying block is empty.
+ const Index nested_sz = m_block.nestedExpression().size();
+ return nested_sz == 0 ? 0 : m_argImpl.nonZerosEstimate() * m_block.size() / nested_sz;
+ }
return nnz;
}
@@ -503,22 +467,25 @@ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
: public EvalIterator
{
- enum { IsRowMajor = unary_evaluator::IsRowMajor };
+ // NOTE MSVC fails to compile if we don't explicitly "import" IsRowMajor from unary_evaluator
+ // because the base class EvalIterator has a private IsRowMajor enum too. (bug #1786)
+ // NOTE We cannot call it IsRowMajor because it would shadow unary_evaluator::IsRowMajor
+ enum { XprIsRowMajor = unary_evaluator::IsRowMajor };
const XprType& m_block;
Index m_end;
public:
EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer)
- : EvalIterator(aEval.m_argImpl, outer + (IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
+ : EvalIterator(aEval.m_argImpl, outer + (XprIsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
m_block(aEval.m_block),
- m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
+ m_end(XprIsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
{
- while( (EvalIterator::operator bool()) && (EvalIterator::index() < (IsRowMajor ? m_block.startCol() : m_block.startRow())) )
+ while( (EvalIterator::operator bool()) && (EvalIterator::index() < (XprIsRowMajor ? m_block.startCol() : m_block.startRow())) )
EvalIterator::operator++();
}
- inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(IsRowMajor ? m_block.startCol() : m_block.startRow()); }
- inline Index outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); }
+ inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(XprIsRowMajor ? m_block.startCol() : m_block.startRow()); }
+ inline Index outer() const { return EvalIterator::outer() - (XprIsRowMajor ? m_block.startRow() : m_block.startCol()); }
inline Index row() const { return EvalIterator::row() - m_block.startRow(); }
inline Index col() const { return EvalIterator::col() - m_block.startCol(); }
@@ -528,7 +495,8 @@ public:
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
{
- enum { IsRowMajor = unary_evaluator::IsRowMajor };
+ // NOTE see above
+ enum { XprIsRowMajor = unary_evaluator::IsRowMajor };
const unary_evaluator& m_eval;
Index m_outerPos;
const Index m_innerIndex;
@@ -538,9 +506,9 @@ public:
EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer)
: m_eval(aEval),
- m_outerPos( (IsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),
- m_innerIndex(IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
- m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),
+ m_outerPos( (XprIsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),
+ m_innerIndex(XprIsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
+ m_end(XprIsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),
m_it(m_eval.m_argImpl, m_outerPos)
{
EIGEN_UNUSED_VARIABLE(outer);
@@ -551,10 +519,10 @@ public:
++(*this);
}
- inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
+ inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (XprIsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
inline Index outer() const { return 0; }
- inline Index row() const { return IsRowMajor ? 0 : index(); }
- inline Index col() const { return IsRowMajor ? index() : 0; }
+ inline Index row() const { return XprIsRowMajor ? 0 : index(); }
+ inline Index col() const { return XprIsRowMajor ? index() : 0; }
inline Scalar value() const { return m_it.value(); }
inline Scalar& valueRef() { return m_it.valueRef(); }
diff --git a/Eigen/src/SparseCore/SparseCompressedBase.h b/Eigen/src/SparseCore/SparseCompressedBase.h
index 5ccb46656..6a2c7a8ce 100644
--- a/Eigen/src/SparseCore/SparseCompressedBase.h
+++ b/Eigen/src/SparseCore/SparseCompressedBase.h
@@ -128,6 +128,28 @@ class SparseCompressedBase
protected:
/** Default constructor. Do nothing. */
SparseCompressedBase() {}
+
+ /** \internal return the index of the coeff at (row,col) or just before if it does not exist.
+ * This is an analogue of std::lower_bound.
+ */
+ internal::LowerBoundIndex lower_bound(Index row, Index col) const
+ {
+ eigen_internal_assert(row>=0 && row<this->rows() && col>=0 && col<this->cols());
+
+ const Index outer = Derived::IsRowMajor ? row : col;
+ const Index inner = Derived::IsRowMajor ? col : row;
+
+ Index start = this->outerIndexPtr()[outer];
+ Index end = this->isCompressed() ? this->outerIndexPtr()[outer+1] : this->outerIndexPtr()[outer] + this->innerNonZeroPtr()[outer];
+ eigen_assert(end>=start && "you are using a non finalized sparse matrix or written coefficient does not exist");
+ internal::LowerBoundIndex p;
+ p.value = std::lower_bound(this->innerIndexPtr()+start, this->innerIndexPtr()+end,inner) - this->innerIndexPtr();
+ p.found = (p.value<end) && (this->innerIndexPtr()[p.value]==inner);
+ return p;
+ }
+
+ friend struct internal::evaluator<SparseCompressedBase<Derived> >;
+
private:
template<typename OtherDerived> explicit SparseCompressedBase(const SparseCompressedBase<OtherDerived>&);
};
@@ -185,6 +207,14 @@ class SparseCompressedBase<Derived>::InnerIterator
}
inline InnerIterator& operator++() { m_id++; return *this; }
+ inline InnerIterator& operator+=(Index i) { m_id += i ; return *this; }
+
+ inline InnerIterator operator+(Index i)
+ {
+ InnerIterator result = *this;
+ result += i;
+ return result;
+ }
inline const Scalar& value() const { return m_values[m_id]; }
inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
@@ -245,6 +275,14 @@ class SparseCompressedBase<Derived>::ReverseInnerIterator
}
inline ReverseInnerIterator& operator--() { --m_id; return *this; }
+ inline ReverseInnerIterator& operator-=(Index i) { m_id -= i; return *this; }
+
+ inline ReverseInnerIterator operator-(Index i)
+ {
+ ReverseInnerIterator result = *this;
+ result -= i;
+ return result;
+ }
inline const Scalar& value() const { return m_values[m_id-1]; }
inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
@@ -317,17 +355,8 @@ protected:
Index find(Index row, Index col) const
{
- eigen_internal_assert(row>=0 && row<m_matrix->rows() && col>=0 && col<m_matrix->cols());
-
- const Index outer = Derived::IsRowMajor ? row : col;
- const Index inner = Derived::IsRowMajor ? col : row;
-
- Index start = m_matrix->outerIndexPtr()[outer];
- Index end = m_matrix->isCompressed() ? m_matrix->outerIndexPtr()[outer+1] : m_matrix->outerIndexPtr()[outer] + m_matrix->innerNonZeroPtr()[outer];
- eigen_assert(end>=start && "you are using a non finalized sparse matrix or written coefficient does not exist");
- const Index p = std::lower_bound(m_matrix->innerIndexPtr()+start, m_matrix->innerIndexPtr()+end,inner) - m_matrix->innerIndexPtr();
-
- return ((p<end) && (m_matrix->innerIndexPtr()[p]==inner)) ? p : Dynamic;
+ internal::LowerBoundIndex p = m_matrix->lower_bound(row,col);
+ return p.found ? p.value : Dynamic;
}
const Derived *m_matrix;
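The iterators of compressed storage gain random advancement (operator+= and operator+ on InnerIterator, operator-= and operator- on ReverseInnerIterator), and find() now reuses the new lower_bound() helper. A small sketch of the classic inner-vector loop together with the new jump, assuming the patch is applied:

    #include <Eigen/Sparse>

    void iterate_inner_vector()
    {
      Eigen::SparseMatrix<double> A(5, 5);
      A.insert(1, 2) = 3.0;
      A.insert(4, 2) = 7.0;
      A.makeCompressed();

      // Classic loop over the stored entries of column 2.
      for (Eigen::SparseMatrix<double>::InnerIterator it(A, 2); it; ++it)
      {
        // it.row(), it.col(), it.value() ...
      }

      // New: advance by more than one stored entry at a time.
      Eigen::SparseMatrix<double>::InnerIterator it(A, 2);
      it += 1;                  // now at the second stored entry of column 2
      double v = it.value();    // 7.0
      (void)v;
    }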
diff --git a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
index e315e3550..9b0d3f98d 100644
--- a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
+++ b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
@@ -101,7 +101,7 @@ public:
}
else
{
- m_value = 0; // this is to avoid a compilation warning
+ m_value = Scalar(0); // this is to avoid a compilation warning
m_id = -1;
}
return *this;
@@ -126,7 +126,7 @@ public:
enum {
- CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ CoeffReadCost = int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
Flags = XprType::Flags
};
@@ -211,9 +211,8 @@ public:
enum {
- CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
- // Expose storage order of the sparse expression
- Flags = (XprType::Flags & ~RowMajorBit) | (int(Rhs::Flags)&RowMajorBit)
+ CoeffReadCost = int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
+ Flags = XprType::Flags
};
explicit binary_evaluator(const XprType& xpr)
@@ -299,9 +298,8 @@ public:
enum {
- CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
- // Expose storage order of the sparse expression
- Flags = (XprType::Flags & ~RowMajorBit) | (int(Lhs::Flags)&RowMajorBit)
+ CoeffReadCost = int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
+ Flags = XprType::Flags
};
explicit binary_evaluator(const XprType& xpr)
@@ -459,7 +457,7 @@ public:
enum {
- CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ CoeffReadCost = int(evaluator<LhsArg>::CoeffReadCost) + int(evaluator<RhsArg>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
Flags = XprType::Flags
};
@@ -532,9 +530,8 @@ public:
enum {
- CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
- // Expose storage order of the sparse expression
- Flags = (XprType::Flags & ~RowMajorBit) | (int(RhsArg::Flags)&RowMajorBit)
+ CoeffReadCost = int(evaluator<LhsArg>::CoeffReadCost) + int(evaluator<RhsArg>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
+ Flags = XprType::Flags
};
explicit sparse_conjunction_evaluator(const XprType& xpr)
@@ -607,9 +604,8 @@ public:
enum {
- CoeffReadCost = evaluator<LhsArg>::CoeffReadCost + evaluator<RhsArg>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
- // Expose storage order of the sparse expression
- Flags = (XprType::Flags & ~RowMajorBit) | (int(LhsArg::Flags)&RowMajorBit)
+ CoeffReadCost = int(evaluator<LhsArg>::CoeffReadCost) + int(evaluator<RhsArg>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
+ Flags = XprType::Flags
};
explicit sparse_conjunction_evaluator(const XprType& xpr)
diff --git a/Eigen/src/SparseCore/SparseCwiseUnaryOp.h b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
index ea7973790..32dac0f78 100644
--- a/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
+++ b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
@@ -24,7 +24,7 @@ struct unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>
class InnerIterator;
enum {
- CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,
+ CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<UnaryOp>::Cost),
Flags = XprType::Flags
};
@@ -49,6 +49,7 @@ template<typename UnaryOp, typename ArgType>
class unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::InnerIterator
: public unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator
{
+ protected:
typedef typename XprType::Scalar Scalar;
typedef typename unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator Base;
public:
@@ -78,7 +79,7 @@ struct unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>
class InnerIterator;
enum {
- CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<ViewOp>::Cost,
+ CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<ViewOp>::Cost),
Flags = XprType::Flags
};
@@ -99,6 +100,7 @@ template<typename ViewOp, typename ArgType>
class unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::InnerIterator
: public unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator
{
+ protected:
typedef typename XprType::Scalar Scalar;
typedef typename unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator Base;
public:
diff --git a/Eigen/src/SparseCore/SparseDenseProduct.h b/Eigen/src/SparseCore/SparseDenseProduct.h
index 0547db596..f005a18a1 100644
--- a/Eigen/src/SparseCore/SparseDenseProduct.h
+++ b/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -88,10 +88,11 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, A
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
- typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
+ typedef evaluator<Lhs> LhsEval;
+ typedef typename LhsEval::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
- evaluator<Lhs> lhsEval(lhs);
+ LhsEval lhsEval(lhs);
for(Index c=0; c<rhs.cols(); ++c)
{
for(Index j=0; j<lhs.outerSize(); ++j)
@@ -111,17 +112,38 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, t
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
- typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
+ typedef evaluator<Lhs> LhsEval;
+ typedef typename LhsEval::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
- evaluator<Lhs> lhsEval(lhs);
- for(Index j=0; j<lhs.outerSize(); ++j)
+ Index n = lhs.rows();
+ LhsEval lhsEval(lhs);
+
+#ifdef EIGEN_HAS_OPENMP
+ Eigen::initParallel();
+ Index threads = Eigen::nbThreads();
+ // This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
+ // It basically represents the minimal amount of work to be done to be worth it.
+ if(threads>1 && lhsEval.nonZerosEstimate()*rhs.cols() > 20000)
{
- typename Res::RowXpr res_j(res.row(j));
- for(LhsInnerIterator it(lhsEval,j); it ;++it)
- res_j += (alpha*it.value()) * rhs.row(it.index());
+ #pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)
+ for(Index i=0; i<n; ++i)
+ processRow(lhsEval,rhs,res,alpha,i);
+ }
+ else
+#endif
+ {
+ for(Index i=0; i<n; ++i)
+ processRow(lhsEval, rhs, res, alpha, i);
}
}
+
+ static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, Res& res, const typename Res::Scalar& alpha, Index i)
+ {
+ typename Res::RowXpr res_i(res.row(i));
+ for(LhsInnerIterator it(lhsEval,i); it ;++it)
+ res_i += (alpha*it.value()) * rhs.row(it.index());
+ }
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
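The row-major sparse times dense product is now parallelized row by row when Eigen is built with OpenMP and the estimated work (nonzero count times right-hand-side columns) exceeds the 20000 threshold quoted above. A usage sketch; setNbThreads() is the existing Eigen API for capping the team size, and the function name here is illustrative:

    #include <Eigen/Dense>
    #include <Eigen/Sparse>

    // Compile with e.g. -fopenmp for the parallel path to be available.
    void sparse_times_dense(const Eigen::SparseMatrix<double, Eigen::RowMajor>& S,
                            const Eigen::MatrixXd& D)
    {
      Eigen::setNbThreads(4);      // optional: limit the number of OpenMP threads Eigen uses
      Eigen::MatrixXd R = S * D;   // each row of R is computed independently (processRow above)
    }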
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h
index 323c2323b..616b4a0c2 100644
--- a/Eigen/src/SparseCore/SparseMatrix.h
+++ b/Eigen/src/SparseCore/SparseMatrix.h
@@ -21,7 +21,7 @@ namespace Eigen {
* This class implements a more versatile variants of the common \em compressed row/column storage format.
* Each colmun's (resp. row) non zeros are stored as a pair of value with associated row (resp. colmiun) index.
* All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
- * space inbetween the nonzeros of two successive colmuns (resp. rows) such that insertion of new non-zero
+ * space in between the nonzeros of two successive colmuns (resp. rows) such that insertion of new non-zero
* can be done with limited memory reallocation and copies.
*
* A call to the function makeCompressed() turns the matrix into the standard \em compressed format
@@ -99,6 +99,8 @@ class SparseMatrix
typedef SparseCompressedBase<SparseMatrix> Base;
using Base::convert_index;
friend class SparseVector<_Scalar,0,_StorageIndex>;
+ template<typename, typename, typename, typename, typename>
+ friend struct internal::Assignment;
public:
using Base::isCompressed;
using Base::nonZeros;
@@ -327,7 +329,8 @@ class SparseMatrix
m_outerIndex[j] = newOuterIndex[j];
m_innerNonZeros[j] = innerNNZ;
}
- m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
+ if(m_outerSize>0)
+ m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
m_data.resize(m_outerIndex[m_outerSize]);
}
@@ -502,8 +505,8 @@ class SparseMatrix
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
}
}
-
- /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerence \a epsilon */
+
+ /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
prune(default_prunning_func(reference,epsilon));
@@ -576,10 +579,12 @@ class SparseMatrix
else if (innerChange < 0)
{
// Inner size decreased: allocate a new m_innerNonZeros
- m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
if (!m_innerNonZeros) internal::throw_std_bad_alloc();
- for(Index i = 0; i < m_outerSize; i++)
+ for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
+ for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
+ m_innerNonZeros[i] = 0;
}
// Change the m_innerNonZeros in case of a decrease of inner size
@@ -604,9 +609,9 @@ class SparseMatrix
m_outerIndex = newOuterIndex;
if (outerChange > 0)
{
- StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
+ StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
- m_outerIndex[i] = last;
+ m_outerIndex[i] = lastIdx;
}
m_outerSize += outerChange;
}
@@ -780,6 +785,9 @@ class SparseMatrix
template<typename OtherDerived>
inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
{ return Base::operator=(other.derived()); }
+
+ template<typename Lhs, typename Rhs>
+ inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
#endif // EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
@@ -893,7 +901,114 @@ public:
Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
m_data.index(p) = convert_index(inner);
- return (m_data.value(p) = 0);
+ return (m_data.value(p) = Scalar(0));
+ }
+protected:
+ struct IndexPosPair {
+ IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
+ Index i;
+ Index p;
+ };
+
+ /** \internal assign \a diagXpr to the diagonal of \c *this
+ * There are different strategies:
+ * 1 - if *this is overwritten (Func==assign_op) or *this is empty, then we can treat *this as a dense vector expression.
+ * 2 - otherwise, for each diagonal coeff,
+ * 2.a - if it already exists, then we update it,
+ * 2.b - otherwise, if *this is uncompressed and that the current inner-vector has empty room for at least 1 element, then we perform an in-place insertion.
+ * 2.c - otherwise, we'll have to reallocate and copy everything, so instead of doing so for each new element, it is recorded in a std::vector.
+ * 3 - at the end, if some entries failed to be inserted in-place, then we alloc a new buffer, copy each chunk at the right position, and insert the new elements.
+ *
+ * TODO: some piece of code could be isolated and reused for a general in-place update strategy.
+ * TODO: if we start to defer the insertion of some elements (i.e., case 2.c executed once),
+ * then it *might* be better to disable case 2.b since they will have to be copied anyway.
+ */
+ template<typename DiagXpr, typename Func>
+ void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
+ {
+ Index n = diagXpr.size();
+
+ const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
+ if(overwrite)
+ {
+ if((this->rows()!=n) || (this->cols()!=n))
+ this->resize(n, n);
+ }
+
+ if(m_data.size()==0 || overwrite)
+ {
+ typedef Array<StorageIndex,Dynamic,1> ArrayXI;
+ this->makeCompressed();
+ this->resizeNonZeros(n);
+ Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
+ Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
+ Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
+ values.setZero();
+ internal::call_assignment_no_alias(values, diagXpr, assignFunc);
+ }
+ else
+ {
+ bool isComp = isCompressed();
+ internal::evaluator<DiagXpr> diaEval(diagXpr);
+ std::vector<IndexPosPair> newEntries;
+
+ // 1 - try in-place update and record insertion failures
+ for(Index i = 0; i<n; ++i)
+ {
+ internal::LowerBoundIndex lb = this->lower_bound(i,i);
+ Index p = lb.value;
+ if(lb.found)
+ {
+ // the coeff already exists
+ assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
+ }
+ else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
+ {
+ // non compressed mode with local room for inserting one element
+ m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
+ m_innerNonZeros[i]++;
+ m_data.value(p) = Scalar(0);
+ m_data.index(p) = StorageIndex(i);
+ assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
+ }
+ else
+ {
+ // defer insertion
+ newEntries.push_back(IndexPosPair(i,p));
+ }
+ }
+ // 2 - insert deferred entries
+ Index n_entries = Index(newEntries.size());
+ if(n_entries>0)
+ {
+ Storage newData(m_data.size()+n_entries);
+ Index prev_p = 0;
+ Index prev_i = 0;
+ for(Index k=0; k<n_entries;++k)
+ {
+ Index i = newEntries[k].i;
+ Index p = newEntries[k].p;
+ internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
+ internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
+ for(Index j=prev_i;j<i;++j)
+ m_outerIndex[j+1] += k;
+ if(!isComp)
+ m_innerNonZeros[i]++;
+ prev_p = p;
+ prev_i = i;
+ newData.value(p+k) = Scalar(0);
+ newData.index(p+k) = StorageIndex(i);
+ assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
+ }
+ {
+ internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
+ internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
+ for(Index j=prev_i+1;j<=m_outerSize;++j)
+ m_outerIndex[j] += n_entries;
+ }
+ m_data.swap(newData);
+ }
+ }
}
private:
@@ -973,7 +1088,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
* \code
typedef Triplet<double> T;
std::vector<T> tripletList;
- triplets.reserve(estimation_of_entries);
+ tripletList.reserve(estimation_of_entries);
for(...)
{
// ...
@@ -986,7 +1101,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
*
* \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
* an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
- * be explicitely stored into a std::vector for instance.
+ * be explicitly stored into a std::vector for instance.
*/
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>
@@ -1232,7 +1347,7 @@ typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Sca
}
m_data.index(p) = convert_index(inner);
- return (m_data.value(p) = 0);
+ return (m_data.value(p) = Scalar(0));
}
if(m_data.size() != m_data.allocatedSize())
@@ -1274,7 +1389,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar&
m_innerNonZeros[outer]++;
m_data.index(p) = inner;
- return (m_data.value(p) = 0);
+ return (m_data.value(p) = Scalar(0));
}
template<typename _Scalar, int _Options, typename _StorageIndex>
@@ -1381,7 +1496,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar&
}
m_data.index(p) = inner;
- return (m_data.value(p) = 0);
+ return (m_data.value(p) = Scalar(0));
}
namespace internal {
diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h
index c6b548f11..229449f02 100644
--- a/Eigen/src/SparseCore/SparseMatrixBase.h
+++ b/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -87,6 +87,11 @@ template<typename Derived> class SparseMatrixBase
* we are dealing with a column-vector (if there is only one column) or with
* a row-vector (if there is only one row). */
+ NumDimensions = int(MaxSizeAtCompileTime) == 1 ? 0 : bool(IsVectorAtCompileTime) ? 1 : 2,
+ /**< This value is equal to Tensor::NumDimensions, i.e. 0 for scalars, 1 for vectors,
+ * and 2 for matrices.
+ */
+
Flags = internal::traits<Derived>::Flags,
/**< This stores expression \ref flags flags which may or may not be inherited by new expressions
* constructed from this one. See the \ref flags "list of flags".
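NumDimensions mirrors the homonymous Tensor constant so that generic code can query the rank of a sparse expression. A minimal illustration using C++11 static_assert, assuming this patch:

    #include <Eigen/Sparse>

    static_assert(int(Eigen::SparseMatrix<double>::NumDimensions) == 2, "a sparse matrix is rank 2");
    static_assert(int(Eigen::SparseVector<double>::NumDimensions) == 1, "a sparse vector is rank 1");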
@@ -350,18 +355,6 @@ template<typename Derived> class SparseMatrixBase
const ConstTransposeReturnType transpose() const { return ConstTransposeReturnType(derived()); }
const AdjointReturnType adjoint() const { return AdjointReturnType(transpose()); }
- // inner-vector
- typedef Block<Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> InnerVectorReturnType;
- typedef Block<const Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> ConstInnerVectorReturnType;
- InnerVectorReturnType innerVector(Index outer);
- const ConstInnerVectorReturnType innerVector(Index outer) const;
-
- // set of inner-vectors
- typedef Block<Derived,Dynamic,Dynamic,true> InnerVectorsReturnType;
- typedef Block<const Derived,Dynamic,Dynamic,true> ConstInnerVectorsReturnType;
- InnerVectorsReturnType innerVectors(Index outerStart, Index outerSize);
- const ConstInnerVectorsReturnType innerVectors(Index outerStart, Index outerSize) const;
-
DenseMatrixType toDense() const
{
return DenseMatrixType(derived());
diff --git a/Eigen/src/SparseCore/SparseProduct.h b/Eigen/src/SparseCore/SparseProduct.h
index 4cbf68781..af8a7744d 100644
--- a/Eigen/src/SparseCore/SparseProduct.h
+++ b/Eigen/src/SparseCore/SparseProduct.h
@@ -17,7 +17,7 @@ namespace Eigen {
* The automatic pruning of the small values can be achieved by calling the pruned() function
* in which case a totally different product algorithm is employed:
* \code
- * C = (A*B).pruned(); // supress numerical zeros (exact)
+ * C = (A*B).pruned(); // suppress numerical zeros (exact)
* C = (A*B).pruned(ref);
* C = (A*B).pruned(ref,epsilon);
* \endcode
@@ -164,6 +164,18 @@ protected:
} // end namespace internal
+// sparse matrix = sparse-product (can be sparse*sparse, sparse*perm, etc.)
+template<typename Scalar, int _Options, typename _StorageIndex>
+template<typename Lhs, typename Rhs>
+SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const Product<Lhs,Rhs,AliasFreeProduct>& src)
+{
+ // std::cout << "in Assignment : " << DstOptions << "\n";
+ SparseMatrix dst(src.rows(),src.cols());
+ internal::generic_product_impl<Lhs, Rhs>::evalTo(dst,src.lhs(),src.rhs());
+ this->swap(dst);
+ return *this;
+}
+
} // end namespace Eigen
#endif // EIGEN_SPARSEPRODUCT_H
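The new assignment operator evaluates a sparse product into a temporary and swaps it into the destination, so the destination may alias one of the operands. A usage sketch; the function name is illustrative:

    #include <Eigen/Sparse>

    void sparse_product_assign(Eigen::SparseMatrix<double>& A,
                               const Eigen::SparseMatrix<double>& B)
    {
      Eigen::SparseMatrix<double> C;
      C = A * B;   // routed through the operator= defined above
      A = A * B;   // safe as well: the product is built in a temporary, then swapped into A
    }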
diff --git a/Eigen/src/SparseCore/SparseRef.h b/Eigen/src/SparseCore/SparseRef.h
index d91f38f97..748f87d62 100644
--- a/Eigen/src/SparseCore/SparseRef.h
+++ b/Eigen/src/SparseCore/SparseRef.h
@@ -201,7 +201,7 @@ class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType
~Ref() {
if(m_hasCopy) {
- TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
obj->~TPlainObjectType();
}
}
@@ -213,7 +213,7 @@ class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType
{
if((Options & int(StandardCompressedFormat)) && (!expr.isCompressed()))
{
- TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
::new (obj) TPlainObjectType(expr);
m_hasCopy = true;
Base::construct(*obj);
@@ -227,14 +227,14 @@ class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType
template<typename Expression>
void construct(const Expression& expr, internal::false_type)
{
- TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
::new (obj) TPlainObjectType(expr);
m_hasCopy = true;
Base::construct(*obj);
}
protected:
- char m_object_bytes[sizeof(TPlainObjectType)];
+ typename internal::aligned_storage<sizeof(TPlainObjectType), EIGEN_ALIGNOF(TPlainObjectType)>::type m_storage;
bool m_hasCopy;
};
@@ -319,7 +319,7 @@ class Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType
~Ref() {
if(m_hasCopy) {
- TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
obj->~TPlainObjectType();
}
}
@@ -335,14 +335,14 @@ class Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType
template<typename Expression>
void construct(const Expression& expr, internal::false_type)
{
- TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(m_object_bytes);
+ TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
::new (obj) TPlainObjectType(expr);
m_hasCopy = true;
Base::construct(*obj);
}
protected:
- char m_object_bytes[sizeof(TPlainObjectType)];
+ typename internal::aligned_storage<sizeof(TPlainObjectType), EIGEN_ALIGNOF(TPlainObjectType)>::type m_storage;
bool m_hasCopy;
};
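The raw char buffer used for the Ref's optional deep copy is replaced by properly aligned storage. That copy is made when the referenced expression does not already satisfy the requested format, for instance when an uncompressed matrix is passed to a Ref requiring StandardCompressedFormat. A usage sketch; consume() and call_it() are hypothetical names:

    #include <Eigen/Sparse>

    void consume(const Eigen::Ref<const Eigen::SparseMatrix<double>, Eigen::StandardCompressedFormat>& m)
    {
      // m is guaranteed to be in standard compressed format here.
    }

    void call_it()
    {
      Eigen::SparseMatrix<double> A(4, 4);
      A.insert(1, 1) = 2.0;   // A is deliberately left uncompressed
      consume(A);             // the Ref detects !isCompressed() and builds its internal compressed copy
    }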
diff --git a/Eigen/src/SparseCore/SparseSelfAdjointView.h b/Eigen/src/SparseCore/SparseSelfAdjointView.h
index 5ab64f1a8..85b00e10e 100644
--- a/Eigen/src/SparseCore/SparseSelfAdjointView.h
+++ b/Eigen/src/SparseCore/SparseSelfAdjointView.h
@@ -142,6 +142,9 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
return *this = src.twistedBy(pnull);
}
+ // Since we override the copy-assignment operator, we need to explicitly re-declare the copy-constructor
+ EIGEN_DEFAULT_COPY_CONSTRUCTOR(SparseSelfAdjointView)
+
template<typename SrcMatrixType,unsigned int SrcMode>
SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
{
@@ -311,7 +314,7 @@ inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, cons
while (i && i.index()<j) ++i;
if(i && i.index()==j)
{
- res(j,k) += alpha * i.value() * rhs(j,k);
+ res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
++i;
}
}
@@ -324,14 +327,14 @@ inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, cons
{
LhsScalar lhs_ij = i.value();
if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
- res_j += lhs_ij * rhs(i.index(),k);
+ res_j += lhs_ij * rhs.coeff(i.index(),k);
res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
}
- res(j,k) += alpha * res_j;
+ res.coeffRef(j,k) += alpha * res_j;
// handle diagonal coeff
if (ProcessFirstHalf && i && (i.index()==j))
- res(j,k) += alpha * i.value() * rhs(j,k);
+ res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
}
}
}
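Switching to coeffRef()/coeff() lets the destination and right-hand side be general dense expressions rather than plain matrices. This routine backs the selfadjoint-sparse times dense product; a usage sketch, with illustrative names:

    #include <Eigen/Dense>
    #include <Eigen/Sparse>

    void selfadjoint_times_dense(const Eigen::SparseMatrix<double>& L_only,   // only the lower triangle is stored
                                 const Eigen::MatrixXd& X)
    {
      // Interpret the stored lower triangle as a full symmetric matrix.
      Eigen::MatrixXd Y = L_only.selfadjointView<Eigen::Lower>() * X;
    }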
@@ -453,7 +456,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
Index r = it.row();
Index c = it.col();
Index ip = perm ? perm[i] : i;
- if(Mode==(Upper|Lower))
+ if(Mode==int(Upper|Lower))
count[StorageOrderMatch ? jp : ip]++;
else if(r==c)
count[ip]++;
@@ -486,7 +489,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
StorageIndex jp = perm ? perm[j] : j;
StorageIndex ip = perm ? perm[i] : i;
- if(Mode==(Upper|Lower))
+ if(Mode==int(Upper|Lower))
{
Index k = count[StorageOrderMatch ? jp : ip]++;
dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
diff --git a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
index 21c419002..88820a48f 100644
--- a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
+++ b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
@@ -21,7 +21,8 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
{
// return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);
- typedef typename remove_all<Lhs>::type::Scalar Scalar;
+ typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
+ typedef typename remove_all<ResultType>::type::Scalar ResScalar;
typedef typename remove_all<Lhs>::type::StorageIndex StorageIndex;
// make sure to call innerSize/outerSize since we fake the storage order.
@@ -31,7 +32,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
eigen_assert(lhs.outerSize() == rhs.innerSize());
// allocate a temporary buffer
- AmbiVector<Scalar,StorageIndex> tempVector(rows);
+ AmbiVector<ResScalar,StorageIndex> tempVector(rows);
// mimics a resizeByInnerOuter:
if(ResultType::IsRowMajor)
@@ -63,14 +64,14 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
{
// FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
tempVector.restart();
- Scalar x = rhsIt.value();
+ RhsScalar x = rhsIt.value();
for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, rhsIt.index()); lhsIt; ++lhsIt)
{
tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
}
}
res.startVec(j);
- for (typename AmbiVector<Scalar,StorageIndex>::Iterator it(tempVector,tolerance); it; ++it)
+ for (typename AmbiVector<ResScalar,StorageIndex>::Iterator it(tempVector,tolerance); it; ++it)
res.insertBackByOuterInner(j,it.index()) = it.value();
}
res.finalize();
@@ -85,7 +86,6 @@ struct sparse_sparse_product_with_pruning_selector;
template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
{
- typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
@@ -129,8 +129,8 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,R
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
+ typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
+ typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
ColMajorMatrixLhs colLhs(lhs);
ColMajorMatrixRhs colRhs(rhs);
internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,ColMajorMatrixRhs,ResultType>(colLhs, colRhs, res, tolerance);
@@ -149,7 +149,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,R
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixLhs;
+ typedef SparseMatrix<typename Lhs::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixLhs;
RowMajorMatrixLhs rowLhs(lhs);
sparse_sparse_product_with_pruning_selector<RowMajorMatrixLhs,Rhs,ResultType,RowMajor,RowMajor>(rowLhs,rhs,res,tolerance);
}
@@ -161,7 +161,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,C
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
- typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixRhs;
+ typedef SparseMatrix<typename Rhs::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixRhs;
RowMajorMatrixRhs rowRhs(rhs);
sparse_sparse_product_with_pruning_selector<Lhs,RowMajorMatrixRhs,ResultType,RowMajor,RowMajor,RowMajor>(lhs,rowRhs,res,tolerance);
}
@@ -173,7 +173,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,R
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
+ typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
ColMajorMatrixRhs colRhs(rhs);
internal::sparse_sparse_product_with_pruning_impl<Lhs,ColMajorMatrixRhs,ResultType>(lhs, colRhs, res, tolerance);
}
@@ -185,7 +185,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,C
typedef typename ResultType::RealScalar RealScalar;
static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
{
- typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
+ typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
ColMajorMatrixLhs colLhs(lhs);
internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,Rhs,ResultType>(colLhs, rhs, res, tolerance);
}
diff --git a/Eigen/src/SparseCore/SparseUtil.h b/Eigen/src/SparseCore/SparseUtil.h
index 74df0d496..ceb936887 100644
--- a/Eigen/src/SparseCore/SparseUtil.h
+++ b/Eigen/src/SparseCore/SparseUtil.h
@@ -140,6 +140,14 @@ struct SparseSelfAdjointShape { static std::string debugName() { return "SparseS
template<> struct glue_shapes<SparseShape,SelfAdjointShape> { typedef SparseSelfAdjointShape type; };
template<> struct glue_shapes<SparseShape,TriangularShape > { typedef SparseTriangularShape type; };
+// return type of SparseCompressedBase::lower_bound;
+struct LowerBoundIndex {
+ LowerBoundIndex() : value(-1), found(false) {}
+ LowerBoundIndex(Index val, bool ok) : value(val), found(ok) {}
+ Index value;
+ bool found;
+};
+
} // end namespace internal
/** \ingroup SparseCore_Module
diff --git a/Eigen/src/SparseCore/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h
index 19b0fbc9d..05779be68 100644
--- a/Eigen/src/SparseCore/SparseVector.h
+++ b/Eigen/src/SparseCore/SparseVector.h
@@ -281,7 +281,7 @@ class SparseVector
}
/** Swaps the values of \c *this and \a other.
- * Overloaded for performance: this version performs a \em shallow swap by swaping pointers and attributes only.
+ * Overloaded for performance: this version performs a \em shallow swap by swapping pointers and attributes only.
* \sa SparseMatrixBase::swap()
*/
inline void swap(SparseVector& other)
diff --git a/Eigen/src/SparseCore/SparseView.h b/Eigen/src/SparseCore/SparseView.h
index 7c4aea743..92b3d1f7b 100644
--- a/Eigen/src/SparseCore/SparseView.h
+++ b/Eigen/src/SparseCore/SparseView.h
@@ -90,6 +90,7 @@ struct unary_evaluator<SparseView<ArgType>, IteratorBased>
class InnerIterator : public EvalIterator
{
+ protected:
typedef typename XprType::Scalar Scalar;
public: