Diffstat (limited to 'Eigen/src/SparseCore')

 Eigen/src/SparseCore/CompressedStorage.h     |  13
 Eigen/src/SparseCore/SparseBlock.h           |  95
 Eigen/src/SparseCore/SparseCompressedBase.h  |  35
 Eigen/src/SparseCore/SparseCwiseBinaryOp.h   | 221
 Eigen/src/SparseCore/SparseDenseProduct.h    |   2
 Eigen/src/SparseCore/SparseMap.h             |  41
 Eigen/src/SparseCore/SparseMatrix.h          |  19
 Eigen/src/SparseCore/SparseMatrixBase.h      |   2
 Eigen/src/SparseCore/SparseRedux.h           |   4
 Eigen/src/SparseCore/SparseRef.h             |  27
 Eigen/src/SparseCore/SparseSelfAdjointView.h |  30
 Eigen/src/SparseCore/SparseTriangularView.h  | 117
 Eigen/src/SparseCore/SparseVector.h          |  40
 Eigen/src/SparseCore/SparseView.h            |   2
 14 files changed, 443 insertions(+), 205 deletions(-)
diff --git a/Eigen/src/SparseCore/CompressedStorage.h b/Eigen/src/SparseCore/CompressedStorage.h
index 2199848e9..d89fa0dae 100644
--- a/Eigen/src/SparseCore/CompressedStorage.h
+++ b/Eigen/src/SparseCore/CompressedStorage.h
@@ -110,11 +110,16 @@ class CompressedStorage
inline Index allocatedSize() const { return m_allocatedSize; }
inline void clear() { m_size = 0; }
- inline Scalar& value(Index i) { return m_values[i]; }
- inline const Scalar& value(Index i) const { return m_values[i]; }
+ const Scalar* valuePtr() const { return m_values; }
+ Scalar* valuePtr() { return m_values; }
+ const StorageIndex* indexPtr() const { return m_indices; }
+ StorageIndex* indexPtr() { return m_indices; }
- inline StorageIndex& index(Index i) { return m_indices[i]; }
- inline const StorageIndex& index(Index i) const { return m_indices[i]; }
+ inline Scalar& value(Index i) { eigen_internal_assert(m_values!=0); return m_values[i]; }
+ inline const Scalar& value(Index i) const { eigen_internal_assert(m_values!=0); return m_values[i]; }
+
+ inline StorageIndex& index(Index i) { eigen_internal_assert(m_indices!=0); return m_indices[i]; }
+ inline const StorageIndex& index(Index i) const { eigen_internal_assert(m_indices!=0); return m_indices[i]; }
/** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
inline Index searchLowerIndex(Index key) const
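Usage sketch (not part of the diff): the new valuePtr()/indexPtr() accessors return the raw arrays directly and stay well defined for empty storage, whereas the old `&data.value(0)` pattern dereferenced element 0 of a possibly null array. A minimal example going through the public SparseMatrix API, which forwards to these accessors:

    #include <Eigen/SparseCore>
    #include <iostream>

    int main()
    {
      Eigen::SparseMatrix<double> m(3,3); // while empty, valuePtr() is simply null
      m.insert(0,0) = 1.0;
      m.insert(2,1) = 2.0;
      m.makeCompressed();

      const double* vals = m.valuePtr();      // raw value array
      const int*    idx  = m.innerIndexPtr(); // raw inner-index array
      for(Eigen::Index k = 0; k < m.nonZeros(); ++k)
        std::cout << idx[k] << " -> " << vals[k] << "\n";
    }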
diff --git a/Eigen/src/SparseCore/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h
index 10be84856..82fae8c4b 100644
--- a/Eigen/src/SparseCore/SparseBlock.h
+++ b/Eigen/src/SparseCore/SparseBlock.h
@@ -28,11 +28,11 @@ protected:
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
- inline BlockImpl(const XprType& xpr, Index i)
+ inline BlockImpl(XprType& xpr, Index i)
: m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
{}
- inline BlockImpl(const XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
+ inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
{}
@@ -61,7 +61,8 @@ public:
return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
}
- inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; }
+ inline const XprType& nestedExpression() const { return m_matrix; }
+ inline XprType& nestedExpression() { return m_matrix; }
Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
@@ -69,12 +70,19 @@ public:
protected:
- typename XprType::Nested m_matrix;
+ typename internal::ref_selector<XprType>::non_const_type m_matrix;
Index m_outerStart;
const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
- public:
- EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
+ protected:
+ // Disable assignment with clear error message.
+ // Note that simply removing operator= yields compilation errors with ICC+MSVC
+ template<typename T>
+ BlockImpl& operator=(const T&)
+ {
+ EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
+ return *this;
+ }
};
@@ -100,11 +108,11 @@ protected:
enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
public:
- inline sparse_matrix_block_impl(const SparseMatrixType& xpr, Index i)
+ inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index i)
: m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
{}
- inline sparse_matrix_block_impl(const SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
+ inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
{}
@@ -112,7 +120,7 @@ public:
inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
{
typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;
- _NestedMatrixType& matrix = const_cast<_NestedMatrixType&>(m_matrix);;
+ _NestedMatrixType& matrix = m_matrix;
// This assignment is slow if this vector set is not empty
// and/or it is not at the end of the nonzeros of the underlying matrix.
@@ -137,14 +145,14 @@ public:
// realloc manually to reduce copies
typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);
- internal::smart_copy(&m_matrix.data().value(0), &m_matrix.data().value(0) + start, &newdata.value(0));
- internal::smart_copy(&m_matrix.data().index(0), &m_matrix.data().index(0) + start, &newdata.index(0));
+ internal::smart_copy(m_matrix.valuePtr(), m_matrix.valuePtr() + start, newdata.valuePtr());
+ internal::smart_copy(m_matrix.innerIndexPtr(), m_matrix.innerIndexPtr() + start, newdata.indexPtr());
- internal::smart_copy(tmp.valuePtr(), tmp.valuePtr() + nnz, &newdata.value(start));
- internal::smart_copy(tmp.innerIndexPtr(), tmp.innerIndexPtr() + nnz, &newdata.index(start));
+ internal::smart_copy(tmp.valuePtr(), tmp.valuePtr() + nnz, newdata.valuePtr() + start);
+ internal::smart_copy(tmp.innerIndexPtr(), tmp.innerIndexPtr() + nnz, newdata.indexPtr() + start);
- internal::smart_copy(&matrix.data().value(end), &matrix.data().value(end) + tail_size, &newdata.value(start+nnz));
- internal::smart_copy(&matrix.data().index(end), &matrix.data().index(end) + tail_size, &newdata.index(start+nnz));
+ internal::smart_copy(matrix.valuePtr()+end, matrix.valuePtr()+end + tail_size, newdata.valuePtr()+start+nnz);
+ internal::smart_copy(matrix.innerIndexPtr()+end, matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz);
newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);
@@ -159,14 +167,14 @@ public:
// no need to realloc, simply copy the tail at its respective position and insert tmp
matrix.data().resize(start + nnz + tail_size);
- internal::smart_memmove(&matrix.data().value(end), &matrix.data().value(end) + tail_size, &matrix.data().value(start + nnz));
- internal::smart_memmove(&matrix.data().index(end), &matrix.data().index(end) + tail_size, &matrix.data().index(start + nnz));
+ internal::smart_memmove(matrix.valuePtr()+end, matrix.valuePtr() + end+tail_size, matrix.valuePtr() + start+nnz);
+ internal::smart_memmove(matrix.innerIndexPtr()+end, matrix.innerIndexPtr() + end+tail_size, matrix.innerIndexPtr() + start+nnz);
update_trailing_pointers = true;
}
- internal::smart_copy(tmp.valuePtr(), tmp.valuePtr() + nnz, &matrix.data().value(start));
- internal::smart_copy(tmp.innerIndexPtr(), tmp.innerIndexPtr() + nnz, &matrix.data().index(start));
+ internal::smart_copy(tmp.valuePtr(), tmp.valuePtr() + nnz, matrix.valuePtr() + start);
+ internal::smart_copy(tmp.innerIndexPtr(), tmp.innerIndexPtr() + nnz, matrix.innerIndexPtr() + start);
}
// update outer index pointers and innerNonZeros
@@ -209,28 +217,28 @@ public:
inline const Scalar* valuePtr() const
{ return m_matrix.valuePtr(); }
inline Scalar* valuePtr()
- { return m_matrix.const_cast_derived().valuePtr(); }
+ { return m_matrix.valuePtr(); }
inline const StorageIndex* innerIndexPtr() const
{ return m_matrix.innerIndexPtr(); }
inline StorageIndex* innerIndexPtr()
- { return m_matrix.const_cast_derived().innerIndexPtr(); }
+ { return m_matrix.innerIndexPtr(); }
inline const StorageIndex* outerIndexPtr() const
{ return m_matrix.outerIndexPtr() + m_outerStart; }
inline StorageIndex* outerIndexPtr()
- { return m_matrix.const_cast_derived().outerIndexPtr() + m_outerStart; }
+ { return m_matrix.outerIndexPtr() + m_outerStart; }
inline const StorageIndex* innerNonZeroPtr() const
{ return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
inline StorageIndex* innerNonZeroPtr()
- { return isCompressed() ? 0 : (m_matrix.const_cast_derived().innerNonZeroPtr()+m_outerStart); }
+ { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; }
inline Scalar& coeffRef(Index row, Index col)
{
- return m_matrix.const_cast_derived().coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
+ return m_matrix.coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
}
inline const Scalar coeff(Index row, Index col) const
@@ -256,7 +264,8 @@ public:
EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
- inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; }
+ inline const SparseMatrixType& nestedExpression() const { return m_matrix; }
+ inline SparseMatrixType& nestedExpression() { return m_matrix; }
Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
@@ -264,7 +273,7 @@ public:
protected:
- typename SparseMatrixType::Nested m_matrix;
+ typename internal::ref_selector<SparseMatrixType>::non_const_type m_matrix;
Index m_outerStart;
const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
@@ -373,7 +382,7 @@ public:
/** Column or Row constructor
*/
- inline BlockImpl(const XprType& xpr, Index i)
+ inline BlockImpl(XprType& xpr, Index i)
: m_matrix(xpr),
m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0),
m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0),
@@ -383,7 +392,7 @@ public:
/** Dynamic-size constructor
*/
- inline BlockImpl(const XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
+ inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
: m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols))
{}
@@ -392,8 +401,7 @@ public:
inline Scalar& coeffRef(Index row, Index col)
{
- return m_matrix.const_cast_derived()
- .coeffRef(row + m_startRow.value(), col + m_startCol.value());
+ return m_matrix.coeffRef(row + m_startRow.value(), col + m_startCol.value());
}
inline const Scalar coeff(Index row, Index col) const
@@ -403,19 +411,18 @@ public:
inline Scalar& coeffRef(Index index)
{
- return m_matrix.const_cast_derived()
- .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
- m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+ return m_matrix.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+ m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
}
inline const Scalar coeff(Index index) const
{
- return m_matrix
- .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
- m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+ return m_matrix.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+ m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
}
- inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; }
+ inline const XprType& nestedExpression() const { return m_matrix; }
+ inline XprType& nestedExpression() { return m_matrix; }
Index startRow() const { return m_startRow.value(); }
Index startCol() const { return m_startCol.value(); }
Index blockRows() const { return m_blockRows.value(); }
@@ -427,15 +434,23 @@ public:
friend struct internal::unary_evaluator<Block<XprType,BlockRows,BlockCols,InnerPanel>, internal::IteratorBased, Scalar >;
Index nonZeros() const { return Dynamic; }
-
- EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
- typename XprType::Nested m_matrix;
+ typename internal::ref_selector<XprType>::non_const_type m_matrix;
const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
+ protected:
+ // Disable assignment with clear error message.
+ // Note that simply removing operator= yields compilation errors with ICC+MSVC
+ template<typename T>
+ BlockImpl& operator=(const T&)
+ {
+ EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
+ return *this;
+ }
+
};
namespace internal {
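Usage sketch (not part of the diff): blocks of a plain SparseMatrix keep their assignment operator, while the disabled operator= above turns assignment to a block of a general sparse expression into a clear compile-time error rather than an obscure one:

    #include <Eigen/SparseCore>

    int main()
    {
      Eigen::SparseMatrix<double> A(10,10), B(10,4);

      // Writable: this routes through sparse_matrix_block_impl::operator=.
      A.middleCols(2,4) = B;

      // Read-only: a block of a general sparse expression would now fail
      // to compile with THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY, e.g.
      //   (A * 2.0).middleCols(2,4) = B;
      return 0;
    }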
diff --git a/Eigen/src/SparseCore/SparseCompressedBase.h b/Eigen/src/SparseCore/SparseCompressedBase.h
index c223e4f42..15854a73b 100644
--- a/Eigen/src/SparseCore/SparseCompressedBase.h
+++ b/Eigen/src/SparseCore/SparseCompressedBase.h
@@ -22,6 +22,16 @@ struct traits<SparseCompressedBase<Derived> > : traits<Derived>
} // end namespace internal
+/** \ingroup SparseCore_Module
+ * \class SparseCompressedBase
+ * \brief Common base class for sparse [compressed]-{row|column}-storage format.
+ *
+ * This class defines the common interface for all derived classes implementing the compressed sparse storage format, such as:
+ * - SparseMatrix
+ * - Ref<SparseMatrixType,Options>
+ * - Map<SparseMatrixType>
+ *
+ */
template<typename Derived>
class SparseCompressedBase
: public SparseMatrixBase<Derived>
@@ -107,6 +117,24 @@ template<typename Derived>
class SparseCompressedBase<Derived>::InnerIterator
{
public:
+ InnerIterator()
+ : m_values(0), m_indices(0), m_outer(0), m_id(0), m_end(0)
+ {}
+
+ InnerIterator(const InnerIterator& other)
+ : m_values(other.m_values), m_indices(other.m_indices), m_outer(other.m_outer), m_id(other.m_id), m_end(other.m_end)
+ {}
+
+ InnerIterator& operator=(const InnerIterator& other)
+ {
+ m_values = other.m_values;
+ m_indices = other.m_indices;
+ const_cast<OuterType&>(m_outer).setValue(other.m_outer.value());
+ m_id = other.m_id;
+ m_end = other.m_end;
+ return *this;
+ }
+
InnerIterator(const SparseCompressedBase& mat, Index outer)
: m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer)
{
@@ -132,7 +160,7 @@ class SparseCompressedBase<Derived>::InnerIterator
}
explicit InnerIterator(const internal::CompressedStorage<Scalar,StorageIndex>& data)
- : m_values(&data.value(0)), m_indices(&data.index(0)), m_outer(0), m_id(0), m_end(data.size())
+ : m_values(data.valuePtr()), m_indices(data.indexPtr()), m_outer(0), m_id(0), m_end(data.size())
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
}
@@ -152,7 +180,8 @@ class SparseCompressedBase<Derived>::InnerIterator
protected:
const Scalar* m_values;
const StorageIndex* m_indices;
- const internal::variable_if_dynamic<Index,Derived::IsVectorAtCompileTime?0:Dynamic> m_outer;
+ typedef internal::variable_if_dynamic<Index,Derived::IsVectorAtCompileTime?0:Dynamic> OuterType;
+ const OuterType m_outer;
Index m_id;
Index m_end;
private:
@@ -191,7 +220,7 @@ class SparseCompressedBase<Derived>::ReverseInnerIterator
}
explicit ReverseInnerIterator(const internal::CompressedStorage<Scalar,StorageIndex>& data)
- : m_values(&data.value(0)), m_indices(&data.index(0)), m_outer(0), m_start(0), m_id(data.size())
+ : m_values(data.valuePtr()), m_indices(data.indexPtr()), m_outer(0), m_start(0), m_id(data.size())
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
}
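Usage sketch (not part of the diff): with the default constructor and assignment operator added above, inner iterators can be stored in containers and re-seated later, for instance one iterator per outer vector:

    #include <Eigen/SparseCore>
    #include <vector>

    int main()
    {
      typedef Eigen::SparseMatrix<double> Mat;
      Mat A(5,5);
      A.insert(1,0) = 1.0;
      A.insert(3,2) = 2.0;
      A.makeCompressed();

      // Requires InnerIterator to be default-constructible and assignable.
      std::vector<Mat::InnerIterator> its(A.outerSize());
      for(Eigen::Index j = 0; j < A.outerSize(); ++j)
        its[j] = Mat::InnerIterator(A, j);

      double sum = 0;
      for(Eigen::Index j = 0; j < A.outerSize(); ++j)
        for(Mat::InnerIterator it = its[j]; it; ++it) // uses the new copy ctor
          sum += it.value();
    }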
diff --git a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
index d9420ac63..c57d9ac59 100644
--- a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
+++ b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
@@ -49,17 +49,10 @@ class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Sparse>
namespace internal {
-template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived,
- typename _LhsStorageMode = typename traits<Lhs>::StorageKind,
- typename _RhsStorageMode = typename traits<Rhs>::StorageKind>
-class sparse_cwise_binary_op_inner_iterator_selector;
-
-} // end namespace internal
-
-namespace internal {
-
// Generic "sparse OP sparse"
+template<typename XprType> struct binary_sparse_evaluator;
+
template<typename BinaryOp, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IteratorBased, IteratorBased>
: evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
@@ -153,6 +146,182 @@ protected:
evaluator<Rhs> m_rhsImpl;
};
+// dense op sparse
+template<typename BinaryOp, typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IteratorBased>
+ : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+{
+protected:
+ typedef typename evaluator<Rhs>::InnerIterator RhsIterator;
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
+ typedef typename traits<XprType>::Scalar Scalar;
+ typedef typename XprType::StorageIndex StorageIndex;
+public:
+
+ class ReverseInnerIterator;
+ class InnerIterator
+ {
+ enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
+ : m_lhsEval(aEval.m_lhsImpl), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor), m_id(-1), m_innerSize(aEval.m_expr.rhs().innerSize())
+ {
+ this->operator++();
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ ++m_id;
+ if(m_id<m_innerSize)
+ {
+ Scalar lhsVal = m_lhsEval.coeff(IsRowMajor?m_rhsIter.outer():m_id,
+ IsRowMajor?m_id:m_rhsIter.outer());
+ if(m_rhsIter && m_rhsIter.index()==m_id)
+ {
+ m_value = m_functor(lhsVal, m_rhsIter.value());
+ ++m_rhsIter;
+ }
+ else
+ m_value = m_functor(lhsVal, Scalar(0));
+ }
+
+ return *this;
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
+
+ EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
+ EIGEN_STRONG_INLINE Index row() const { return IsRowMajor ? m_rhsIter.outer() : m_id; }
+ EIGEN_STRONG_INLINE Index col() const { return IsRowMajor ? m_id : m_rhsIter.outer(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_id<m_innerSize; }
+
+ protected:
+ const evaluator<Lhs> &m_lhsEval;
+ RhsIterator m_rhsIter;
+ const BinaryOp& m_functor;
+ Scalar m_value;
+ StorageIndex m_id;
+ StorageIndex m_innerSize;
+ };
+
+
+ enum {
+ CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ // Expose storage order of the sparse expression
+ Flags = (XprType::Flags & ~RowMajorBit) | (int(Rhs::Flags)&RowMajorBit)
+ };
+
+ explicit binary_evaluator(const XprType& xpr)
+ : m_functor(xpr.functor()),
+ m_lhsImpl(xpr.lhs()),
+ m_rhsImpl(xpr.rhs()),
+ m_expr(xpr)
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ inline Index nonZerosEstimate() const {
+ return m_expr.size();
+ }
+
+protected:
+ const BinaryOp m_functor;
+ evaluator<Lhs> m_lhsImpl;
+ evaluator<Rhs> m_rhsImpl;
+ const XprType &m_expr;
+};
+
+// sparse op dense
+template<typename BinaryOp, typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IteratorBased, IndexBased>
+ : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+{
+protected:
+ typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
+ typedef typename traits<XprType>::Scalar Scalar;
+ typedef typename XprType::StorageIndex StorageIndex;
+public:
+
+ class ReverseInnerIterator;
+ class InnerIterator
+ {
+ enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
+ : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsEval(aEval.m_rhsImpl), m_functor(aEval.m_functor), m_id(-1), m_innerSize(aEval.m_expr.lhs().innerSize())
+ {
+ this->operator++();
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ ++m_id;
+ if(m_id<m_innerSize)
+ {
+ Scalar rhsVal = m_rhsEval.coeff(IsRowMajor?m_lhsIter.outer():m_id,
+ IsRowMajor?m_id:m_lhsIter.outer());
+ if(m_lhsIter && m_lhsIter.index()==m_id)
+ {
+ m_value = m_functor(m_lhsIter.value(), rhsVal);
+ ++m_lhsIter;
+ }
+ else
+ m_value = m_functor(Scalar(0),rhsVal);
+ }
+
+ return *this;
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
+
+ EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
+ EIGEN_STRONG_INLINE Index row() const { return IsRowMajor ? m_lhsIter.outer() : m_id; }
+ EIGEN_STRONG_INLINE Index col() const { return IsRowMajor ? m_id : m_lhsIter.outer(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_id<m_innerSize; }
+
+ protected:
+ LhsIterator m_lhsIter;
+ const evaluator<Rhs> &m_rhsEval;
+ const BinaryOp& m_functor;
+ Scalar m_value;
+ StorageIndex m_id;
+ StorageIndex m_innerSize;
+ };
+
+
+ enum {
+ CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ // Expose storage order of the sparse expression
+ Flags = (XprType::Flags & ~RowMajorBit) | (int(Lhs::Flags)&RowMajorBit)
+ };
+
+ explicit binary_evaluator(const XprType& xpr)
+ : m_functor(xpr.functor()),
+ m_lhsImpl(xpr.lhs()),
+ m_rhsImpl(xpr.rhs()),
+ m_expr(xpr)
+ {
+ EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
+ EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
+ }
+
+ inline Index nonZerosEstimate() const {
+ return m_expr.size();
+ }
+
+protected:
+ const BinaryOp m_functor;
+ evaluator<Lhs> m_lhsImpl;
+ evaluator<Rhs> m_rhsImpl;
+ const XprType &m_expr;
+};
+
// "sparse .* sparse"
template<typename T, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T>, Lhs, Rhs>, IteratorBased, IteratorBased>
@@ -287,7 +456,8 @@ public:
enum {
CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
- Flags = XprType::Flags
+ // Expose storage order of the sparse expression
+ Flags = (XprType::Flags & ~RowMajorBit) | (int(Rhs::Flags)&RowMajorBit)
};
explicit binary_evaluator(const XprType& xpr)
@@ -360,7 +530,8 @@ public:
enum {
CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
- Flags = XprType::Flags
+ // Expose storage order of the sparse expression
+ Flags = (XprType::Flags & ~RowMajorBit) | (int(Lhs::Flags)&RowMajorBit)
};
explicit binary_evaluator(const XprType& xpr)
@@ -428,6 +599,34 @@ SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) c
return typename CwiseProductDenseReturnType<OtherDerived>::Type(derived(), other.derived());
}
+template<typename DenseDerived, typename SparseDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar>, const DenseDerived, const SparseDerived>
+operator+(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> &b)
+{
+ return CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
+}
+
+template<typename SparseDerived, typename DenseDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
+operator+(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b)
+{
+ return CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
+}
+
+template<typename DenseDerived, typename SparseDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar>, const DenseDerived, const SparseDerived>
+operator-(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> &b)
+{
+ return CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
+}
+
+template<typename SparseDerived, typename DenseDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
+operator-(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b)
+{
+ return CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
+}
+
} // end namespace Eigen
#endif // EIGEN_SPARSE_CWISE_BINARY_OP_H
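Usage sketch (not part of the diff): the four operators added above make mixed dense/sparse sums and differences compile directly, producing a CwiseBinaryOp whose evaluation is driven by the sparse operand's storage order via the new dense-op-sparse and sparse-op-dense evaluators:

    #include <Eigen/Dense>
    #include <Eigen/SparseCore>

    int main()
    {
      Eigen::MatrixXd D = Eigen::MatrixXd::Random(4,4);
      Eigen::SparseMatrix<double> S(4,4);
      S.insert(0,0) = 1.0;
      S.makeCompressed();

      // Previously required an explicit conversion of one operand.
      Eigen::MatrixXd R1 = D + S;
      Eigen::MatrixXd R2 = S - D;
    }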
diff --git a/Eigen/src/SparseCore/SparseDenseProduct.h b/Eigen/src/SparseCore/SparseDenseProduct.h
index 87c946b9b..c9da8a2bb 100644
--- a/Eigen/src/SparseCore/SparseDenseProduct.h
+++ b/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -48,7 +48,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, t
// It basically represents the minimal amount of work to be done to be worth it.
if(threads>1 && lhsEval.nonZerosEstimate() > 20000)
{
- #pragma omp parallel for schedule(static) num_threads(threads)
+ #pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)
for(Index i=0; i<n; ++i)
processRow(lhsEval,rhs,res,alpha,i,c);
}
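For context, a standalone sketch of the chunk-size arithmetic (not part of the diff): the schedule change splits the n rows into roughly four chunks per thread with a rounded-up integer division, so that dynamic scheduling can rebalance rows of uneven density:

    #include <Eigen/Core>

    int main()
    {
      Eigen::Index n = 1000;  // rows of the sparse-dense product
      int threads = 8;        // thread count, as queried in the real code
      // Rounded-up division: about 4 chunks per thread, here 32 rows each.
      Eigen::Index chunk = (n + threads*4 - 1) / (threads*4);
      (void)chunk;
    }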
diff --git a/Eigen/src/SparseCore/SparseMap.h b/Eigen/src/SparseCore/SparseMap.h
index 36c09ab0c..eb241c3e2 100644
--- a/Eigen/src/SparseCore/SparseMap.h
+++ b/Eigen/src/SparseCore/SparseMap.h
@@ -37,11 +37,15 @@ struct traits<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, St
};
} // end namespace internal
-
+
template<typename Derived,
int Level = internal::accessors_level<Derived>::has_write_access ? WriteAccessors : ReadOnlyAccessors
> class SparseMapBase;
+/** \ingroup SparseCore_Module
+ * class SparseMapBase
+ * \brief Common base class for Map and Ref instance of sparse matrix and vector.
+ */
template<typename Derived>
class SparseMapBase<Derived,ReadOnlyAccessors>
: public SparseCompressedBase<Derived>
@@ -71,22 +75,33 @@ class SparseMapBase<Derived,ReadOnlyAccessors>
public:
+ /** \copydoc SparseMatrixBase::rows() */
inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ /** \copydoc SparseMatrixBase::cols() */
inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+ /** \copydoc SparseMatrixBase::innerSize() */
inline Index innerSize() const { return m_innerSize; }
+ /** \copydoc SparseMatrixBase::outerSize() */
inline Index outerSize() const { return m_outerSize; }
+ /** \copydoc SparseCompressedBase::nonZeros */
inline Index nonZeros() const { return m_zero_nnz[1]; }
+ /** \copydoc SparseCompressedBase::isCompressed */
bool isCompressed() const { return m_innerNonZeros==0; }
//----------------------------------------
// direct access interface
+ /** \copydoc SparseMatrix::valuePtr */
inline const Scalar* valuePtr() const { return m_values; }
+ /** \copydoc SparseMatrix::innerIndexPtr */
inline const StorageIndex* innerIndexPtr() const { return m_innerIndices; }
+ /** \copydoc SparseMatrix::outerIndexPtr */
inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
+ /** \copydoc SparseMatrix::innerNonZeroPtr */
inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
//----------------------------------------
+ /** \copydoc SparseMatrix::coeff */
inline Scalar coeff(Index row, Index col) const
{
const Index outer = IsRowMajor ? row : col;
@@ -125,6 +140,10 @@ class SparseMapBase<Derived,ReadOnlyAccessors>
inline SparseMapBase() {}
};
+/** \ingroup SparseCore_Module
+ * class SparseMapBase
+ * \brief Common base class for writable Map and Ref instance of sparse matrix and vector.
+ */
template<typename Derived>
class SparseMapBase<Derived,WriteAccessors>
: public SparseMapBase<Derived,ReadOnlyAccessors>
@@ -185,9 +204,23 @@ class SparseMapBase<Derived,WriteAccessors>
inline SparseMapBase() {}
};
+/** \ingroup SparseCore_Module
+ *
+ * \brief Specialization of class Map for SparseMatrix-like storage.
+ *
+ * \tparam SparseMatrixType the equivalent sparse matrix type of the referenced data, it must be a template instance of class SparseMatrix.
+ *
+ * \sa class Map, class SparseMatrix, class Ref<SparseMatrixType,Options>
+ */
+#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>
: public SparseMapBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
+#else
+template<typename SparseMatrixType>
+class Map<SparseMatrixType>
+ : public SparseMapBase<Derived,WriteAccessors>
+#endif
{
public:
typedef SparseMapBase<Map> Base;
@@ -196,6 +229,12 @@ class Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>
public:
+ /** Constructs a read-write Map to a sparse matrix of size \a rows x \a cols, containing \a nnz non-zero coefficients,
+ * stored as a sparse format as defined by the pointers \a outerIndexPtr, \a innerIndexPtr, and \a valuePtr.
+ * If the optional parameter \a innerNonZerosPtr is the null pointer, then a standard compressed format is assumed.
+ *
+ * More details on the expected storage schemes are given in the \ref TutorialSparse "manual pages".
+ */
inline Map(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr,
StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZerosPtr = 0)
: Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
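Usage sketch for the constructor documented above (not part of the diff), mapping an existing compressed-column array set without copying; the arrays must outlive the map:

    #include <Eigen/SparseCore>

    int main()
    {
      // 3x3 CSC data: values, inner (row) indices, outer starts.
      double vals[]  = { 1.0, 2.0, 3.0 };
      int    inner[] = { 0, 2, 1 };
      int    outer[] = { 0, 1, 2, 3 };  // cols()+1 entries

      Eigen::Map<Eigen::SparseMatrix<double> > M(3, 3, 3, outer, inner, vals);
      double x = M.coeff(2, 1);         // reads through the mapped arrays
      (void)x;                          // x == 2.0
    }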
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h
index 91bada40f..760e151eb 100644
--- a/Eigen/src/SparseCore/SparseMatrix.h
+++ b/Eigen/src/SparseCore/SparseMatrix.h
@@ -140,20 +140,20 @@ class SparseMatrix
/** \returns a const pointer to the array of values.
* This function is aimed at interoperability with other libraries.
* \sa innerIndexPtr(), outerIndexPtr() */
- inline const Scalar* valuePtr() const { return &m_data.value(0); }
+ inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
/** \returns a non-const pointer to the array of values.
* This function is aimed at interoperability with other libraries.
* \sa innerIndexPtr(), outerIndexPtr() */
- inline Scalar* valuePtr() { return &m_data.value(0); }
+ inline Scalar* valuePtr() { return m_data.valuePtr(); }
/** \returns a const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
- inline const StorageIndex* innerIndexPtr() const { return &m_data.index(0); }
+ inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
/** \returns a non-const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
- inline StorageIndex* innerIndexPtr() { return &m_data.index(0); }
+ inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
/** \returns a const pointer to the array of the starting positions of the inner vectors.
* This function is aimed at interoperability with other libraries.
@@ -538,7 +538,12 @@ class SparseMatrix
}
/** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
- * \sa reserve(), setZero()
+ *
+ * If the sizes of the matrix are decreased, then the matrix is turned to \b uncompressed-mode
+ * and the storage of the out of bounds coefficients is kept and reserved.
+ * Call makeCompressed() to pack the entries and squeeze extra memory.
+ *
+ * \sa reserve(), setZero(), makeCompressed()
*/
void conservativeResize(Index rows, Index cols)
{
@@ -735,8 +740,8 @@ class SparseMatrix
{
eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
this->m_data.resize(rows());
- Eigen::Map<IndexVector>(&this->m_data.index(0), rows()).setLinSpaced(0, StorageIndex(rows()-1));
- Eigen::Map<ScalarVector>(&this->m_data.value(0), rows()).setOnes();
+ Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
+ Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
std::free(m_innerNonZeros);
m_innerNonZeros = 0;
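Illustration of the conservativeResize() semantics documented above (a sketch):

    #include <Eigen/SparseCore>

    int main()
    {
      Eigen::SparseMatrix<double> A(100,100);
      A.insert(5,5) = 1.0;
      A.makeCompressed();

      A.conservativeResize(50,50); // shrinking switches to uncompressed mode;
                                   // out-of-bounds storage stays reserved
      A.makeCompressed();          // pack entries and squeeze the extra memory
    }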
diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h
index 648ae1f8a..2a90f40bf 100644
--- a/Eigen/src/SparseCore/SparseMatrixBase.h
+++ b/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -18,7 +18,7 @@ namespace Eigen {
*
* \brief Base class of any sparse matrices or sparse expressions
*
- * \tparam Derived
+ * \tparam Derived is the derived type, e.g. a sparse matrix type, or an expression, etc.
*
* This class can be extended with the help of the plugin mechanism described on the page
* \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
diff --git a/Eigen/src/SparseCore/SparseRedux.h b/Eigen/src/SparseCore/SparseRedux.h
index 50ebb2e53..2a9718cfb 100644
--- a/Eigen/src/SparseCore/SparseRedux.h
+++ b/Eigen/src/SparseCore/SparseRedux.h
@@ -30,7 +30,7 @@ typename internal::traits<SparseMatrix<_Scalar,_Options,_Index> >::Scalar
SparseMatrix<_Scalar,_Options,_Index>::sum() const
{
eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
- return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
+ return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();
}
template<typename _Scalar, int _Options, typename _Index>
@@ -38,7 +38,7 @@ typename internal::traits<SparseVector<_Scalar,_Options, _Index> >::Scalar
SparseVector<_Scalar,_Options,_Index>::sum() const
{
eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
- return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
+ return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();
}
} // end namespace Eigen
diff --git a/Eigen/src/SparseCore/SparseRef.h b/Eigen/src/SparseCore/SparseRef.h
index 19e06fc80..a558230e7 100644
--- a/Eigen/src/SparseCore/SparseRef.h
+++ b/Eigen/src/SparseCore/SparseRef.h
@@ -13,7 +13,7 @@
namespace Eigen {
enum {
- StandardCompressedFormat = 2
+ StandardCompressedFormat = 2 /**< used by Ref<SparseMatrix> to specify whether the input storage must be in standard compressed form */
};
namespace internal {
@@ -108,20 +108,25 @@ protected:
/**
- * \ingroup Sparse_Module
+ * \ingroup SparseCore_Module
*
* \brief A sparse matrix expression referencing an existing sparse expression
*
- * \tparam PlainObjectType the equivalent sparse matrix type of the referenced data
+ * \tparam SparseMatrixType the equivalent sparse matrix type of the referenced data, it must be a template instance of class SparseMatrix.
* \tparam Options specifies whether the a standard compressed format is required \c Options is \c #StandardCompressedFormat, or \c 0.
* The default is \c 0.
- * \tparam StrideType Only used for dense Ref
*
* \sa class Ref
*/
+#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType >
: public internal::SparseRefBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType > >
+#else
+template<typename SparseMatrixType, int Options>
+class Ref<SparseMatrixType, Options>
+ : public SparseMapBase<Derived,WriteAccessors> // yes, that's weird to use Derived here, but that works!
+#endif
{
typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
typedef internal::traits<Ref> Traits;
@@ -155,6 +160,7 @@ class Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType >
template<typename Derived>
inline Ref(const SparseCompressedBase<Derived>& expr)
#else
+ /** Implicit constructor from any sparse expression (2D matrix or 1D vector) */
template<typename Derived>
inline Ref(SparseCompressedBase<Derived>& expr)
#endif
@@ -225,19 +231,23 @@ class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType
/**
- * \ingroup Sparse_Module
+ * \ingroup SparseCore_Module
*
* \brief A sparse vector expression referencing an existing sparse vector expression
*
- * \tparam PlainObjectType the equivalent sparse matrix type of the referenced data
- * \tparam Options Not used for SparseVector.
- * \tparam StrideType Only used for dense Ref
+ * \tparam SparseVectorType the equivalent sparse vector type of the referenced data, it must be a template instance of class SparseVector.
*
* \sa class Ref
*/
+#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType >
: public internal::SparseRefBase<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType > >
+#else
+template<typename SparseVectorType>
+class Ref<SparseVectorType>
+ : public SparseMapBase<Derived,WriteAccessors>
+#endif
{
typedef SparseVector<MatScalar,MatOptions,MatIndex> PlainObjectType;
typedef internal::traits<Ref> Traits;
@@ -259,6 +269,7 @@ class Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType >
template<typename Derived>
inline Ref(const SparseCompressedBase<Derived>& expr)
#else
+ /** Implicit constructor from any 1D sparse vector expression */
template<typename Derived>
inline Ref(SparseCompressedBase<Derived>& expr)
#endif
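A sketch of the Ref<SparseMatrix> API documented above (not part of the diff): a function taking a sparse Ref accepts any compatible sparse object without copying when the storage matches, and Options = StandardCompressedFormat guarantees the referenced data is in standard compressed form:

    #include <Eigen/SparseCore>

    // Accepts any column-major sparse matrix in standard compressed form.
    double firstValue(const Eigen::Ref<const Eigen::SparseMatrix<double>,
                                       Eigen::StandardCompressedFormat>& m)
    {
      return m.nonZeros() > 0 ? m.valuePtr()[0] : 0.0;
    }

    int main()
    {
      Eigen::SparseMatrix<double> A(4,4);
      A.insert(1,1) = 3.0;
      A.makeCompressed();   // satisfies StandardCompressedFormat
      firstValue(A);
    }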
diff --git a/Eigen/src/SparseCore/SparseSelfAdjointView.h b/Eigen/src/SparseCore/SparseSelfAdjointView.h
index 46c6ce1d3..b92bb17e2 100644
--- a/Eigen/src/SparseCore/SparseSelfAdjointView.h
+++ b/Eigen/src/SparseCore/SparseSelfAdjointView.h
@@ -55,10 +55,10 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::StorageIndex StorageIndex;
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
- typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
- explicit inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
+ explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
{
eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices");
}
@@ -68,7 +68,7 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
/** \internal \returns a reference to the nested matrix */
const _MatrixTypeNested& matrix() const { return m_matrix; }
- _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); }
+ typename internal::remove_reference<MatrixTypeNested>::type& matrix() { return m_matrix; }
/** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs.
*
@@ -158,7 +158,7 @@ template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
protected:
- typename MatrixType::Nested m_matrix;
+ MatrixTypeNested m_matrix;
//mutable VectorI m_countPerRow;
//mutable VectorI m_countPerCol;
private:
@@ -194,9 +194,9 @@ SparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<Derive
{
SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();
if(alpha==Scalar(0))
- m_matrix.const_cast_derived() = tmp.template triangularView<Mode>();
+ m_matrix = tmp.template triangularView<Mode>();
else
- m_matrix.const_cast_derived() += alpha * tmp.template triangularView<Mode>();
+ m_matrix += alpha * tmp.template triangularView<Mode>();
return *this;
}
@@ -211,8 +211,6 @@ struct evaluator_traits<SparseSelfAdjointView<MatrixType,Mode> >
{
typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
typedef SparseSelfAdjointShape Shape;
-
- static const int AssumeAliasing = 0;
};
struct SparseSelfAdjoint2Sparse {};
@@ -389,7 +387,10 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
typedef typename MatrixType::Scalar Scalar;
typedef SparseMatrix<Scalar,DestOrder,StorageIndex> Dest;
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
+ typedef evaluator<MatrixType> MatEval;
+ typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
+ MatEval matEval(mat);
Dest& dest(_dest.derived());
enum {
StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
@@ -403,7 +404,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
for(Index j = 0; j<size; ++j)
{
Index jp = perm ? perm[j] : j;
- for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ for(MatIterator it(matEval,j); it; ++it)
{
Index i = it.index();
Index r = it.row();
@@ -433,7 +434,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
// copy data
for(StorageIndex j = 0; j<size; ++j)
{
- for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ for(MatIterator it(matEval,j); it; ++it)
{
StorageIndex i = internal::convert_index<StorageIndex>(it.index());
Index r = it.row();
@@ -476,12 +477,17 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
typedef typename MatrixType::Scalar Scalar;
SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
typedef Matrix<StorageIndex,Dynamic,1> VectorI;
+ typedef evaluator<MatrixType> MatEval;
+ typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
+
enum {
SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
StorageOrderMatch = int(SrcOrder) == int(DstOrder),
DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
};
+
+ MatEval matEval(mat);
Index size = mat.rows();
VectorI count(size);
@@ -490,7 +496,7 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
for(StorageIndex j = 0; j<size; ++j)
{
StorageIndex jp = perm ? perm[j] : j;
- for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ for(MatIterator it(matEval,j); it; ++it)
{
StorageIndex i = it.index();
if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
@@ -510,7 +516,7 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
for(StorageIndex j = 0; j<size; ++j)
{
- for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ for(MatIterator it(matEval,j); it; ++it)
{
StorageIndex i = it.index();
if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
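Usage sketch for the self-adjoint view (not part of the diff): rankUpdate() now writes through the non-const nested reference instead of const_cast:

    #include <Eigen/SparseCore>

    int main()
    {
      Eigen::SparseMatrix<double> A(5,5);
      A.insert(0,0) = 1.0;
      A.makeCompressed();

      Eigen::SparseMatrix<double> U(5,2);
      U.insert(0,0) = 1.0;
      U.insert(3,1) = 2.0;
      U.makeCompressed();

      // A <- A + 0.5 * U U^T; only the upper triangular part of A is
      // referenced and updated in place.
      A.selfadjointView<Eigen::Upper>().rankUpdate(U, 0.5);
    }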
diff --git a/Eigen/src/SparseCore/SparseTriangularView.h b/Eigen/src/SparseCore/SparseTriangularView.h
index 7c718e4e1..0c27855d5 100644
--- a/Eigen/src/SparseCore/SparseTriangularView.h
+++ b/Eigen/src/SparseCore/SparseTriangularView.h
@@ -43,9 +43,6 @@ template<typename MatrixType, unsigned int Mode> class TriangularViewImpl<Matrix
EIGEN_SPARSE_PUBLIC_INTERFACE(TriangularViewType)
- class InnerIterator;
- class ReverseInnerIterator;
-
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;
typedef typename internal::remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
@@ -63,108 +60,6 @@ template<typename MatrixType, unsigned int Mode> class TriangularViewImpl<Matrix
};
-template<typename MatrixType, unsigned int Mode>
-class TriangularViewImpl<MatrixType,Mode,Sparse>::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator
-{
- typedef typename MatrixTypeNestedCleaned::InnerIterator Base;
- public:
-
- EIGEN_STRONG_INLINE InnerIterator(const TriangularViewImpl& view, Index outer)
- : Base(view.derived().nestedExpression(), outer), m_returnOne(false)
- {
- if(SkipFirst)
- {
- while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()<outer))
- Base::operator++();
- if(HasUnitDiag)
- m_returnOne = true;
- }
- else if(HasUnitDiag && ((!Base::operator bool()) || Base::index()>=Base::outer()))
- {
- if((!SkipFirst) && Base::operator bool())
- Base::operator++();
- m_returnOne = true;
- }
- }
-
- EIGEN_STRONG_INLINE InnerIterator& operator++()
- {
- if(HasUnitDiag && m_returnOne)
- m_returnOne = false;
- else
- {
- Base::operator++();
- if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer()))
- {
- if((!SkipFirst) && Base::operator bool())
- Base::operator++();
- m_returnOne = true;
- }
- }
- return *this;
- }
-
- inline Index row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); }
- inline Index col() const { return (MatrixType::Flags&RowMajorBit ? this->index() : Base::outer()); }
- inline StorageIndex index() const
- {
- if(HasUnitDiag && m_returnOne) return Base::outer();
- else return Base::index();
- }
- inline Scalar value() const
- {
- if(HasUnitDiag && m_returnOne) return Scalar(1);
- else return Base::value();
- }
-
- EIGEN_STRONG_INLINE operator bool() const
- {
- if(HasUnitDiag && m_returnOne)
- return true;
- if(SkipFirst) return Base::operator bool();
- else
- {
- if (SkipDiag) return (Base::operator bool() && this->index() < this->outer());
- else return (Base::operator bool() && this->index() <= this->outer());
- }
- }
- protected:
- bool m_returnOne;
-};
-
-template<typename MatrixType, unsigned int Mode>
-class TriangularViewImpl<MatrixType,Mode,Sparse>::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator
-{
- typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base;
- public:
-
- EIGEN_STRONG_INLINE ReverseInnerIterator(const TriangularViewType& view, Index outer)
- : Base(view.derived().nestedExpression(), outer)
- {
- eigen_assert((!HasUnitDiag) && "ReverseInnerIterator does not support yet triangular views with a unit diagonal");
- if(SkipLast) {
- while((*this) && (SkipDiag ? this->index()>=outer : this->index()>outer))
- --(*this);
- }
- }
-
- EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
- { Base::operator--(); return *this; }
-
- inline Index row() const { return Base::row(); }
- inline Index col() const { return Base::col(); }
-
- EIGEN_STRONG_INLINE operator bool() const
- {
- if (SkipLast) return Base::operator bool() ;
- else
- {
- if(SkipDiag) return (Base::operator bool() && this->index() > this->outer());
- else return (Base::operator bool() && this->index() >= this->outer());
- }
- }
-};
-
namespace internal {
template<typename ArgType, unsigned int Mode>
@@ -193,7 +88,7 @@ public:
Flags = XprType::Flags
};
- explicit unary_evaluator(const XprType &xpr) : m_argImpl(xpr.nestedExpression()) {}
+ explicit unary_evaluator(const XprType &xpr) : m_argImpl(xpr.nestedExpression()), m_arg(xpr.nestedExpression()) {}
inline Index nonZerosEstimate() const {
return m_argImpl.nonZerosEstimate();
@@ -205,20 +100,20 @@ public:
public:
EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& xprEval, Index outer)
- : Base(xprEval.m_argImpl,outer), m_returnOne(false)
+ : Base(xprEval.m_argImpl,outer), m_returnOne(false), m_containsDiag(Base::outer()<xprEval.m_arg.innerSize())
{
if(SkipFirst)
{
while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()<outer))
Base::operator++();
if(HasUnitDiag)
- m_returnOne = true;
+ m_returnOne = m_containsDiag;
}
else if(HasUnitDiag && ((!Base::operator bool()) || Base::index()>=Base::outer()))
{
if((!SkipFirst) && Base::operator bool())
Base::operator++();
- m_returnOne = true; // FIXME check innerSize()>outer();
+ m_returnOne = m_containsDiag;
}
}
@@ -233,7 +128,7 @@ public:
{
if((!SkipFirst) && Base::operator bool())
Base::operator++();
- m_returnOne = true; // FIXME check innerSize()>outer();
+ m_returnOne = m_containsDiag;
}
}
return *this;
@@ -266,12 +161,14 @@ public:
protected:
bool m_returnOne;
+ bool m_containsDiag;
private:
Scalar& valueRef();
};
protected:
evaluator<ArgType> m_argImpl;
+ const ArgType& m_arg;
};
} // end namespace internal
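The m_containsDiag fix above matters for rectangular matrices, where some outer indices have no diagonal entry at all; a sketch exercising the evaluator through a unit-diagonal view (not part of the diff):

    #include <Eigen/SparseCore>

    int main()
    {
      // Wide matrix: outer indices 3 and 4 have no diagonal entry,
      // so the iterator must not synthesize a unit coefficient there.
      Eigen::SparseMatrix<double> A(3,5);
      A.insert(2,0) = 4.0;
      A.insert(0,4) = 5.0;   // upper part, dropped by the view
      A.makeCompressed();

      // Evaluating the view drives unary_evaluator::InnerIterator; with the
      // guard, L gets unit entries at (0,0), (1,1), (2,2) only, plus (2,0).
      Eigen::SparseMatrix<double> L = A.triangularView<Eigen::UnitLower>();
    }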
diff --git a/Eigen/src/SparseCore/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h
index 7ec73a365..167a9886c 100644
--- a/Eigen/src/SparseCore/SparseVector.h
+++ b/Eigen/src/SparseCore/SparseVector.h
@@ -83,11 +83,11 @@ class SparseVector
EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
- EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return &m_data.value(0); }
- EIGEN_STRONG_INLINE Scalar* valuePtr() { return &m_data.value(0); }
+ EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return m_data.valuePtr(); }
+ EIGEN_STRONG_INLINE Scalar* valuePtr() { return m_data.valuePtr(); }
- EIGEN_STRONG_INLINE const StorageIndex* innerIndexPtr() const { return &m_data.index(0); }
- EIGEN_STRONG_INLINE StorageIndex* innerIndexPtr() { return &m_data.index(0); }
+ EIGEN_STRONG_INLINE const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
+ EIGEN_STRONG_INLINE StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
inline const StorageIndex* outerIndexPtr() const { return 0; }
inline StorageIndex* outerIndexPtr() { return 0; }
@@ -125,6 +125,7 @@ class SparseVector
inline Scalar& coeffRef(Index i)
{
eigen_assert(i>=0 && i<m_size);
+
return m_data.atWithInsertion(StorageIndex(i));
}
@@ -205,23 +206,54 @@ class SparseVector
inline void finalize() {}
+ /** \copydoc SparseMatrix::prune(const Scalar&,const RealScalar&) */
void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
{
m_data.prune(reference,epsilon);
}
+ /** Resizes the sparse vector to \a rows x \a cols
+ *
+ * This method is provided for compatibility with matrices.
+ * For a column vector, \a cols must be equal to 1.
+ * For a row vector, \a rows must be equal to 1.
+ *
+ * \sa resize(Index)
+ */
void resize(Index rows, Index cols)
{
eigen_assert((IsColVector ? cols : rows)==1 && "Outer dimension must equal 1");
resize(IsColVector ? rows : cols);
}
+ /** Resizes the sparse vector to \a newSize
+ * This method deletes all entries, thus leaving an empty sparse vector
+ *
+ * \sa conservativeResize(), setZero() */
void resize(Index newSize)
{
m_size = newSize;
m_data.clear();
}
+ /** Resizes the sparse vector to \a newSize, while leaving old values untouched.
+ *
+ * If the size of the vector is decreased, then the storage of the out-of bounds coefficients is kept and reserved.
+ * Call .data().squeeze() to free extra memory.
+ *
+ * \sa reserve(), setZero()
+ */
+ void conservativeResize(Index newSize)
+ {
+ if (newSize < m_size)
+ {
+ Index i = 0;
+ while (i<m_data.size() && m_data.index(i)<newSize) ++i;
+ m_data.resize(i);
+ }
+ m_size = newSize;
+ }
+
void resizeNonZeros(Index size) { m_data.resize(size); }
inline SparseVector() : m_size(0) { check_template_parameters(); resize(0); }
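A sketch of the new SparseVector::conservativeResize() (not part of the diff); indices are stored sorted, so shrinking truncates the storage at the first out-of-bounds index:

    #include <Eigen/SparseCore>

    int main()
    {
      Eigen::SparseVector<double> v(10);
      v.insert(2) = 1.0;
      v.insert(7) = 2.0;

      v.conservativeResize(5);  // keeps the entry at index 2 and drops the
                                // now out-of-bounds entry at index 7, while
                                // the allocated memory stays reserved
      // v.nonZeros() == 1
    }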
diff --git a/Eigen/src/SparseCore/SparseView.h b/Eigen/src/SparseCore/SparseView.h
index c945c4dab..b867877d8 100644
--- a/Eigen/src/SparseCore/SparseView.h
+++ b/Eigen/src/SparseCore/SparseView.h
@@ -38,7 +38,7 @@ public:
typedef typename internal::remove_all<MatrixType>::type NestedExpression;
explicit SparseView(const MatrixType& mat, const Scalar& reference = Scalar(0),
- RealScalar epsilon = NumTraits<Scalar>::dummy_precision())
+ const RealScalar &epsilon = NumTraits<Scalar>::dummy_precision())
: m_matrix(mat), m_reference(reference), m_epsilon(epsilon) {}
inline Index rows() const { return m_matrix.rows(); }
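The SparseView change only passes epsilon by const reference, so usage is unchanged; a sketch via MatrixBase::sparseView() (not part of the diff):

    #include <Eigen/Dense>
    #include <Eigen/SparseCore>

    int main()
    {
      Eigen::MatrixXd D(2,2);
      D << 1.0, 1e-12,
           0.0, 2.0;

      // Keep coefficients that are not much smaller than the reference 1.0
      // at precision 1e-9; S ends up with two stored entries, (0,0) and (1,1).
      Eigen::SparseMatrix<double> S = D.sparseView(1.0, 1e-9);
    }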