author     Gael Guennebaud <g.gael@free.fr>    2009-01-19 15:20:45 +0000
committer  Gael Guennebaud <g.gael@free.fr>    2009-01-19 15:20:45 +0000
commit     178858f1bd4f0661f355d17058d87f8c56a4c0c1 (patch)
tree       2889df07300034e8567911e7cf4cad7786e2e762 /Eigen/src/Sparse
parent     385fd3d918024f0e954b40b1b729887b16f43788 (diff)
add a flexible sparse matrix class designed for fast matrix assembly
Diffstat (limited to 'Eigen/src/Sparse')
-rw-r--r--  Eigen/src/Sparse/CompressedStorage.h    |  84
-rw-r--r--  Eigen/src/Sparse/DynamicSparseMatrix.h  | 284
-rw-r--r--  Eigen/src/Sparse/SparseMatrix.h         |  21
-rw-r--r--  Eigen/src/Sparse/SparseMatrixBase.h     |   7
-rw-r--r--  Eigen/src/Sparse/SparseProduct.h        |   2
-rw-r--r--  Eigen/src/Sparse/SparseUtil.h           |  40
-rw-r--r--  Eigen/src/Sparse/SparseVector.h         |  52
7 files changed, 412 insertions, 78 deletions
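
The commit message states the intent: DynamicSparseMatrix trades the compact compressed layout of SparseMatrix for cheap out-of-order insertion. As orientation before the diff, a minimal assembly sketch against the API introduced below (sizes and values are made up; assumes the usual <Eigen/Sparse> include and Eigen namespace):

    #include <Eigen/Sparse>
    using namespace Eigen;

    int main()
    {
      DynamicSparseMatrix<double> m(1000, 1000);  // column-major by default
      m.startFill(4000);          // reserves max(4000/1000, 4) = 4 nonzeros per inner vector
      m.fillrand(7, 3)  = 2.5;    // arbitrary coordinates; the coefficient must not exist yet
      m.fillrand(2, 3)  = 1.0;    // out-of-order insertion within a column is fine
      m.coeffRef(7, 3) += 0.5;    // log-time read/write access to an existing coefficient
      m.endFill();                // no-op here, kept for compatibility with SparseMatrix
      return 0;
    }
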
diff --git a/Eigen/src/Sparse/CompressedStorage.h b/Eigen/src/Sparse/CompressedStorage.h
index da85aad0b..d66cb5c94 100644
--- a/Eigen/src/Sparse/CompressedStorage.h
+++ b/Eigen/src/Sparse/CompressedStorage.h
@@ -43,6 +43,7 @@ class CompressedStorage
}
CompressedStorage(const CompressedStorage& other)
+ : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
{
*this = other;
}
@@ -97,15 +98,15 @@ class CompressedStorage
m_indices[id] = i;
}
- int size() const { return m_size; }
- int allocatedSize() const { return m_allocatedSize; }
- void clear() { m_size = 0; }
+ inline int size() const { return m_size; }
+ inline int allocatedSize() const { return m_allocatedSize; }
+ inline void clear() { m_size = 0; }
- Scalar& value(int i) { return m_values[i]; }
- const Scalar& value(int i) const { return m_values[i]; }
+ inline Scalar& value(int i) { return m_values[i]; }
+ inline const Scalar& value(int i) const { return m_values[i]; }
- int& index(int i) { return m_indices[i]; }
- const int& index(int i) const { return m_indices[i]; }
+ inline int& index(int i) { return m_indices[i]; }
+ inline const int& index(int i) const { return m_indices[i]; }
static CompressedStorage Map(int* indices, Scalar* values, int size)
{
@@ -115,10 +116,77 @@ class CompressedStorage
res.m_allocatedSize = res.m_size = size;
return res;
}
+
+ /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
+ inline int searchLowerIndex(int key) const
+ {
+ return searchLowerIndex(0, m_size, key);
+ }
+
+ /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
+ inline int searchLowerIndex(int start, int end, int key) const
+ {
+ while(end>start)
+ {
+ int mid = (end+start)>>1;
+ if (m_indices[mid]<key)
+ start = mid+1;
+ else
+ end = mid;
+ }
+ return start;
+ }
+
+ /** \returns the stored value at index \a key
+ * If the value does not exist, then the value \a defaultValue is returned without any insertion. */
+ inline Scalar at(int key, Scalar defaultValue = Scalar(0)) const
+ {
+ if (m_size==0)
+ return defaultValue;
+ else if (key==m_indices[m_size-1])
+ return m_values[m_size-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+ const int id = searchLowerIndex(0,m_size-1,key);
+ return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+ }
+
+ /** Like at(), but the search is performed in the range [start,end) */
+ inline Scalar atInRange(int start, int end, int key, Scalar defaultValue = Scalar(0)) const
+ {
+ if (start==end)
+ return defaultValue;
+ else if (end>start && key==m_indices[end-1])
+ return m_values[end-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+ const int id = searchLowerIndex(start,end-1,key);
+ return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+ }
+
+ /** \returns a reference to the value at index \a key
+ * If the value does not exist, then the value \a defaultValue is inserted
+ * such that the keys are sorted. */
+ inline Scalar& atWithInsertion(int key, Scalar defaultValue = Scalar(0))
+ {
+ int id = searchLowerIndex(0,m_size,key);
+ if (id>=m_size || m_indices[id]!=key)
+ {
+ resize(m_size+1,1);
+ for (int j=m_size-1; j>id; --j)
+ {
+ m_indices[j] = m_indices[j-1];
+ m_values[j] = m_values[j-1];
+ }
+ m_indices[id] = key;
+ m_values[id] = defaultValue;
+ }
+ return m_values[id];
+ }
protected:
- void reallocate(int size)
+ inline void reallocate(int size)
{
Scalar* newValues = new Scalar[size];
int* newIndices = new int[size];
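
The three new lookup helpers share one invariant: searchLowerIndex() returns the first position whose stored index is not smaller than the key, i.e. the insertion point in the sorted index array. A hedged illustration of that contract, using the pre-existing append() that DynamicSparseMatrix relies on below (values made up):

    CompressedStorage<double> s;
    s.append(1.0, 2);                    // sorted indices so far: 2, 5, 9
    s.append(2.0, 5);
    s.append(3.0, 9);

    int k    = s.searchLowerIndex(5);    // k == 1: first entry with index >= 5
    double a = s.at(5);                  // 2.0: key present
    double b = s.at(4, -1.0);            // -1.0: key absent, defaultValue is returned
    s.atWithInsertion(4) = 4.0;          // shifts keys 5 and 9 right and inserts key 4
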
diff --git a/Eigen/src/Sparse/DynamicSparseMatrix.h b/Eigen/src/Sparse/DynamicSparseMatrix.h
new file mode 100644
index 000000000..30e54a981
--- /dev/null
+++ b/Eigen/src/Sparse/DynamicSparseMatrix.h
@@ -0,0 +1,284 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_DYNAMIC_SPARSEMATRIX_H
+#define EIGEN_DYNAMIC_SPARSEMATRIX_H
+
+/** \class DynamicSparseMatrix
+ *
+ * \brief A sparse matrix class designed for matrix assembly purpose
+ *
+ * \param _Scalar the scalar type, i.e. the type of the coefficients
+ *
+ * Unlike SparseMatrix, this class provides a much higher degree of flexibility. In particular, it allows
+ * random read/write accesses in log(rho*inner_size), where \c rho is the probability that a coefficient is
+ * nonzero and inner_size is the number of rows if the matrix is column-major and the number of columns
+ * otherwise.
+ *
+ * Internally, the data are stored as a std::vector of compressed vectors. The performance of random writes
+ * might decrease as the number of nonzeros per inner vector increases. In practice, we observed very good
+ * performance up to about 100 nonzeros/vector, and it remains reasonably good up to about 500 nonzeros/vector.
+ *
+ * \see SparseMatrix
+ */
+template<typename _Scalar, int _Flags>
+struct ei_traits<DynamicSparseMatrix<_Scalar, _Flags> >
+{
+ typedef _Scalar Scalar;
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = Dynamic,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = Dynamic,
+ Flags = SparseBit | _Flags,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ SupportedAccessPatterns = OuterRandomAccessPattern
+ };
+};
+
+template<typename _Scalar, int _Flags>
+class DynamicSparseMatrix
+ : public SparseMatrixBase<DynamicSparseMatrix<_Scalar, _Flags> >
+{
+ public:
+ EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(DynamicSparseMatrix)
+ typedef MappedSparseMatrix<Scalar,Flags> Map;
+
+ protected:
+
+ enum { IsRowMajor = Base::IsRowMajor };
+ typedef DynamicSparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
+
+ int m_innerSize;
+ std::vector<CompressedStorage<Scalar> > m_data;
+
+ public:
+
+ inline int rows() const { return IsRowMajor ? outerSize() : m_innerSize; }
+ inline int cols() const { return IsRowMajor ? m_innerSize : outerSize(); }
+ inline int innerSize() const { return m_innerSize; }
+ inline int outerSize() const { return m_data.size(); }
+ inline int innerNonZeros(int j) const { return m_data[j].size(); }
+
+ /** \returns the coefficient value at given position \a row, \a col
+ * This operation involves a log(rho*inner_size) binary search.
+ */
+ inline Scalar coeff(int row, int col) const
+ {
+ const int outer = IsRowMajor ? row : col;
+ const int inner = IsRowMajor ? col : row;
+ return m_data[outer].at(inner);
+ }
+
+ /** \returns a reference to the coefficient value at given position \a row, \a col
+ * This operation involves a log(rho*inner_size) binary search. If the coefficient does not
+ * exist yet, then a sorted insertion into a sequential buffer is performed.
+ */
+ inline Scalar& coeffRef(int row, int col)
+ {
+ const int outer = IsRowMajor ? row : col;
+ const int inner = IsRowMajor ? col : row;
+ return m_data[outer].atWithInsertion(inner);
+ }
+
+ public:
+
+ class InnerIterator;
+
+ inline void setZero()
+ {
+ for (int j=0; j<outerSize(); ++j)
+ m_data[j].clear();
+ }
+
+ /** \returns the number of nonzero coefficients */
+ inline int nonZeros() const
+ {
+ int res = 0;
+ for (int j=0; j<outerSize(); ++j)
+ res += m_data[j].size();
+ return res;
+ }
+
+ /** Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */
+ inline void startFill(int reserveSize = 1000)
+ {
+ int reserveSizePerVector = std::max(reserveSize/outerSize(),4);
+ for (int j=0; j<outerSize(); ++j)
+ {
+ m_data[j].clear();
+ m_data[j].reserve(reserveSizePerVector);
+ }
+ }
+
+ /** Inserts a nonzero coefficient at the coordinates \a row, \a col and returns its reference, assuming that:
+ * 1 - the coefficient does not exist yet;
+ * 2 - it will be the coefficient with the greatest inner coordinate for the given outer coordinate.
+ * In other words, assuming \c *this is column-major, there must not exist any nonzero coefficient of coordinates
+ * \c i \c x \a col such that \c i >= \a row. Otherwise the matrix is invalid.
+ *
+ * \see fillrand(), coeffRef()
+ */
+ inline Scalar& fill(int row, int col)
+ {
+ const int outer = IsRowMajor ? row : col;
+ const int inner = IsRowMajor ? col : row;
+ ei_assert(outer<int(m_data.size()) && inner<m_innerSize);
+ ei_assert((m_data[outer].size()==0) || (m_data[outer].index(m_data[outer].size()-1)<inner));
+ m_data[outer].append(0, inner);
+ return m_data[outer].value(m_data[outer].size()-1);
+ }
+
+ /** Like fill() but with random inner coordinates.
+ * Compared to the generic coeffRef(), the only restriction is that we assume
+ * the coefficient does not exist yet.
+ */
+ inline Scalar& fillrand(int row, int col)
+ {
+ const int outer = IsRowMajor ? row : col;
+ const int inner = IsRowMajor ? col : row;
+
+ int startId = 0;
+ int id = m_data[outer].size() - 1;
+ m_data[outer].resize(id+2,1);
+
+ while ( (id >= startId) && (m_data[outer].index(id) > inner) )
+ {
+ m_data[outer].index(id+1) = m_data[outer].index(id);
+ m_data[outer].value(id+1) = m_data[outer].value(id);
+ --id;
+ }
+ m_data[outer].index(id+1) = inner;
+ m_data[outer].value(id+1) = 0;
+ return m_data[outer].value(id+1);
+ }
+
+ /** Does nothing. Provided for compatibility with SparseMatrix. */
+ inline void endFill() {}
+
+ /** Resize the matrix without preserving the data (the matrix is set to zero)
+ */
+ void resize(int rows, int cols)
+ {
+ const int outerSize = IsRowMajor ? rows : cols;
+ m_innerSize = IsRowMajor ? cols : rows;
+ setZero();
+ if (int(m_data.size()) != outerSize)
+ {
+ m_data.resize(outerSize);
+ }
+ }
+
+ void resizeAndKeepData(int rows, int cols)
+ {
+ const int outerSize = IsRowMajor ? rows : cols;
+ const int innerSize = IsRowMajor ? cols : rows;
+ if (m_innerSize>innerSize)
+ {
+ // remove all coefficients with innerCoord>=innerSize
+ // TODO
+ std::cerr << "not implemented yet\n";
+ exit(2);
+ }
+ if (int(m_data.size()) != outerSize)
+ {
+ m_data.resize(outerSize);
+ }
+ }
+
+ inline DynamicSparseMatrix()
+ : m_innerSize(0)
+ {
+ ei_assert(innerSize()==0 && outerSize()==0);
+ }
+
+ inline DynamicSparseMatrix(int rows, int cols)
+ : m_innerSize(0)
+ {
+ resize(rows, cols);
+ }
+
+ template<typename OtherDerived>
+ inline DynamicSparseMatrix(const SparseMatrixBase<OtherDerived>& other)
+ : m_innerSize(0)
+ {
+ *this = other.derived();
+ }
+
+ inline DynamicSparseMatrix(const DynamicSparseMatrix& other)
+ : m_innerSize(0)
+ {
+ *this = other.derived();
+ }
+
+ inline void swap(DynamicSparseMatrix& other)
+ {
+ //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
+ std::swap(m_innerSize, other.m_innerSize);
+ //std::swap(m_outerSize, other.m_outerSize);
+ m_data.swap(other.m_data);
+ }
+
+ inline DynamicSparseMatrix& operator=(const DynamicSparseMatrix& other)
+ {
+ if (other.isRValue())
+ {
+ swap(other.const_cast_derived());
+ }
+ else
+ {
+ resize(other.rows(), other.cols());
+ m_data = other.m_data;
+ }
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ inline DynamicSparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ return SparseMatrixBase<DynamicSparseMatrix>::operator=(other.derived());
+ }
+
+ /** Destructor */
+ inline ~DynamicSparseMatrix() {}
+};
+
+template<typename Scalar, int _Flags>
+class DynamicSparseMatrix<Scalar,_Flags>::InnerIterator : public SparseVector<Scalar,_Flags>::InnerIterator
+{
+ typedef typename SparseVector<Scalar,_Flags>::InnerIterator Base;
+ public:
+ InnerIterator(const DynamicSparseMatrix& mat, int outer)
+ : Base(mat.m_data[outer]), m_outer(outer)
+ {}
+
+ inline int row() const { return IsRowMajor ? m_outer : Base::index(); }
+ inline int col() const { return IsRowMajor ? Base::index() : m_outer; }
+
+ protected:
+ const int m_outer;
+};
+
+#endif // EIGEN_DYNAMIC_SPARSEMATRIX_H
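
Taken together, random access and coherent traversal of the new class look as follows (a sketch, not from the patch; do_something() is a placeholder):

    DynamicSparseMatrix<double> m(rows, cols);
    // ... assemble m ...

    double x = m.coeff(i, j);        // binary search within inner vector j, no insertion
    m.coeffRef(i, j) = x + 1.0;      // sorted insertion if (i,j) was not stored yet

    for (int j = 0; j < m.outerSize(); ++j)   // coherent pass, one inner vector at a time
      for (DynamicSparseMatrix<double>::InnerIterator it(m, j); it; ++it)
        do_something(it.row(), it.col(), it.value());
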
diff --git a/Eigen/src/Sparse/SparseMatrix.h b/Eigen/src/Sparse/SparseMatrix.h
index d90a7c69f..2ce466428 100644
--- a/Eigen/src/Sparse/SparseMatrix.h
+++ b/Eigen/src/Sparse/SparseMatrix.h
@@ -45,7 +45,7 @@ struct ei_traits<SparseMatrix<_Scalar, _Flags> >
MaxColsAtCompileTime = Dynamic,
Flags = SparseBit | _Flags,
CoeffReadCost = NumTraits<Scalar>::ReadCost,
- SupportedAccessPatterns = FullyCoherentAccessPattern
+ SupportedAccessPatterns = InnerRandomAccessPattern
};
};
@@ -91,19 +91,7 @@ class SparseMatrix
{
const int outer = IsRowMajor ? row : col;
const int inner = IsRowMajor ? col : row;
-
- int start = m_outerIndex[outer];
- int end = m_outerIndex[outer+1];
- if (start==end)
- return Scalar(0);
- else if (end>0 && inner==m_data.index(end-1))
- return m_data.value(end-1);
- // ^^ optimization: let's first check if it is the last coefficient
- // (very common in high level algorithms)
-
- const int* r = std::lower_bound(&m_data.index(start),&m_data.index(end-1),inner);
- const int id = r-&m_data.index(0);
- return ((*r==inner) && (id<end)) ? m_data.value(id) : Scalar(0);
+ return m_data.atInRange(m_outerIndex[outer], m_outerIndex[outer+1], inner);
}
inline Scalar& coeffRef(int row, int col)
@@ -115,9 +103,8 @@ class SparseMatrix
int end = m_outerIndex[outer+1];
ei_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
ei_assert(end>start && "coeffRef cannot be called on a zero coefficient");
- int* r = std::lower_bound(&m_data.index(start),&m_data.index(end),inner);
- const int id = r-&m_data.index(0);
- ei_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
+ const int id = m_data.searchLowerIndex(start,end-1,inner);
+ ei_assert((id<end) && (m_data.index(id)==inner) && "coeffRef cannot be called on a zero coefficient");
return m_data.value(id);
}
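
Both rewritten accessors lean on the compressed layout, in which the nonzeros of inner vector \c j occupy a contiguous slice of the shared index/value arrays. A sketch of the relationship the new calls encode:

    // compressed layout: inner vector j lives in the half-open slice
    //   [ m_outerIndex[j], m_outerIndex[j+1] )
    // of the single index/value arrays, hence the explicit bounds:
    Scalar v = m_data.atInRange(m_outerIndex[j], m_outerIndex[j+1], inner);
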
diff --git a/Eigen/src/Sparse/SparseMatrixBase.h b/Eigen/src/Sparse/SparseMatrixBase.h
index dd4eeff16..32011afaa 100644
--- a/Eigen/src/Sparse/SparseMatrixBase.h
+++ b/Eigen/src/Sparse/SparseMatrixBase.h
@@ -69,7 +69,7 @@ template<typename Derived> class SparseMatrixBase
/**< This stores expression \ref flags flags which may or may not be inherited by new expressions
* constructed from this one. See the \ref flags "list of flags".
*/
-
+
CoeffReadCost = ei_traits<Derived>::CoeffReadCost,
/**< This is a rough measure of how expensive it is to read one coefficient from
* this expression.
@@ -153,7 +153,10 @@ template<typename Derived> class SparseMatrixBase
{
// std::cout << "Derived& operator=(const MatrixBase<OtherDerived>& other)\n";
//const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
- ei_assert((!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit))) && "the transpose operation is supposed to be handled in SparseMatrix::operator=");
+ ei_assert(( ((ei_traits<Derived>::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
+ (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) &&
+ "the transpose operation is supposed to be handled in SparseMatrix::operator=");
+
const int outerSize = other.outerSize();
//typedef typename ei_meta_if<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::ret TempType;
// thanks to shallow copies, we always eval to a temporary
diff --git a/Eigen/src/Sparse/SparseProduct.h b/Eigen/src/Sparse/SparseProduct.h
index 5a2c294a2..06ea703f8 100644
--- a/Eigen/src/Sparse/SparseProduct.h
+++ b/Eigen/src/Sparse/SparseProduct.h
@@ -246,7 +246,7 @@ struct ei_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
{
// let's transpose the product to get a column x column product
SparseTemporaryType _res(res.cols(), res.rows());
- ei_sparse_product_selector<Rhs,Lhs,ResultType,ColMajor,ColMajor,ColMajor>
+ ei_sparse_product_selector<Rhs,Lhs,SparseTemporaryType,ColMajor,ColMajor,ColMajor>
::run(rhs, lhs, _res);
res = _res.transpose();
}
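
The actual fix is the third template argument: the temporary _res has type SparseTemporaryType, not ResultType, so the recursive selector must be instantiated for it. The surrounding trick rests on the identity

    $(L R)^\top = R^\top L^\top$

combined with the fact that the row-major storage of a matrix is bitwise the column-major storage of its transpose: running the column-major kernel on (rhs, lhs) therefore produces the transposed product, which the final line transposes back into res.
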
diff --git a/Eigen/src/Sparse/SparseUtil.h b/Eigen/src/Sparse/SparseUtil.h
index 286954fd6..18d0ee238 100644
--- a/Eigen/src/Sparse/SparseUtil.h
+++ b/Eigen/src/Sparse/SparseUtil.h
@@ -102,30 +102,36 @@ enum {
template<typename Derived> class SparseMatrixBase;
template<typename _Scalar, int _Flags = 0> class SparseMatrix;
+template<typename _Scalar, int _Flags = 0> class DynamicSparseMatrix;
template<typename _Scalar, int _Flags = 0> class SparseVector;
template<typename _Scalar, int _Flags = 0> class MappedSparseMatrix;
-template<typename MatrixType> class SparseTranspose;
-template<typename MatrixType> class SparseInnerVector;
-template<typename Derived> class SparseCwise;
-template<typename UnaryOp, typename MatrixType> class SparseCwiseUnaryOp;
-template<typename BinaryOp, typename Lhs, typename Rhs> class SparseCwiseBinaryOp;
-template<typename ExpressionType, unsigned int Added, unsigned int Removed> class SparseFlagged;
+template<typename MatrixType> class SparseTranspose;
+template<typename MatrixType> class SparseInnerVector;
+template<typename Derived> class SparseCwise;
+template<typename UnaryOp, typename MatrixType> class SparseCwiseUnaryOp;
+template<typename BinaryOp, typename Lhs, typename Rhs> class SparseCwiseBinaryOp;
+template<typename ExpressionType,
+ unsigned int Added, unsigned int Removed> class SparseFlagged;
template<typename Lhs, typename Rhs> struct ei_sparse_product_mode;
template<typename Lhs, typename Rhs, int ProductMode = ei_sparse_product_mode<Lhs,Rhs>::value> struct SparseProductReturnType;
-const int AccessPatternNotSupported = 0x0;
-const int AccessPatternSupported = 0x1;
-
-
-template<typename MatrixType, int AccessPattern> struct ei_support_access_pattern
-{
- enum { ret = (int(ei_traits<MatrixType>::SupportedAccessPatterns) & AccessPattern) == AccessPattern
- ? AccessPatternSupported
- : AccessPatternNotSupported
- };
-};
+const int CoherentAccessPattern = 0x1;
+const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern;
+const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern;
+const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;
+
+// const int AccessPatternNotSupported = 0x0;
+// const int AccessPatternSupported = 0x1;
+//
+// template<typename MatrixType, int AccessPattern> struct ei_support_access_pattern
+// {
+// enum { ret = (int(ei_traits<MatrixType>::SupportedAccessPatterns) & AccessPattern) == AccessPattern
+// ? AccessPatternSupported
+// : AccessPatternNotSupported
+// };
+// };
template<typename T> class ei_eval<T,IsSparse>
{
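
The new pattern constants form a small capability lattice: both random patterns include the CoherentAccessPattern bit, and RandomAccessPattern subsumes the other two. A sketch of how they are meant to be queried, mirroring both the deleted ei_support_access_pattern helper and the new assertion in SparseMatrixBase.h (the trait name here is hypothetical):

    // a pattern P is supported iff every bit of P is set in SupportedAccessPatterns
    template<typename MatrixType>
    struct ei_supports_outer_random_access
    {
      enum { ret = (int(ei_traits<MatrixType>::SupportedAccessPatterns)
                    & OuterRandomAccessPattern) == OuterRandomAccessPattern };
    };
    // true for DynamicSparseMatrix (OuterRandomAccessPattern),
    // false for SparseMatrix (InnerRandomAccessPattern lacks the 0x4 bit)
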
diff --git a/Eigen/src/Sparse/SparseVector.h b/Eigen/src/Sparse/SparseVector.h
index 479b678e1..39c1e66ac 100644
--- a/Eigen/src/Sparse/SparseVector.h
+++ b/Eigen/src/Sparse/SparseVector.h
@@ -47,12 +47,10 @@ struct ei_traits<SparseVector<_Scalar, _Flags> >
MaxColsAtCompileTime = ColsAtCompileTime,
Flags = SparseBit | _Flags,
CoeffReadCost = NumTraits<Scalar>::ReadCost,
- SupportedAccessPatterns = FullyCoherentAccessPattern
+ SupportedAccessPatterns = InnerRandomAccessPattern
};
};
-
-
template<typename _Scalar, int _Flags>
class SparseVector
: public SparseMatrixBase<SparseVector<_Scalar, _Flags> >
@@ -89,22 +87,7 @@ class SparseVector
ei_assert((IsColVector ? col : row)==0);
return coeff(IsColVector ? row : col);
}
- inline Scalar coeff(int i) const
- {
- int start = 0;
- int end = m_data.size();
- if (start==end)
- return Scalar(0);
- else if (end>0 && i==m_data.index(end-1))
- return m_data.value(end-1);
- // ^^ optimization: let's first check if it is the last coefficient
- // (very common in high level algorithms)
-
- // TODO move this search to ScalarArray
- const int* r = std::lower_bound(&m_data.index(start),&m_data.index(end-1),i);
- const int id = r-&m_data.index(0);
- return ((*r==i) && (id<end)) ? m_data.value(id) : Scalar(0);
- }
+ inline Scalar coeff(int i) const { return m_data.at(i); }
inline Scalar& coeffRef(int row, int col)
{
@@ -112,16 +95,15 @@ class SparseVector
return coeff(IsColVector ? row : col);
}
+ /** \returns a reference to the coefficient value at given index \a i
+ * This operation involves a log(rho*size) binary search. If the coefficient does not
+ * exist yet, then a sorted insertion into a sequential buffer is performed.
+ *
+ * This insertion might be very costly if the number of nonzeros above \a i is large.
+ */
inline Scalar& coeffRef(int i)
{
- int start = 0;
- int end = m_data.size();
- ei_assert(end>=start && "you probably called coeffRef on a non finalized vector");
- ei_assert(end>start && "coeffRef cannot be called on a zero coefficient");
- int* r = std::lower_bound(&m_data.index(start),&m_data.index(end),i);
- const int id = r-&m_data.index(0);
- ei_assert((*r==i) && (id<end) && "coeffRef cannot be called on a zero coefficient");
- return m_data.value(id);
+ return m_data.atWithInsertion(i);
}
public:
@@ -301,29 +283,33 @@ class SparseVector<Scalar,_Flags>::InnerIterator
{
public:
InnerIterator(const SparseVector& vec, int outer=0)
- : m_vector(vec), m_id(0), m_end(vec.nonZeros())
+ : m_data(vec.m_data), m_id(0), m_end(m_data.size())
{
ei_assert(outer==0);
}
+
+ InnerIterator(const CompressedStorage<Scalar>& data)
+ : m_data(data), m_id(0), m_end(m_data.size())
+ {}
template<unsigned int Added, unsigned int Removed>
InnerIterator(const Flagged<SparseVector,Added,Removed>& vec, int outer)
- : m_vector(vec._expression()), m_id(0), m_end(m_vector.nonZeros())
+ : m_data(vec._expression().m_data), m_id(0), m_end(m_data.size())
{}
inline InnerIterator& operator++() { m_id++; return *this; }
- inline Scalar value() const { return m_vector.m_data.value(m_id); }
- inline Scalar& valueRef() { return const_cast<Scalar&>(m_vector.m_data.value(m_id)); }
+ inline Scalar value() const { return m_data.value(m_id); }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id)); }
- inline int index() const { return m_vector.m_data.index(m_id); }
+ inline int index() const { return m_data.index(m_id); }
inline int row() const { return IsColVector ? index() : 0; }
inline int col() const { return IsColVector ? 0 : index(); }
inline operator bool() const { return (m_id < m_end); }
protected:
- const SparseVector& m_vector;
+ const CompressedStorage<Scalar>& m_data;
int m_id;
const int m_end;
};
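
Since the iterator now wraps a CompressedStorage directly, the same class serves both a whole SparseVector and one inner vector of a DynamicSparseMatrix (see the inheritance at the end of DynamicSparseMatrix.h above). A minimal traversal sketch (assumes <iostream>):

    SparseVector<double> v(1000);
    // ... fill v ...
    for (SparseVector<double>::InnerIterator it(v); it; ++it)
      std::cout << it.index() << " -> " << it.value() << "\n";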