author    Gael Guennebaud <g.gael@free.fr>    2009-01-07 17:01:57 +0000
committer Gael Guennebaud <g.gael@free.fr>    2009-01-07 17:01:57 +0000
commit 709e9033355834e6d9166dd9eac975a383e55ece (patch)
tree   2bc70cec7128d1eb15afd49ab6199ea67036a525
parent 7078cfaeaa52db71229cb81c5da07d593b2e5174 (diff)
Sparse module:
* extend unit tests
* add support for generic sum reduction and dot product
* optimize cwise()*: this is a special case of CwiseBinaryOp where only the coefficients that are non-zero in *both* matrices have to be processed. Perhaps other binary operations share this property?
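For illustration (not part of the patch), a minimal usage sketch of what this change enables, assuming the sparse API visible in the diff below (SparseVector, fill(), dot(), sum(), cwise()); sizes and values are made up:

#include <Eigen/Core>
#include <Eigen/Sparse>
#include <iostream>
using namespace Eigen;

// Usage sketch only: exercises the new sparse reductions (sum, dot)
// and the optimized cwise()* product described in this commit.
int main()
{
  // two sparse vectors of size 8, coefficients inserted in increasing index order
  SparseVector<double> v1(8), v2(8);
  v1.fill(1) = 2.0;  v1.fill(5) = 3.0;
  v2.fill(3) = 4.0;  v2.fill(5) = 7.0;

  // generic sum reduction over the stored coefficients (SparseRedux.h)
  std::cout << v1.sum() << "\n";                 // 5

  // sparse dot product: both index lists are walked in parallel and
  // only matching indices contribute (here only index 5)
  std::cout << v1.dot(v2) << "\n";               // 3*7 = 21

  // cwise()* only has to process coefficients that are non-zero in
  // *both* operands, hence the specialized iterator in this patch
  std::cout << (v1.cwise() * v2).sum() << "\n";  // also 21
  return 0;
}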
-rw-r--r--  Eigen/Sparse                         |   1
-rw-r--r--  Eigen/src/Sparse/CoreIterators.h     | 149
-rw-r--r--  Eigen/src/Sparse/SparseMatrixBase.h  |  43
-rw-r--r--  Eigen/src/Sparse/SparseRedux.h       |  70
-rw-r--r--  Eigen/src/Sparse/SparseVector.h      |  22
-rw-r--r--  Eigen/src/Sparse/SuperLUSupport.h    |  18
-rw-r--r--  test/CMakeLists.txt                  |   2
-rw-r--r--  test/sparse.h                        |  24
-rw-r--r--  test/sparse_vector.cpp               |  92
9 files changed, 382 insertions, 39 deletions
diff --git a/Eigen/Sparse b/Eigen/Sparse
index ce65d4dad..168511c66 100644
--- a/Eigen/Sparse
+++ b/Eigen/Sparse
@@ -79,6 +79,7 @@ namespace Eigen {
#include "src/Sparse/SparseMatrix.h"
#include "src/Sparse/SparseVector.h"
#include "src/Sparse/CoreIterators.h"
+#include "src/Sparse/SparseRedux.h"
#include "src/Sparse/SparseProduct.h"
#include "src/Sparse/TriangularSolver.h"
#include "src/Sparse/SparseLLT.h"
diff --git a/Eigen/src/Sparse/CoreIterators.h b/Eigen/src/Sparse/CoreIterators.h
index ebe0bf8bd..b1d113a95 100644
--- a/Eigen/src/Sparse/CoreIterators.h
+++ b/Eigen/src/Sparse/CoreIterators.h
@@ -33,21 +33,21 @@ class MatrixBase<Derived>::InnerIterator
{
typedef typename Derived::Scalar Scalar;
public:
- InnerIterator(const Derived& mat, int outer)
+ EIGEN_STRONG_INLINE InnerIterator(const Derived& mat, int outer)
: m_matrix(mat), m_inner(0), m_outer(outer), m_end(mat.rows())
{}
- Scalar value() const
+ EIGEN_STRONG_INLINE Scalar value() const
{
return (Derived::Flags&RowMajorBit) ? m_matrix.coeff(m_outer, m_inner)
: m_matrix.coeff(m_inner, m_outer);
}
- InnerIterator& operator++() { m_inner++; return *this; }
+ EIGEN_STRONG_INLINE InnerIterator& operator++() { m_inner++; return *this; }
- int index() const { return m_inner; }
+ EIGEN_STRONG_INLINE int index() const { return m_inner; }
- operator bool() const { return m_inner < m_end && m_inner>=0; }
+ EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }
protected:
const Derived& m_matrix;
@@ -61,7 +61,7 @@ class Transpose<MatrixType>::InnerIterator : public MatrixType::InnerIterator
{
public:
- InnerIterator(const Transpose& trans, int outer)
+ EIGEN_STRONG_INLINE InnerIterator(const Transpose& trans, int outer)
: MatrixType::InnerIterator(trans.m_matrix, outer)
{}
};
@@ -74,7 +74,7 @@ class Block<MatrixType, BlockRows, BlockCols, PacketAccess, _DirectAccessStatus>
typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
public:
- InnerIterator(const Block& block, int outer)
+ EIGEN_STRONG_INLINE InnerIterator(const Block& block, int outer)
: m_iter(block.m_matrix,(Block::Flags&RowMajor) ? block.m_startRow.value() + outer : block.m_startCol.value() + outer),
m_start( (Block::Flags&RowMajor) ? block.m_startCol.value() : block.m_startRow.value()),
m_end(m_start + ((Block::Flags&RowMajor) ? block.m_blockCols.value() : block.m_blockRows.value())),
@@ -84,24 +84,24 @@ class Block<MatrixType, BlockRows, BlockCols, PacketAccess, _DirectAccessStatus>
++m_iter;
}
- InnerIterator& operator++()
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
{
++m_iter;
return *this;
}
- Scalar value() const { return m_iter.value(); }
+ EIGEN_STRONG_INLINE Scalar value() const { return m_iter.value(); }
- int index() const { return m_iter.index() - m_offset; }
+ EIGEN_STRONG_INLINE int index() const { return m_iter.index() - m_offset; }
- operator bool() const { return m_iter && m_iter.index()<m_end; }
+ EIGEN_STRONG_INLINE operator bool() const { return m_iter && m_iter.index()<m_end; }
protected:
MatrixTypeIterator m_iter;
int m_start;
int m_end;
int m_offset;
-};
+};
template<typename MatrixType, int BlockRows, int BlockCols, int PacketAccess>
class Block<MatrixType, BlockRows, BlockCols, PacketAccess, IsSparse>::InnerIterator
@@ -111,7 +111,7 @@ class Block<MatrixType, BlockRows, BlockCols, PacketAccess, IsSparse>::InnerIter
typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
public:
- InnerIterator(const Block& block, int outer)
+ EIGEN_STRONG_INLINE InnerIterator(const Block& block, int outer)
: m_iter(block.m_matrix,(Block::Flags&RowMajor) ? block.m_startRow.value() + outer : block.m_startCol.value() + outer),
m_start( (Block::Flags&RowMajor) ? block.m_startCol.value() : block.m_startRow.value()),
m_end(m_start + ((Block::Flags&RowMajor) ? block.m_blockCols.value() : block.m_blockRows.value())),
@@ -121,17 +121,17 @@ class Block<MatrixType, BlockRows, BlockCols, PacketAccess, IsSparse>::InnerIter
++m_iter;
}
- InnerIterator& operator++()
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
{
++m_iter;
return *this;
}
- Scalar value() const { return m_iter.value(); }
+ EIGEN_STRONG_INLINE Scalar value() const { return m_iter.value(); }
- int index() const { return m_iter.index() - m_offset; }
+ EIGEN_STRONG_INLINE int index() const { return m_iter.index() - m_offset; }
- operator bool() const { return m_iter && m_iter.index()<m_end; }
+ EIGEN_STRONG_INLINE operator bool() const { return m_iter && m_iter.index()<m_end; }
protected:
MatrixTypeIterator m_iter;
@@ -148,13 +148,13 @@ class CwiseUnaryOp<UnaryOp,MatrixType>::InnerIterator
typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
public:
- InnerIterator(const CwiseUnaryOp& unaryOp, int outer)
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOp& unaryOp, int outer)
: m_iter(unaryOp.m_matrix,outer), m_functor(unaryOp.m_functor), m_id(-1)
{
this->operator++();
}
- InnerIterator& operator++()
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
{
if (m_iter)
{
@@ -169,11 +169,11 @@ class CwiseUnaryOp<UnaryOp,MatrixType>::InnerIterator
return *this;
}
- Scalar value() const { return m_value; }
+ EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
- int index() const { return m_id; }
+ EIGEN_STRONG_INLINE int index() const { return m_id; }
- operator bool() const { return m_id>=0; }
+ EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; }
protected:
MatrixTypeIterator m_iter;
@@ -182,23 +182,54 @@ class CwiseUnaryOp<UnaryOp,MatrixType>::InnerIterator
int m_id;
};
+template<typename T> struct ei_is_scalar_product { enum { ret = false }; };
+template<typename T> struct ei_is_scalar_product<ei_scalar_product_op<T> > { enum { ret = true }; };
+
+template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived>
+class CwiseBinaryOpInnerIterator;
+
template<typename BinaryOp, typename Lhs, typename Rhs>
class CwiseBinaryOp<BinaryOp,Lhs,Rhs>::InnerIterator
+ : public CwiseBinaryOpInnerIterator<BinaryOp,Lhs,Rhs, typename CwiseBinaryOp<BinaryOp,Lhs,Rhs>::InnerIterator>
{
+ typedef CwiseBinaryOpInnerIterator<
+ BinaryOp,Lhs,Rhs, typename CwiseBinaryOp<BinaryOp,Lhs,Rhs>::InnerIterator> Base;
+ public:
typedef typename CwiseBinaryOp::Scalar Scalar;
typedef typename ei_traits<CwiseBinaryOp>::_LhsNested _LhsNested;
typedef typename _LhsNested::InnerIterator LhsIterator;
typedef typename ei_traits<CwiseBinaryOp>::_RhsNested _RhsNested;
typedef typename _RhsNested::InnerIterator RhsIterator;
+// public:
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOp& binOp, int outer)
+ : Base(binOp.m_lhs,binOp.m_rhs,binOp.m_functor,outer)
+ {}
+};
+
+template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived>
+class CwiseBinaryOpInnerIterator
+{
+ typedef CwiseBinaryOp<BinaryOp,Lhs,Rhs> ExpressionType;
+ typedef typename ExpressionType::Scalar Scalar;
+ typedef typename ei_traits<ExpressionType>::_LhsNested _LhsNested;
+// typedef typename ei_traits<ExpressionType>::LhsIterator LhsIterator;
+ typedef typename ei_traits<ExpressionType>::_RhsNested _RhsNested;
+// typedef typename ei_traits<ExpressionType>::RhsIterator RhsIterator;
+// typedef typename ei_traits<CwiseBinaryOp>::_LhsNested _LhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+// typedef typename ei_traits<CwiseBinaryOp>::_RhsNested _RhsNested;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+// enum { IsProduct = ei_is_scalar_product<BinaryOp>::ret };
public:
- InnerIterator(const CwiseBinaryOp& binOp, int outer)
- : m_lhsIter(binOp.m_lhs,outer), m_rhsIter(binOp.m_rhs,outer), m_functor(binOp.m_functor), m_id(-1)
+ EIGEN_STRONG_INLINE CwiseBinaryOpInnerIterator(const _LhsNested& lhs, const _RhsNested& rhs,
+ const BinaryOp& functor, int outer)
+ : m_lhsIter(lhs,outer), m_rhsIter(rhs,outer), m_functor(functor), m_id(-1)
{
this->operator++();
}
- InnerIterator& operator++()
+ EIGEN_STRONG_INLINE Derived& operator++()
{
if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index()))
{
@@ -223,14 +254,14 @@ class CwiseBinaryOp<BinaryOp,Lhs,Rhs>::InnerIterator
{
m_id = -1;
}
- return *this;
+ return *static_cast<Derived*>(this);
}
- Scalar value() const { return m_value; }
+ EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
- int index() const { return m_id; }
+ EIGEN_STRONG_INLINE int index() const { return m_id; }
- operator bool() const { return m_id>=0; }
+ EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; }
protected:
LhsIterator m_lhsIter;
@@ -239,5 +270,65 @@ class CwiseBinaryOp<BinaryOp,Lhs,Rhs>::InnerIterator
Scalar m_value;
int m_id;
};
+/*
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class CwiseBinaryOpInnerIterator<ei_scalar_product_op<T>,Lhs,Rhs,Derived>
+{
+ typedef typename CwiseBinaryOp::Scalar Scalar;
+ typedef typename ei_traits<CwiseBinaryOp>::_LhsNested _LhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename ei_traits<CwiseBinaryOp>::_RhsNested _RhsNested;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ public:
+
+ EIGEN_STRONG_INLINE CwiseBinaryOpInnerIterator(const CwiseBinaryOp& binOp, int outer)
+ : m_lhsIter(binOp.m_lhs,outer), m_rhsIter(binOp.m_rhs,outer), m_functor(binOp.m_functor)//, m_id(-1)
+ {
+ //this->operator++();
+ while (m_lhsIter && m_rhsIter && m_lhsIter.index() != m_rhsIter.index())
+ {
+ if (m_lhsIter.index() < m_rhsIter.index())
+ ++m_lhsIter;
+ else
+ ++m_rhsIter;
+ }
+ }
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+// m_id = -1;
+ asm("#beginwhile");
+ while (m_lhsIter && m_rhsIter)
+ {
+ if (m_lhsIter.index() == m_rhsIter.index())
+ {
+// m_id = m_lhsIter.index();
+ //m_value = m_functor(m_lhsIter.value(), m_rhsIter.value());
+ ++m_lhsIter;
+ ++m_rhsIter;
+ break;
+ }
+ else if (m_lhsIter.index() < m_rhsIter.index())
+ ++m_lhsIter;
+ else
+ ++m_rhsIter;
+ }
+ asm("#endwhile");
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); }
+
+ EIGEN_STRONG_INLINE int index() const { return m_lhsIter.index(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter && m_rhsIter; }
+
+ protected:
+ LhsIterator m_lhsIter;
+ RhsIterator m_rhsIter;
+ const BinaryOp& m_functor;
+// Scalar m_value;
+// int m_id;
+};*/
#endif // EIGEN_COREITERATORS_H
diff --git a/Eigen/src/Sparse/SparseMatrixBase.h b/Eigen/src/Sparse/SparseMatrixBase.h
index 4ea95939f..d70c99259 100644
--- a/Eigen/src/Sparse/SparseMatrixBase.h
+++ b/Eigen/src/Sparse/SparseMatrixBase.h
@@ -167,6 +167,49 @@ class SparseMatrixBase : public MatrixBase<Derived>
return s;
}
+// template<typename OtherDerived>
+// Scalar dot(const MatrixBase<OtherDerived>& other) const
+// {
+// EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+// EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+// EIGEN_STATIC_ASSERT((ei_is_same_type<Scalar, typename OtherDerived::Scalar>::ret),
+// YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+//
+// ei_assert(derived().size() == other.size());
+// // short version, but the assembly looks more complicated because
+// // of the CwiseBinaryOp iterator complexity
+// // return res = (derived().cwise() * other.derived().conjugate()).sum();
+//
+// // optimized, generic version
+// typename Derived::InnerIterator i(derived(),0);
+// typename OtherDerived::InnerIterator j(other.derived(),0);
+// Scalar res = 0;
+// while (i && j)
+// {
+// if (i.index()==j.index())
+// {
+// // std::cerr << i.value() << " * " << j.value() << "\n";
+// res += i.value() * ei_conj(j.value());
+// ++i; ++j;
+// }
+// else if (i.index()<j.index())
+// ++i;
+// else
+// ++j;
+// }
+// return res;
+// }
+//
+// Scalar sum() const
+// {
+// Scalar res = 0;
+// for (typename Derived::InnerIterator iter(*this,0); iter; ++iter)
+// {
+// res += iter.value();
+// }
+// return res;
+// }
+
protected:
bool m_isRValue;
diff --git a/Eigen/src/Sparse/SparseRedux.h b/Eigen/src/Sparse/SparseRedux.h
new file mode 100644
index 000000000..746ae2f8a
--- /dev/null
+++ b/Eigen/src/Sparse/SparseRedux.h
@@ -0,0 +1,70 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEREDUX_H
+#define EIGEN_SPARSEREDUX_H
+
+
+template<typename Derived, int Vectorization, int Unrolling>
+struct ei_sum_impl<Derived, Vectorization, Unrolling, IsSparse>
+{
+ typedef typename Derived::Scalar Scalar;
+ static Scalar run(const Derived& mat)
+ {
+ ei_assert(mat.rows()>0 && mat.cols()>0 && "you are using a non initialized matrix");
+ Scalar res = 0;
+ for (int j=0; j<mat.outerSize(); ++j)
+ for (typename Derived::InnerIterator iter(mat,j); iter; ++iter)
+ res += iter.value();
+ return res;
+ }
+};
+
+template<typename Derived1, typename Derived2, int Vectorization, int Unrolling>
+struct ei_dot_impl<Derived1, Derived2, Vectorization, Unrolling, IsSparse>
+{
+ typedef typename Derived1::Scalar Scalar;
+ static Scalar run(const Derived1& v1, const Derived2& v2)
+ {
+ ei_assert(v1.size()>0 && "you are using a non initialized vector");
+ typename Derived1::InnerIterator i(v1,0);
+ typename Derived2::InnerIterator j(v2,0);
+ Scalar res = 0;
+ while (i && j)
+ {
+ if (i.index()==j.index())
+ {
+ res += i.value() * ei_conj(j.value());
+ ++i; ++j;
+ }
+ else if (i.index()<j.index())
+ ++i;
+ else
+ ++j;
+ }
+ return res;
+ }
+};
+
+#endif // EIGEN_SPARSEREDUX_H
diff --git a/Eigen/src/Sparse/SparseVector.h b/Eigen/src/Sparse/SparseVector.h
index 2f80b6ea1..7a2ce70e5 100644
--- a/Eigen/src/Sparse/SparseVector.h
+++ b/Eigen/src/Sparse/SparseVector.h
@@ -272,6 +272,28 @@ class SparseVector
return s;
}
+ // this specialized version does not seems to be faster
+// Scalar dot(const SparseVector& other) const
+// {
+// int i=0, j=0;
+// Scalar res = 0;
+// asm("#begindot");
+// while (i<nonZeros() && j<other.nonZeros())
+// {
+// if (m_data.index(i)==other.m_data.index(j))
+// {
+// res += m_data.value(i) * ei_conj(other.m_data.value(j));
+// ++i; ++j;
+// }
+// else if (m_data.index(i)<other.m_data.index(j))
+// ++i;
+// else
+// ++j;
+// }
+// asm("#enddot");
+// return res;
+// }
+
/** Destructor */
inline ~SparseVector() {}
};
diff --git a/Eigen/src/Sparse/SuperLUSupport.h b/Eigen/src/Sparse/SuperLUSupport.h
index 6f554fa6e..c9acfc25c 100644
--- a/Eigen/src/Sparse/SuperLUSupport.h
+++ b/Eigen/src/Sparse/SuperLUSupport.h
@@ -114,13 +114,13 @@ struct SluMatrix : SuperMatrix
}
};
-template<typename Scalar, int Rows, int Cols, int StorageOrder, int MRows, int MCols>
-struct SluMatrixMapHelper<Matrix<Scalar,Rows,Cols,StorageOrder,MRows,MCols> >
+template<typename Scalar, int Rows, int Cols, int Options, int MRows, int MCols>
+struct SluMatrixMapHelper<Matrix<Scalar,Rows,Cols,Options,MRows,MCols> >
{
- typedef Matrix<Scalar,Rows,Cols,StorageOrder,MRows,MCols> MatrixType;
+ typedef Matrix<Scalar,Rows,Cols,Options,MRows,MCols> MatrixType;
static void run(MatrixType& mat, SluMatrix& res)
{
- assert(StorageOrder==0 && "row-major dense matrices is not supported by SuperLU");
+ ei_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices is not supported by SuperLU");
res.setStorageType(SLU_DN);
res.setScalarType<Scalar>();
res.Mtype = SLU_GE;
@@ -139,7 +139,7 @@ struct SluMatrixMapHelper<SparseMatrix<Scalar,Flags> >
typedef SparseMatrix<Scalar,Flags> MatrixType;
static void run(MatrixType& mat, SluMatrix& res)
{
- if (Flags&RowMajorBit)
+ if ((Flags&RowMajorBit)==RowMajorBit)
{
res.setStorageType(SLU_NR);
res.nrow = mat.cols();
@@ -181,7 +181,7 @@ template<typename Scalar, int Flags>
SparseMatrix<Scalar,Flags> SparseMatrix<Scalar,Flags>::Map(SluMatrix& sluMat)
{
SparseMatrix res;
- if (Flags&RowMajorBit)
+ if ((Flags&RowMajorBit)==RowMajorBit)
{
assert(sluMat.Stype == SLU_NR);
res.m_innerSize = sluMat.ncol;
@@ -276,7 +276,7 @@ class SparseLU<MatrixType,SuperLU> : public SparseLU<MatrixType>
mutable UMatrixType m_u;
mutable IntColVectorType m_p;
mutable IntRowVectorType m_q;
-
+
mutable SparseMatrix<Scalar> m_matrix;
mutable SluMatrix m_sluA;
mutable SuperMatrix m_sluL, m_sluU;
@@ -423,7 +423,7 @@ void SparseLU<MatrixType,SuperLU>::extractData() const
int* Ucol = m_u._outerIndexPtr();
int* Urow = m_u._innerIndexPtr();
Scalar* Uval = m_u._valuePtr();
-
+
Ucol[0] = 0;
Ucol[0] = 0;
@@ -434,7 +434,7 @@ void SparseLU<MatrixType,SuperLU>::extractData() const
istart = L_SUB_START(fsupc);
nsupr = L_SUB_START(fsupc+1) - istart;
upper = 1;
-
+
/* for each column in the supernode */
for (int j = fsupc; j < L_FST_SUPC(k+1); ++j)
{
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 326965c23..0fac20915 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -187,5 +187,5 @@ ei_add_test(parametrizedline)
ei_add_test(alignedbox)
ei_add_test(regression)
ei_add_test(sparse_basic " " "${SPARSE_LIBS}")
+ei_add_test(sparse_vector " " "${SPARSE_LIBS}")
ei_add_test(sparse_solvers " " "${SPARSE_LIBS}")
-
diff --git a/test/sparse.h b/test/sparse.h
index bf4a2ac04..310e976f2 100644
--- a/test/sparse.h
+++ b/test/sparse.h
@@ -89,4 +89,28 @@ initSparse(double density,
sparseMat.endFill();
}
+template<typename Scalar> void
+initSparse(double density,
+ Matrix<Scalar,Dynamic,1>& refVec,
+ SparseVector<Scalar>& sparseVec,
+ std::vector<int>* zeroCoords = 0,
+ std::vector<int>* nonzeroCoords = 0)
+{
+ sparseVec.reserve(refVec.size()*density);
+ sparseVec.setZero();
+ for(int i=0; i<refVec.size(); i++)
+ {
+ Scalar v = (ei_random<double>(0,1) < density) ? ei_random<Scalar>() : Scalar(0);
+ if (v!=Scalar(0))
+ {
+ sparseVec.fill(i) = v;
+ if (nonzeroCoords)
+ nonzeroCoords->push_back(i);
+ }
+ else if (zeroCoords)
+ zeroCoords->push_back(i);
+ refVec[i] = v;
+ }
+}
+
#endif // EIGEN_TESTSPARSE_H
diff --git a/test/sparse_vector.cpp b/test/sparse_vector.cpp
new file mode 100644
index 000000000..0a25884ca
--- /dev/null
+++ b/test/sparse_vector.cpp
@@ -0,0 +1,92 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra. Eigen itself is part of the KDE project.
+//
+// Copyright (C) 2008 Daniel Gomez Ferro <dgomezferro@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#include "sparse.h"
+
+template<typename Scalar> void sparse_vector(int rows, int cols)
+{
+ double densityMat = std::max(8./(rows*cols), 0.01);
+ double densityVec = std::max(8./float(rows), 0.1);
+ typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
+ typedef Matrix<Scalar,Dynamic,1> DenseVector;
+ typedef SparseVector<Scalar> SparseVectorType;
+ typedef SparseMatrix<Scalar> SparseMatrixType;
+ Scalar eps = 1e-6;
+
+ SparseMatrixType m1(rows,cols);
+ SparseVectorType v1(rows), v2(rows), v3(rows);
+ DenseMatrix refM1 = DenseMatrix::Zero(rows, cols);
+ DenseVector refV1 = DenseVector::Random(rows),
+ refV2 = DenseVector::Random(rows),
+ refV3 = DenseVector::Random(rows);
+
+ std::vector<int> zerocoords, nonzerocoords;
+ initSparse<Scalar>(densityVec, refV1, v1, &zerocoords, &nonzerocoords);
+ initSparse<Scalar>(densityMat, refM1, m1);
+
+ initSparse<Scalar>(densityVec, refV2, v2);
+ initSparse<Scalar>(densityVec, refV3, v3);
+
+ Scalar s1 = ei_random<Scalar>();
+
+ // test coeff and coeffRef
+ for (unsigned int i=0; i<zerocoords.size(); ++i)
+ {
+ VERIFY_IS_MUCH_SMALLER_THAN( v1.coeff(zerocoords[i]), eps );
+ VERIFY_RAISES_ASSERT( v1.coeffRef(zerocoords[i]) = 5 );
+ }
+ {
+ VERIFY(int(nonzerocoords.size()) == v1.nonZeros());
+ int j=0;
+ for (typename SparseVectorType::InnerIterator it(v1); it; ++it,++j)
+ {
+ VERIFY(nonzerocoords[j]==it.index());
+ VERIFY(it.value()==v1[it.index()]);
+ }
+ }
+ VERIFY_IS_APPROX(v1, refV1);
+
+ v1.coeffRef(nonzerocoords[0]) = Scalar(5);
+ refV1.coeffRef(nonzerocoords[0]) = Scalar(5);
+ VERIFY_IS_APPROX(v1, refV1);
+
+ VERIFY_IS_APPROX(v1+v2, refV1+refV2);
+ VERIFY_IS_APPROX(v1+v2+v3, refV1+refV2+refV3);
+
+ VERIFY_IS_APPROX(v1*s1-v2, refV1*s1-refV2);
+
+ std::cerr << v1.dot(v2) << " == " << refV1.dot(refV2) << "\n";
+ VERIFY_IS_APPROX(v1.dot(v2), refV1.dot(refV2));
+
+}
+
+void test_sparse_vector()
+{
+ for(int i = 0; i < g_repeat; i++) {
+ CALL_SUBTEST( sparse_vector<double>(8, 8) );
+// CALL_SUBTEST( sparse_vector<std::complex<double> >(16, 16) );
+ CALL_SUBTEST( sparse_vector<double>(299, 535) );
+ }
+}
+