author     Gael Guennebaud <g.gael@free.fr>    2015-04-01 22:27:34 +0200
committer  Gael Guennebaud <g.gael@free.fr>    2015-04-01 22:27:34 +0200
commit     3105986e7125b659385ace69b95c1a38464cb157 (patch)
tree       1fec25755668dc345bd01a0ece6ff88d9f5989a7
parent     39dcd01b0ac8556d1d46d5d897bdefa82cf5d91c (diff)
bug #875: remove broken SparseMatrixBase::nonZeros and introduce a nonZerosEstimate() method to sparse evaluators for internal use.
Factorize some code in SparseCompressedBase.
-rw-r--r--  Eigen/src/SparseCore/ConservativeSparseSparseProduct.h  8
-rw-r--r--  Eigen/src/SparseCore/SparseBlock.h  32
-rw-r--r--  Eigen/src/SparseCore/SparseCompressedBase.h  23
-rw-r--r--  Eigen/src/SparseCore/SparseCwiseBinaryOp.h  20
-rw-r--r--  Eigen/src/SparseCore/SparseCwiseUnaryOp.h  4
-rw-r--r--  Eigen/src/SparseCore/SparseMap.h  3
-rw-r--r--  Eigen/src/SparseCore/SparseMatrix.h  12
-rw-r--r--  Eigen/src/SparseCore/SparseMatrixBase.h  3
-rw-r--r--  Eigen/src/SparseCore/SparseSparseProductWithPruning.h  16
-rw-r--r--  Eigen/src/SparseCore/SparseTranspose.h  8
-rw-r--r--  Eigen/src/SparseCore/SparseTriangularView.h  11
-rw-r--r--  Eigen/src/SparseCore/SparseVector.h  4
-rw-r--r--  test/sparse_product.cpp  5
13 files changed, 90 insertions, 59 deletions
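
The user-visible effect of removing SparseMatrixBase::nonZeros() is that an exact entry count is only offered by concrete, storage-backed objects (SparseMatrix, SparseVector, maps, and blocks of a SparseMatrix via the factored SparseCompressedBase), while arbitrary expressions now feed the internal evaluators' nonZerosEstimate() instead. A minimal sketch of the distinction, using only the public SparseMatrix API:

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  Eigen::SparseMatrix<double> A(3,3), B(3,3);
  A.insert(0,0) = 1.0;
  A.insert(1,2) = 2.0;
  B.insert(2,1) = 3.0;

  std::cout << A.nonZeros() << "\n";      // 2: exact count, A owns its storage
  Eigen::SparseMatrix<double> C = A + B;  // evaluate the expression into storage
  std::cout << C.nonZeros() << "\n";      // 3: again an exact count
  // An expression such as (A + B) itself used to go through the base-class
  // nonZeros() forward removed by this patch; product sizing now queries the
  // internal evaluators' nonZerosEstimate() instead.
  return 0;
}
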
diff --git a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
index 244f1b50e..d25a161f7 100644
--- a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
+++ b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
@@ -30,16 +30,16 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
std::memset(mask,0,sizeof(bool)*rows);
+ typename evaluator<Lhs>::type lhsEval(lhs);
+ typename evaluator<Rhs>::type rhsEval(rhs);
+
// estimate the number of non zero entries
// given a rhs column containing Y non zeros, we assume that the respective Y columns
// of the lhs differs in average of one non zeros, thus the number of non zeros for
// the product of a rhs column with the lhs is X+Y where X is the average number of non zero
// per column of the lhs.
// Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
- Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
-
- typename evaluator<Lhs>::type lhsEval(lhs);
- typename evaluator<Rhs>::type rhsEval(rhs);
+ Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();
res.setZero();
res.reserve(Index(estimated_nnz_prod));
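
For a rough sense of what the heuristic in the comment above does in practice, here is a small public-API check with arbitrarily chosen matrices (the sizes and fill patterns below are illustrative only, not taken from the patch): the reserve size nnz(lhs)+nnz(rhs) is just a seed, and the true count of the product can land well away from it.

#include <Eigen/Sparse>
#include <iostream>
#include <vector>

int main()
{
  typedef Eigen::Triplet<double> T;
  std::vector<T> ta, tb;
  for (int i = 0; i < 8; ++i)
    ta.push_back(T(i, i, 1.0));   // diagonal lhs
  for (int j = 0; j < 8; ++j)
    tb.push_back(T(j, 0, 2.0));   // rhs with one fully populated column

  Eigen::SparseMatrix<double> A(8,8), B(8,8);
  A.setFromTriplets(ta.begin(), ta.end());
  B.setFromTriplets(tb.begin(), tb.end());

  Eigen::SparseMatrix<double> C = A * B;
  std::cout << "estimate: " << A.nonZeros() + B.nonZeros() << "\n";  // 16
  std::cout << "actual:   " << C.nonZeros() << "\n";                 // 8
  return 0;
}
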
diff --git a/Eigen/src/SparseCore/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h
index e5ef10212..71f4b37b7 100644
--- a/Eigen/src/SparseCore/SparseBlock.h
+++ b/Eigen/src/SparseCore/SparseBlock.h
@@ -90,7 +90,8 @@ class sparse_matrix_block_impl
typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
public:
enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
- EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+ typedef SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> > Base;
+ _EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
protected:
typedef typename Base::IndexVector IndexVector;
enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
@@ -198,20 +199,9 @@ public:
{ return m_matrix.const_cast_derived().outerIndexPtr() + m_outerStart; }
inline const StorageIndex* innerNonZeroPtr() const
- { return isCompressed() ? 0 : m_matrix.innerNonZeroPtr(); }
+ { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
inline StorageIndex* innerNonZeroPtr()
- { return isCompressed() ? 0 : m_matrix.const_cast_derived().innerNonZeroPtr(); }
-
- Index nonZeros() const
- {
- if(m_matrix.isCompressed())
- return ( (m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()])
- - (m_matrix.outerIndexPtr()[m_outerStart]));
- else if(m_outerSize.value()==0)
- return 0;
- else
- return Map<const IndexVector>(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
- }
+ { return isCompressed() ? 0 : (m_matrix.const_cast_derived().innerNonZeroPtr()+m_outerStart); }
bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; }
@@ -233,7 +223,7 @@ public:
const Scalar& lastCoeff() const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(sparse_matrix_block_impl);
- eigen_assert(nonZeros()>0);
+ eigen_assert(Base::nonZeros()>0);
if(m_matrix.isCompressed())
return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
else
@@ -417,6 +407,9 @@ public:
protected:
friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;
friend class ReverseInnerIterator;
+ friend struct internal::unary_evaluator<Block<XprType,BlockRows,BlockCols,InnerPanel>, internal::IteratorBased, Scalar >;
+
+ Index nonZeros() const { return Dynamic; }
EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
@@ -548,9 +541,16 @@ struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBa
explicit unary_evaluator(const XprType& op)
: m_argImpl(op.nestedExpression()), m_block(op)
{}
+
+ inline Index nonZerosEstimate() const {
+ Index nnz = m_block.nonZeros();
+ if(nnz<0)
+ return m_argImpl.nonZerosEstimate() * m_block.size() / m_block.nestedExpression().size();
+ return nnz;
+ }
protected:
- typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
+ typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
typename evaluator<ArgType>::nestedType m_argImpl;
const XprType &m_block;
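
The fallback in the block evaluator above scales the argument's estimate by the fraction of the expression the block covers, because a generic block has no stored structure of its own (its nonZeros() deliberately reports Dynamic). An illustrative computation with made-up sizes, not taken from the patch:

#include <iostream>

int main()
{
  // hypothetical numbers: the nested expression is 1000x1000 with an
  // estimated 5000 stored entries, and the block covers 250 of its columns
  long arg_estimate = 5000;
  long arg_size     = 1000L * 1000L;   // nestedExpression().size()
  long block_size   = 1000L * 250L;    // m_block.size()
  long block_estimate = arg_estimate * block_size / arg_size;  // 1250
  std::cout << block_estimate << "\n";
  return 0;
}
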
diff --git a/Eigen/src/SparseCore/SparseCompressedBase.h b/Eigen/src/SparseCore/SparseCompressedBase.h
index a5ba45e04..0dbb94faf 100644
--- a/Eigen/src/SparseCore/SparseCompressedBase.h
+++ b/Eigen/src/SparseCore/SparseCompressedBase.h
@@ -35,6 +35,25 @@ class SparseCompressedBase
class InnerIterator;
class ReverseInnerIterator;
+ protected:
+ typedef typename Base::IndexVector IndexVector;
+ Eigen::Map<IndexVector> innerNonZeros() { return Eigen::Map<IndexVector>(innerNonZeroPtr(), isCompressed()?0:derived().outerSize()); }
+ const Eigen::Map<const IndexVector> innerNonZeros() const { return Eigen::Map<const IndexVector>(innerNonZeroPtr(), isCompressed()?0:derived().outerSize()); }
+
+ public:
+
+ /** \returns the number of non zero coefficients */
+ inline Index nonZeros() const
+ {
+ if(isCompressed())
+ return outerIndexPtr()[derived().outerSize()]-outerIndexPtr()[0];
+ else if(derived().outerSize()==0)
+ return 0;
+ else
+ return innerNonZeros().sum();
+
+ }
+
/** \returns a const pointer to the array of values.
* This function is aimed at interoperability with other libraries.
* \sa innerIndexPtr(), outerIndexPtr() */
@@ -165,6 +184,10 @@ struct evaluator<SparseCompressedBase<Derived> >
evaluator() : m_matrix(0) {}
explicit evaluator(const Derived &mat) : m_matrix(&mat) {}
+ inline Index nonZerosEstimate() const {
+ return m_matrix->nonZeros();
+ }
+
operator Derived&() { return m_matrix->const_cast_derived(); }
operator const Derived&() const { return *m_matrix; }
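
The nonZeros() implementation factored into SparseCompressedBase above reduces to two rules: in compressed mode the count is the difference between the last and first outer-index pointers, otherwise it is the sum of the per-outer-vector counts. A standalone sketch of the same rule over raw compressed-storage arrays (not Eigen code, assumptions as commented):

#include <numeric>
#include <vector>
#include <iostream>

// outerIndex has outerSize+1 entries; innerNonZeros is empty in compressed mode.
long countStoredEntries(const std::vector<long>& outerIndex,
                        const std::vector<long>& innerNonZeros)
{
  const long outerSize = static_cast<long>(outerIndex.size()) - 1;
  if (innerNonZeros.empty())                     // compressed: storage is contiguous
    return outerIndex[outerSize] - outerIndex[0];
  if (outerSize == 0)
    return 0;
  return std::accumulate(innerNonZeros.begin(), innerNonZeros.end(), 0L);
}

int main()
{
  std::vector<long> outer = {0, 2, 5, 7};                    // 3 outer vectors
  std::cout << countStoredEntries(outer, {}) << "\n";        // compressed: 7
  std::cout << countStoredEntries(outer, {1, 2, 2}) << "\n"; // uncompressed: 5 in use
  return 0;
}
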
diff --git a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
index 3b4e9df59..f53427abf 100644
--- a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
+++ b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
@@ -121,6 +121,10 @@ public:
m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs())
{ }
+
+ inline Index nonZerosEstimate() const {
+ return m_lhsImpl.nonZerosEstimate() + m_rhsImpl.nonZerosEstimate();
+ }
protected:
const BinaryOp m_functor;
@@ -198,6 +202,10 @@ public:
m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs())
{ }
+
+ inline Index nonZerosEstimate() const {
+ return (std::min)(m_lhsImpl.nonZerosEstimate(), m_rhsImpl.nonZerosEstimate());
+ }
protected:
const BinaryOp m_functor;
@@ -243,7 +251,7 @@ public:
EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); }
EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }
-
+
protected:
const LhsEvaluator &m_lhsEval;
RhsIterator m_rhsIter;
@@ -262,6 +270,10 @@ public:
m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs())
{ }
+
+ inline Index nonZerosEstimate() const {
+ return m_rhsImpl.nonZerosEstimate();
+ }
protected:
const BinaryOp m_functor;
@@ -308,7 +320,7 @@ public:
EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }
-
+
protected:
LhsIterator m_lhsIter;
const RhsEvaluator &m_rhsEval;
@@ -327,6 +339,10 @@ public:
m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs())
{ }
+
+ inline Index nonZerosEstimate() const {
+ return m_lhsImpl.nonZerosEstimate();
+ }
protected:
const BinaryOp m_functor;
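
Read together, the hunks above give each binary evaluator the cheapest sensible bound: a generic sparse-sparse op may touch entries of either operand (lhs + rhs), the specialization taking the minimum is consistent with a coefficient-wise product where only overlapping entries can survive, and the mixed dense/sparse cases are bounded by the sparse operand alone. A sketch with purely illustrative counts:

#include <algorithm>
#include <iostream>

int main()
{
  long nnz_lhs = 300, nnz_rhs = 120;                 // hypothetical operand estimates
  std::cout << nnz_lhs + nnz_rhs << "\n";            // generic sparse-sparse op: 420
  std::cout << std::min(nnz_lhs, nnz_rhs) << "\n";   // coefficient-wise product: 120
  std::cout << nnz_rhs << "\n";                      // dense lhs, sparse rhs: 120
  std::cout << nnz_lhs << "\n";                      // sparse lhs, dense rhs: 300
  return 0;
}
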
diff --git a/Eigen/src/SparseCore/SparseCwiseUnaryOp.h b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
index 63d8f329c..d484be876 100644
--- a/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
+++ b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
@@ -30,6 +30,10 @@ struct unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>
};
explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression()) {}
+
+ inline Index nonZerosEstimate() const {
+ return m_argImpl.nonZerosEstimate();
+ }
protected:
typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
diff --git a/Eigen/src/SparseCore/SparseMap.h b/Eigen/src/SparseCore/SparseMap.h
index a6ff7d559..7c512d9fe 100644
--- a/Eigen/src/SparseCore/SparseMap.h
+++ b/Eigen/src/SparseCore/SparseMap.h
@@ -105,9 +105,6 @@ class SparseMapBase<Derived,ReadOnlyAccessors>
return ((*r==inner) && (id<end)) ? m_values[id] : Scalar(0);
}
- /** \returns the number of non zero coefficients */
- inline Index nonZeros() const { return m_nnz; }
-
inline SparseMapBase(Index rows, Index cols, Index nnz, IndexPointer outerIndexPtr, IndexPointer innerIndexPtr,
ScalarPointer valuePtr, IndexPointer innerNonZerosPtr = 0)
: m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_nnz(nnz), m_outerIndex(outerIndexPtr),
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h
index 4c3a47959..ef93cf80c 100644
--- a/Eigen/src/SparseCore/SparseMatrix.h
+++ b/Eigen/src/SparseCore/SparseMatrix.h
@@ -95,6 +95,7 @@ class SparseMatrix
public:
typedef SparseCompressedBase<SparseMatrix> Base;
using Base::isCompressed;
+ using Base::nonZeros;
_EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
@@ -122,9 +123,6 @@ class SparseMatrix
StorageIndex* m_outerIndex;
StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
Storage m_data;
-
- Eigen::Map<IndexVector> innerNonZeros() { return Eigen::Map<IndexVector>(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
- const Eigen::Map<const IndexVector> innerNonZeros() const { return Eigen::Map<const IndexVector>(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
public:
@@ -252,14 +250,6 @@ class SparseMatrix
memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
}
- /** \returns the number of non zero coefficients */
- inline Index nonZeros() const
- {
- if(m_innerNonZeros)
- return innerNonZeros().sum();
- return convert_index(Index(m_data.size()));
- }
-
/** Preallocates \a reserveSize non zeros.
*
* Precondition: the matrix must be in compressed mode. */
diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h
index 55b0ad9d2..d4ab8b908 100644
--- a/Eigen/src/SparseCore/SparseMatrixBase.h
+++ b/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -149,9 +149,6 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
/** \returns the number of coefficients, which is \a rows()*cols().
* \sa rows(), cols(). */
inline Index size() const { return rows() * cols(); }
- /** \returns the number of nonzero coefficients which is in practice the number
- * of stored coefficients. */
- inline Index nonZeros() const { return derived().nonZeros(); }
/** \returns true if either the number of rows or the number of columns is equal to 1.
* In other words, this function returns
* \code rows()==1 || cols()==1 \endcode
diff --git a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
index 3db01bf2d..48050077e 100644
--- a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
+++ b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
@@ -33,14 +33,6 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
// allocate a temporary buffer
AmbiVector<Scalar,StorageIndex> tempVector(rows);
- // estimate the number of non zero entries
- // given a rhs column containing Y non zeros, we assume that the respective Y columns
- // of the lhs differs in average of one non zeros, thus the number of non zeros for
- // the product of a rhs column with the lhs is X+Y where X is the average number of non zero
- // per column of the lhs.
- // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
- Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
-
// mimics a resizeByInnerOuter:
if(ResultType::IsRowMajor)
res.resize(cols, rows);
@@ -49,6 +41,14 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
typename evaluator<Lhs>::type lhsEval(lhs);
typename evaluator<Rhs>::type rhsEval(rhs);
+
+ // estimate the number of non zero entries
+ // given a rhs column containing Y non zeros, we assume that the respective Y columns
+ // of the lhs differs in average of one non zeros, thus the number of non zeros for
+ // the product of a rhs column with the lhs is X+Y where X is the average number of non zero
+ // per column of the lhs.
+ // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
+ Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();
res.reserve(estimated_nnz_prod);
double ratioColRes = double(estimated_nnz_prod)/double(lhs.rows()*rhs.cols());
diff --git a/Eigen/src/SparseCore/SparseTranspose.h b/Eigen/src/SparseCore/SparseTranspose.h
index 45d9c6700..d3fc7f102 100644
--- a/Eigen/src/SparseCore/SparseTranspose.h
+++ b/Eigen/src/SparseCore/SparseTranspose.h
@@ -40,15 +40,11 @@ namespace internal {
};
}
-// Implement nonZeros() for transpose. I'm not sure that's the best approach for that.
-// Perhaps it should be implemented in Transpose<> itself.
template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
: public internal::SparseTransposeImpl<MatrixType>
{
protected:
typedef internal::SparseTransposeImpl<MatrixType> Base;
- public:
- inline Index nonZeros() const { return Base::derived().nestedExpression().nonZeros(); }
};
namespace internal {
@@ -61,6 +57,10 @@ struct unary_evaluator<Transpose<ArgType>, IteratorBased>
typedef typename evaluator<ArgType>::ReverseInnerIterator EvalReverseIterator;
public:
typedef Transpose<ArgType> XprType;
+
+ inline Index nonZerosEstimate() const {
+ return m_argImpl.nonZerosEstimate();
+ }
class InnerIterator : public EvalIterator
{
diff --git a/Eigen/src/SparseCore/SparseTriangularView.h b/Eigen/src/SparseCore/SparseTriangularView.h
index b5fbcbdde..34ec07a13 100644
--- a/Eigen/src/SparseCore/SparseTriangularView.h
+++ b/Eigen/src/SparseCore/SparseTriangularView.h
@@ -50,13 +50,6 @@ protected:
template<typename OtherDerived> void solveInPlace(MatrixBase<OtherDerived>& other) const;
template<typename OtherDerived> void solveInPlace(SparseMatrixBase<OtherDerived>& other) const;
-
- inline Index nonZeros() const {
- // FIXME HACK number of nonZeros is required for product logic
- // this returns only an upper bound (but should be OK for most purposes)
- return derived().nestedExpression().nonZeros();
- }
-
};
@@ -191,6 +184,10 @@ public:
explicit unary_evaluator(const XprType &xpr) : m_argImpl(xpr.nestedExpression()) {}
+ inline Index nonZerosEstimate() const {
+ return m_argImpl.nonZerosEstimate();
+ }
+
class InnerIterator : public EvalIterator
{
typedef EvalIterator Base;
diff --git a/Eigen/src/SparseCore/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h
index 35bcec819..7b65f32bc 100644
--- a/Eigen/src/SparseCore/SparseVector.h
+++ b/Eigen/src/SparseCore/SparseVector.h
@@ -442,6 +442,10 @@ struct evaluator<SparseVector<_Scalar,_Options,_Index> >
explicit evaluator(const SparseVectorType &mat) : m_matrix(mat) {}
+ inline Index nonZerosEstimate() const {
+ return m_matrix.nonZeros();
+ }
+
operator SparseVectorType&() { return m_matrix.const_cast_derived(); }
operator const SparseVectorType&() const { return m_matrix; }
diff --git a/test/sparse_product.cpp b/test/sparse_product.cpp
index 480a660fc..3bad3def7 100644
--- a/test/sparse_product.cpp
+++ b/test/sparse_product.cpp
@@ -67,6 +67,9 @@ template<typename SparseMatrixType> void sparse_product()
VERIFY_IS_APPROX(m4 = m2*m3/s1, refMat4 = refMat2*refMat3/s1);
VERIFY_IS_APPROX(m4 = m2*m3*s1, refMat4 = refMat2*refMat3*s1);
VERIFY_IS_APPROX(m4 = s2*m2*m3*s1, refMat4 = s2*refMat2*refMat3*s1);
+ VERIFY_IS_APPROX(m4 = (m2+m2)*m3, refMat4 = (refMat2+refMat2)*refMat3);
+ VERIFY_IS_APPROX(m4 = m2*m3.leftCols(cols/2), refMat4 = refMat2*refMat3.leftCols(cols/2));
+ VERIFY_IS_APPROX(m4 = m2*(m3+m3).leftCols(cols/2), refMat4 = refMat2*(refMat3+refMat3).leftCols(cols/2));
VERIFY_IS_APPROX(m4=(m2*m3).pruned(0), refMat4=refMat2*refMat3);
VERIFY_IS_APPROX(m4=(m2t.transpose()*m3).pruned(0), refMat4=refMat2t.transpose()*refMat3);
@@ -194,7 +197,7 @@ template<typename SparseMatrixType> void sparse_product()
VERIFY_IS_APPROX(d3=d1*m2.transpose(), refM3=d1*refM2.transpose());
}
- // test self-adjoint and traingular-view products
+ // test self-adjoint and triangular-view products
{
DenseMatrix b = DenseMatrix::Random(rows, rows);
DenseMatrix x = DenseMatrix::Random(rows, rows);