Diffstat (limited to 'Eigen/src/SparseCore')
 -rw-r--r--  Eigen/src/SparseCore/ConservativeSparseSparseProduct.h  |  15
 -rw-r--r--  Eigen/src/SparseCore/MappedSparseMatrix.h               |  28
 -rw-r--r--  Eigen/src/SparseCore/SparseAssign.h                     | 293
 -rw-r--r--  Eigen/src/SparseCore/SparseBlock.h                      | 160
 -rw-r--r--  Eigen/src/SparseCore/SparseCwiseBinaryOp.h              | 318
 -rw-r--r--  Eigen/src/SparseCore/SparseCwiseUnaryOp.h               | 163
 -rw-r--r--  Eigen/src/SparseCore/SparseDenseProduct.h               | 419
 -rw-r--r--  Eigen/src/SparseCore/SparseDiagonalProduct.h            | 133
 -rw-r--r--  Eigen/src/SparseCore/SparseDot.h                        |  13
 -rw-r--r--  Eigen/src/SparseCore/SparseMatrix.h                     | 128
 -rw-r--r--  Eigen/src/SparseCore/SparseMatrixBase.h                 | 135
 -rw-r--r--  Eigen/src/SparseCore/SparsePermutation.h                | 135
 -rw-r--r--  Eigen/src/SparseCore/SparseProduct.h                    |  77
 -rw-r--r--  Eigen/src/SparseCore/SparseRedux.h                      |   8
 -rw-r--r--  Eigen/src/SparseCore/SparseSelfAdjointView.h            | 361
 -rw-r--r--  Eigen/src/SparseCore/SparseSparseProductWithPruning.h   |  65
 -rw-r--r--  Eigen/src/SparseCore/SparseTranspose.h                  |  65
 -rw-r--r--  Eigen/src/SparseCore/SparseTriangularView.h             | 178
 -rw-r--r--  Eigen/src/SparseCore/SparseUtil.h                       |  40
 -rw-r--r--  Eigen/src/SparseCore/SparseVector.h                     |  52
 -rw-r--r--  Eigen/src/SparseCore/SparseView.h                       | 181
 -rw-r--r--  Eigen/src/SparseCore/TriangularSolver.h                 | 176
22 files changed, 2784 insertions(+), 359 deletions(-)
diff --git a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
index 8067565f9..815fdb6d8 100644
--- a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
+++ b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -36,6 +36,11 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
// per column of the lhs.
// Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
+
+#ifdef EIGEN_TEST_EVALUATORS
+ typename evaluator<Lhs>::type lhsEval(lhs);
+ typename evaluator<Rhs>::type rhsEval(rhs);
+#endif
res.setZero();
res.reserve(Index(estimated_nnz_prod));
@@ -45,11 +50,19 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
res.startVec(j);
Index nnz = 0;
+#ifndef EIGEN_TEST_EVALUATORS
for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+#else
+ for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
+#endif
{
Scalar y = rhsIt.value();
Index k = rhsIt.index();
+#ifndef EIGEN_TEST_EVALUATORS
for (typename Lhs::InnerIterator lhsIt(lhs, k); lhsIt; ++lhsIt)
+#else
+ for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
+#endif
{
Index i = lhsIt.index();
Scalar x = lhsIt.value();
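For reference, the nested loop that this hunk ports to the evaluator API can be written against the public API as follows. This is a compile-checked sketch of the same access pattern (one product column accumulated into a dense vector), not the internal routine itself:

    #include <Eigen/SparseCore>
    #include <iostream>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;   // column-major
      SpMat A(3,3), B(3,3);
      A.insert(0,0) = 1; A.insert(2,1) = 2; A.insert(1,2) = 3;
      B.insert(1,0) = 4; B.insert(0,2) = 5;
      A.makeCompressed(); B.makeCompressed();

      const int j = 0;                             // column of A*B to compute
      Eigen::VectorXd acc = Eigen::VectorXd::Zero(A.rows());
      for (SpMat::InnerIterator rhsIt(B, j); rhsIt; ++rhsIt)               // nonzeros of B.col(j)
        for (SpMat::InnerIterator lhsIt(A, rhsIt.index()); lhsIt; ++lhsIt) // matching column of A
          acc[lhsIt.index()] += lhsIt.value() * rhsIt.value();
      // Under EIGEN_TEST_EVALUATORS the patch builds the same iterators from
      // internal::evaluator<> objects (lhsEval/rhsEval) instead of the expressions.
      std::cout << acc.transpose() << std::endl;   // equals (A*B).col(j)
    }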
diff --git a/Eigen/src/SparseCore/MappedSparseMatrix.h b/Eigen/src/SparseCore/MappedSparseMatrix.h
index ab1a266a9..9205b906f 100644
--- a/Eigen/src/SparseCore/MappedSparseMatrix.h
+++ b/Eigen/src/SparseCore/MappedSparseMatrix.h
@@ -176,6 +176,34 @@ class MappedSparseMatrix<Scalar,_Flags,_Index>::ReverseInnerIterator
const Index m_end;
};
+#ifdef EIGEN_ENABLE_EVALUATORS
+namespace internal {
+
+template<typename _Scalar, int _Options, typename _Index>
+struct evaluator<MappedSparseMatrix<_Scalar,_Options,_Index> >
+ : evaluator_base<MappedSparseMatrix<_Scalar,_Options,_Index> >
+{
+ typedef MappedSparseMatrix<_Scalar,_Options,_Index> MappedSparseMatrixType;
+ typedef typename MappedSparseMatrixType::InnerIterator InnerIterator;
+ typedef typename MappedSparseMatrixType::ReverseInnerIterator ReverseInnerIterator;
+
+ enum {
+ CoeffReadCost = NumTraits<_Scalar>::ReadCost,
+ Flags = MappedSparseMatrixType::Flags
+ };
+
+ evaluator() : m_matrix(0) {}
+ evaluator(const MappedSparseMatrixType &mat) : m_matrix(&mat) {}
+
+ operator MappedSparseMatrixType&() { return m_matrix->const_cast_derived(); }
+ operator const MappedSparseMatrixType&() const { return *m_matrix; }
+
+ const MappedSparseMatrixType *m_matrix;
+};
+
+}
+#endif
+
} // end namespace Eigen
#endif // EIGEN_MAPPED_SPARSEMATRIX_H
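The new evaluator holds a pointer to the mapped matrix and re-exports its iterators, so expressions over a MappedSparseMatrix keep working under the evaluator mechanism. A minimal public-API usage sketch (the CSC arrays below are made up for illustration):

    #include <Eigen/SparseCore>
    #include <iostream>

    int main()
    {
      int    outer[]  = {0, 2, 3, 4};        // column start offsets (cols+1 entries)
      int    inner[]  = {0, 2, 1, 2};        // row indices of the nonzeros
      double values[] = {1., 2., 3., 4.};    // nonzero values, column by column

      // Wraps the caller-owned CSC arrays without copying them.
      Eigen::MappedSparseMatrix<double> M(3, 3, 4, outer, inner, values);

      for (int j = 0; j < M.outerSize(); ++j)
        for (Eigen::MappedSparseMatrix<double>::InnerIterator it(M, j); it; ++it)
          std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";
    }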
diff --git a/Eigen/src/SparseCore/SparseAssign.h b/Eigen/src/SparseCore/SparseAssign.h
new file mode 100644
index 000000000..0a29cb32f
--- /dev/null
+++ b/Eigen/src/SparseCore/SparseAssign.h
@@ -0,0 +1,293 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEASSIGN_H
+#define EIGEN_SPARSEASSIGN_H
+
+namespace Eigen {
+
+#ifndef EIGEN_TEST_EVALUATORS
+
+template<typename Derived>
+template<typename OtherDerived>
+Derived& SparseMatrixBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
+{
+ other.derived().evalTo(derived());
+ return derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+Derived& SparseMatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
+{
+ other.evalTo(derived());
+ return derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+inline Derived& SparseMatrixBase<Derived>::operator=(const SparseMatrixBase<OtherDerived>& other)
+{
+ return assign(other.derived());
+}
+
+template<typename Derived>
+inline Derived& SparseMatrixBase<Derived>::operator=(const Derived& other)
+{
+// if (other.isRValue())
+// derived().swap(other.const_cast_derived());
+// else
+ return assign(other.derived());
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+inline Derived& SparseMatrixBase<Derived>::assign(const OtherDerived& other)
+{
+ const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? Index(other.rows()) : Index(other.cols());
+ if ((!transpose) && other.isRValue())
+ {
+ // eval without temporary
+ derived().resize(Index(other.rows()), Index(other.cols()));
+ derived().setZero();
+ derived().reserve((std::max)(this->rows(),this->cols())*2);
+ for (Index j=0; j<outerSize; ++j)
+ {
+ derived().startVec(j);
+ for (typename OtherDerived::InnerIterator it(other, typename OtherDerived::Index(j)); it; ++it)
+ {
+ Scalar v = it.value();
+ derived().insertBackByOuterInner(j,Index(it.index())) = v;
+ }
+ }
+ derived().finalize();
+ }
+ else
+ {
+ assignGeneric(other);
+ }
+ return derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+inline void SparseMatrixBase<Derived>::assignGeneric(const OtherDerived& other)
+{
+ //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ eigen_assert(( ((internal::traits<Derived>::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
+ (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) &&
+ "the transpose operation is supposed to be handled in SparseMatrix::operator=");
+
+ enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) };
+
+ const Index outerSize = Index(other.outerSize());
+ //typedef typename internal::conditional<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::type TempType;
+  // thanks to shallow copies, we always eval to a temporary
+ Derived temp(Index(other.rows()), Index(other.cols()));
+
+ temp.reserve((std::max)(this->rows(),this->cols())*2);
+ for (Index j=0; j<outerSize; ++j)
+ {
+ temp.startVec(j);
+ for (typename OtherDerived::InnerIterator it(other.derived(), typename OtherDerived::Index(j)); it; ++it)
+ {
+ Scalar v = it.value();
+ temp.insertBackByOuterInner(Flip?Index(it.index()):j,Flip?j:Index(it.index())) = v;
+ }
+ }
+ temp.finalize();
+
+ derived() = temp.markAsRValue();
+}
+
+// template<typename Lhs, typename Rhs>
+// inline Derived& operator=(const SparseSparseProduct<Lhs,Rhs>& product);
+//
+// template<typename OtherDerived>
+// Derived& operator+=(const SparseMatrixBase<OtherDerived>& other);
+// template<typename OtherDerived>
+// Derived& operator-=(const SparseMatrixBase<OtherDerived>& other);
+//
+// Derived& operator*=(const Scalar& other);
+// Derived& operator/=(const Scalar& other);
+//
+// template<typename OtherDerived>
+// Derived& operator*=(const SparseMatrixBase<OtherDerived>& other);
+
+#else // EIGEN_TEST_EVALUATORS
+
+template<typename Derived>
+template<typename OtherDerived>
+Derived& SparseMatrixBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
+{
+ // TODO use the evaluator mechanism
+ other.derived().evalTo(derived());
+ return derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+Derived& SparseMatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
+{
+ // TODO use the evaluator mechanism
+ other.evalTo(derived());
+ return derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+inline Derived& SparseMatrixBase<Derived>::operator=(const SparseMatrixBase<OtherDerived>& other)
+{
+ // FIXME, by default sparse evaluation do not alias, so we should be able to bypass the generic call_assignment
+ internal::call_assignment/*_no_alias*/(derived(), other.derived());
+ return derived();
+}
+
+template<typename Derived>
+inline Derived& SparseMatrixBase<Derived>::operator=(const Derived& other)
+{
+ internal::call_assignment_no_alias(derived(), other.derived());
+ return derived();
+}
+
+namespace internal {
+
+template<>
+struct storage_kind_to_evaluator_kind<Sparse> {
+ typedef IteratorBased Kind;
+};
+
+template<>
+struct storage_kind_to_shape<Sparse> {
+ typedef SparseShape Shape;
+};
+
+struct Sparse2Sparse {};
+struct Sparse2Dense {};
+
+template<> struct AssignmentKind<SparseShape, SparseShape> { typedef Sparse2Sparse Kind; };
+template<> struct AssignmentKind<SparseShape, SparseTriangularShape> { typedef Sparse2Sparse Kind; };
+template<> struct AssignmentKind<DenseShape, SparseShape> { typedef Sparse2Dense Kind; };
+
+
+template<typename DstXprType, typename SrcXprType>
+void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
+{
+ eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
+
+ typedef typename DstXprType::Index Index;
+ typedef typename DstXprType::Scalar Scalar;
+ typedef typename internal::evaluator<DstXprType>::type DstEvaluatorType;
+ typedef typename internal::evaluator<SrcXprType>::type SrcEvaluatorType;
+
+ SrcEvaluatorType srcEvaluator(src);
+
+ const bool transpose = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit);
+ const Index outerEvaluationSize = (SrcEvaluatorType::Flags&RowMajorBit) ? src.rows() : src.cols();
+ if ((!transpose) && src.isRValue())
+ {
+ // eval without temporary
+ dst.resize(src.rows(), src.cols());
+ dst.setZero();
+ dst.reserve((std::max)(src.rows(),src.cols())*2);
+ for (Index j=0; j<outerEvaluationSize; ++j)
+ {
+ dst.startVec(j);
+ for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
+ {
+ Scalar v = it.value();
+ dst.insertBackByOuterInner(j,it.index()) = v;
+ }
+ }
+ dst.finalize();
+ }
+ else
+ {
+ // eval through a temporary
+ eigen_assert(( ((internal::traits<DstXprType>::SupportedAccessPatterns & OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
+ (!((DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit)))) &&
+ "the transpose operation is supposed to be handled in SparseMatrix::operator=");
+
+ enum { Flip = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit) };
+
+
+ DstXprType temp(src.rows(), src.cols());
+
+ temp.reserve((std::max)(src.rows(),src.cols())*2);
+ for (Index j=0; j<outerEvaluationSize; ++j)
+ {
+ temp.startVec(j);
+ for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
+ {
+ Scalar v = it.value();
+ temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
+ }
+ }
+ temp.finalize();
+
+ dst = temp.markAsRValue();
+ }
+}
+
+// Generic Sparse to Sparse assignment
+template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
+struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse, Scalar>
+{
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
+ {
+ eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
+
+ assign_sparse_to_sparse(dst.derived(), src.derived());
+ }
+};
+
+// Sparse to Dense assignment
+template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
+struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense, Scalar>
+{
+ static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
+ {
+ eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
+ typedef typename SrcXprType::Index Index;
+
+ typename internal::evaluator<SrcXprType>::type srcEval(src);
+ typename internal::evaluator<DstXprType>::type dstEval(dst);
+ const Index outerEvaluationSize = (internal::evaluator<SrcXprType>::Flags&RowMajorBit) ? src.rows() : src.cols();
+ for (Index j=0; j<outerEvaluationSize; ++j)
+ for (typename internal::evaluator<SrcXprType>::InnerIterator i(srcEval,j); i; ++i)
+ func.assignCoeff(dstEval.coeffRef(i.row(),i.col()), i.value());
+ }
+};
+
+template< typename DstXprType, typename SrcXprType, typename Scalar>
+struct Assignment<DstXprType, SrcXprType, internal::assign_op<typename DstXprType::Scalar>, Sparse2Dense, Scalar>
+{
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &)
+ {
+ eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
+ typedef typename SrcXprType::Index Index;
+
+ dst.setZero();
+ typename internal::evaluator<SrcXprType>::type srcEval(src);
+ typename internal::evaluator<DstXprType>::type dstEval(dst);
+ const Index outerEvaluationSize = (internal::evaluator<SrcXprType>::Flags&RowMajorBit) ? src.rows() : src.cols();
+ for (Index j=0; j<outerEvaluationSize; ++j)
+ for (typename internal::evaluator<SrcXprType>::InnerIterator i(srcEval,j); i; ++i)
+ dstEval.coeffRef(i.row(),i.col()) = i.value();
+ }
+};
+
+} // end namespace internal
+
+#endif // EIGEN_TEST_EVALUATORS
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEASSIGN_H
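From user code, the two code paths implemented above are reached through plain assignments. A hedged sketch: assigning between different storage orders takes the "eval through a temporary" branch, and sparse-to-dense conversion is what the Sparse2Dense specializations implement in the evaluator code path:

    #include <Eigen/SparseCore>
    #include <Eigen/Dense>
    #include <iostream>

    int main()
    {
      Eigen::SparseMatrix<double> colMajor(3, 3);          // default: ColMajor
      colMajor.insert(0, 1) = 1.0;
      colMajor.insert(2, 0) = 2.0;
      colMajor.makeCompressed();

      // Mismatched storage orders: handled by the temporary/transpose branch.
      Eigen::SparseMatrix<double, Eigen::RowMajor> rowMajor;
      rowMajor = colMajor;

      // Sparse -> dense conversion: zero the destination, then write only the
      // stored entries (cf. the Sparse2Dense Assignment specializations).
      Eigen::MatrixXd dense = Eigen::MatrixXd(colMajor);
      std::cout << dense << "\n" << rowMajor << "\n";
    }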
diff --git a/Eigen/src/SparseCore/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h
index 491cc72b0..21445b645 100644
--- a/Eigen/src/SparseCore/SparseBlock.h
+++ b/Eigen/src/SparseCore/SparseBlock.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -12,6 +12,7 @@
namespace Eigen {
+// Subset of columns or rows
template<typename XprType, int BlockRows, int BlockCols>
class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
: public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
@@ -25,6 +26,7 @@ protected:
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+#ifndef EIGEN_TEST_EVALUATORS
class InnerIterator: public XprType::InnerIterator
{
typedef typename BlockImpl::Index Index;
@@ -49,6 +51,7 @@ public:
protected:
Index m_outer;
};
+#endif // EIGEN_TEST_EVALUATORS
inline BlockImpl(const XprType& xpr, Index i)
: m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
@@ -63,13 +66,30 @@ public:
Index nonZeros() const
{
+#ifndef EIGEN_TEST_EVALUATORS
Index nnz = 0;
Index end = m_outerStart + m_outerSize.value();
for(Index j=m_outerStart; j<end; ++j)
for(typename XprType::InnerIterator it(m_matrix, j); it; ++it)
++nnz;
return nnz;
+#else // EIGEN_TEST_EVALUATORS
+ typedef typename internal::evaluator<XprType>::type EvaluatorType;
+ EvaluatorType matEval(m_matrix);
+ Index nnz = 0;
+ Index end = m_outerStart + m_outerSize.value();
+ for(int j=m_outerStart; j<end; ++j)
+ for(typename EvaluatorType::InnerIterator it(matEval, j); it; ++it)
+ ++nnz;
+ return nnz;
+#endif // EIGEN_TEST_EVALUATORS
}
+
+ inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; }
+ Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
+ Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
+ Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
protected:
@@ -101,6 +121,7 @@ protected:
enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
public:
+#ifndef EIGEN_TEST_EVALUATORS
class InnerIterator: public SparseMatrixType::InnerIterator
{
public:
@@ -123,6 +144,7 @@ public:
protected:
Index m_outer;
};
+#endif // EIGEN_TEST_EVALUATORS
inline sparse_matrix_block_impl(const SparseMatrixType& xpr, Index i)
: m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
@@ -248,6 +270,12 @@ public:
EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+ inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; }
+ Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
+ Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
+ Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
protected:
@@ -406,8 +434,7 @@ public:
m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
}
- inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; }
-
+#ifndef EIGEN_TEST_EVALUATORS
typedef internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel> InnerIterator;
class ReverseInnerIterator : public _MatrixTypeNested::ReverseInnerIterator
@@ -433,6 +460,14 @@ public:
inline operator bool() const { return Base::operator bool() && Base::index() >= m_begin; }
};
+#endif // EIGEN_TEST_EVALUATORS
+
+ inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; }
+ Index startRow() const { return m_startRow.value(); }
+ Index startCol() const { return m_startCol.value(); }
+ Index blockRows() const { return m_blockRows.value(); }
+ Index blockCols() const { return m_blockCols.value(); }
+
protected:
friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;
friend class ReverseInnerIterator;
@@ -538,7 +573,126 @@ namespace internal {
inline operator bool() const { return m_outerPos < m_end; }
};
+
+#ifdef EIGEN_TEST_EVALUATORS
+
+//
+template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
+struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased >
+ : public evaluator_base<Block<ArgType,BlockRows,BlockCols,InnerPanel> >
+{
+ class InnerVectorInnerIterator;
+ class OuterVectorInnerIterator;
+ public:
+ typedef Block<ArgType,BlockRows,BlockCols,InnerPanel> XprType;
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+
+ class ReverseInnerIterator;
+
+ enum {
+ IsRowMajor = XprType::IsRowMajor,
+
+ OuterVector = (BlockCols==1 && ArgType::IsRowMajor)
+ | // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
+ // revert to || as soon as not needed anymore.
+ (BlockRows==1 && !ArgType::IsRowMajor),
+
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+ Flags = XprType::Flags
+ };
+
+ typedef typename internal::conditional<OuterVector,OuterVectorInnerIterator,InnerVectorInnerIterator>::type InnerIterator;
+
+ unary_evaluator(const XprType& op)
+ : m_argImpl(op.nestedExpression()), m_block(op)
+ {}
+
+ protected:
+ typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
+
+ typename evaluator<ArgType>::nestedType m_argImpl;
+ const XprType &m_block;
+};
+
+template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
+class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
+ : public EvalIterator
+{
+ const XprType& m_block;
+ Index m_end;
+public:
+
+ EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer)
+ : EvalIterator(aEval.m_argImpl, outer + (IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
+ m_block(aEval.m_block),
+ m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
+ {
+ while( (EvalIterator::operator bool()) && (EvalIterator::index() < (IsRowMajor ? m_block.startCol() : m_block.startRow())) )
+ EvalIterator::operator++();
+ }
+
+ inline Index index() const { return EvalIterator::index() - (IsRowMajor ? m_block.startCol() : m_block.startRow()); }
+ inline Index outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); }
+ inline Index row() const { return EvalIterator::row() - m_block.startRow(); }
+ inline Index col() const { return EvalIterator::col() - m_block.startCol(); }
+
+ inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; }
+};
+
+template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
+class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
+{
+ const unary_evaluator& m_eval;
+ Index m_outerPos;
+ Index m_innerIndex;
+ Scalar m_value;
+ Index m_end;
+public:
+
+ EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer)
+ : m_eval(aEval),
+ m_outerPos( (IsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) - 1), // -1 so that operator++ finds the first non-zero entry
+ m_innerIndex(IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
+ m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
+ {
+ EIGEN_UNUSED_VARIABLE(outer);
+ eigen_assert(outer==0);
+
+ ++(*this);
+ }
+
+ inline Index index() const { return m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow()); }
+ inline Index outer() const { return 0; }
+ inline Index row() const { return IsRowMajor ? 0 : index(); }
+ inline Index col() const { return IsRowMajor ? index() : 0; }
+
+ inline Scalar value() const { return m_value; }
+
+ inline OuterVectorInnerIterator& operator++()
+ {
+ // search next non-zero entry
+ while(m_outerPos<m_end)
+ {
+ m_outerPos++;
+ EvalIterator it(m_eval.m_argImpl, m_outerPos);
+ // search for the key m_innerIndex in the current outer-vector
+ while(it && it.index() < m_innerIndex) ++it;
+ if(it && it.index()==m_innerIndex)
+ {
+ m_value = it.value();
+ break;
+ }
+ }
+ return *this;
+ }
+ inline operator bool() const { return m_outerPos < m_end; }
+};
+
+
+#endif // EIGEN_TEST_EVALUATORS
+
} // end namespace internal
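The two iterator kinds introduced above map onto the usual block expressions: on a column-major sparse matrix, col() gives an inner-vector block (a plain restriction of the matrix iterator) while row() gives an outer-vector block whose iterator has to probe every column. A hedged sketch using only the public API (the exact expression types are internal):

    #include <Eigen/SparseCore>
    #include <iostream>

    int main()
    {
      Eigen::SparseMatrix<double> S(4, 4);                 // column-major
      S.insert(0, 0) = 1; S.insert(3, 0) = 2;
      S.insert(1, 2) = 3; S.insert(2, 3) = 4;
      S.makeCompressed();

      // Inner-vector block: iteration is a thin wrapper over the matrix iterator
      // restricted to one outer index.
      std::cout << "nnz(col 0) = " << S.col(0).nonZeros() << "\n";   // 2

      // Outer-vector block: the OuterVectorInnerIterator searches each column
      // for an entry at the requested row.
      std::cout << "sum(row 1) = " << S.row(1).sum() << "\n";        // 3
    }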
diff --git a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
index 60fdd214a..3a7e72cd2 100644
--- a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
+++ b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -31,12 +31,6 @@ namespace Eigen {
namespace internal {
-template<> struct promote_storage_type<Dense,Sparse>
-{ typedef Sparse ret; };
-
-template<> struct promote_storage_type<Sparse,Dense>
-{ typedef Sparse ret; };
-
template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived,
typename _LhsStorageMode = typename traits<Lhs>::StorageKind,
typename _RhsStorageMode = typename traits<Rhs>::StorageKind>
@@ -44,6 +38,8 @@ class sparse_cwise_binary_op_inner_iterator_selector;
} // end namespace internal
+#ifndef EIGEN_TEST_EVALUATORS
+
template<typename BinaryOp, typename Lhs, typename Rhs>
class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Sparse>
: public SparseMatrixBase<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
@@ -290,6 +286,314 @@ class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs,
} // end namespace internal
+#else // EIGEN_TEST_EVALUATORS
+
+namespace internal {
+
+
+// Generic "sparse OP sparse"
+template<typename BinaryOp, typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IteratorBased, IteratorBased>
+ : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+{
+protected:
+ typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
+ typedef typename evaluator<Rhs>::InnerIterator RhsIterator;
+public:
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
+
+ class ReverseInnerIterator;
+ class InnerIterator
+ {
+ typedef typename traits<XprType>::Scalar Scalar;
+ typedef typename XprType::Index Index;
+
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
+ : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor)
+ {
+ this->operator++();
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index()))
+ {
+ m_id = m_lhsIter.index();
+ m_value = m_functor(m_lhsIter.value(), m_rhsIter.value());
+ ++m_lhsIter;
+ ++m_rhsIter;
+ }
+ else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index())))
+ {
+ m_id = m_lhsIter.index();
+ m_value = m_functor(m_lhsIter.value(), Scalar(0));
+ ++m_lhsIter;
+ }
+ else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index())))
+ {
+ m_id = m_rhsIter.index();
+ m_value = m_functor(Scalar(0), m_rhsIter.value());
+ ++m_rhsIter;
+ }
+ else
+ {
+ m_value = 0; // this is to avoid a compilation warning
+ m_id = -1;
+ }
+ return *this;
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_id; }
+ EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); }
+ EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; }
+
+ protected:
+ LhsIterator m_lhsIter;
+ RhsIterator m_rhsIter;
+ const BinaryOp& m_functor;
+ Scalar m_value;
+ Index m_id;
+ };
+
+
+ enum {
+ CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ Flags = XprType::Flags
+ };
+
+ binary_evaluator(const XprType& xpr)
+ : m_functor(xpr.functor()),
+ m_lhsImpl(xpr.lhs()),
+ m_rhsImpl(xpr.rhs())
+ { }
+
+protected:
+ const BinaryOp m_functor;
+ typename evaluator<Lhs>::nestedType m_lhsImpl;
+ typename evaluator<Rhs>::nestedType m_rhsImpl;
+};
+
+// "sparse .* sparse"
+template<typename T, typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T>, Lhs, Rhs>, IteratorBased, IteratorBased>
+ : evaluator_base<CwiseBinaryOp<scalar_product_op<T>, Lhs, Rhs> >
+{
+protected:
+ typedef scalar_product_op<T> BinaryOp;
+ typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
+ typedef typename evaluator<Rhs>::InnerIterator RhsIterator;
+public:
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
+
+ class ReverseInnerIterator;
+ class InnerIterator
+ {
+ typedef typename traits<XprType>::Scalar Scalar;
+ typedef typename XprType::Index Index;
+
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
+ : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor)
+ {
+ while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
+ {
+ if (m_lhsIter.index() < m_rhsIter.index())
+ ++m_lhsIter;
+ else
+ ++m_rhsIter;
+ }
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ ++m_lhsIter;
+ ++m_rhsIter;
+ while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
+ {
+ if (m_lhsIter.index() < m_rhsIter.index())
+ ++m_lhsIter;
+ else
+ ++m_rhsIter;
+ }
+ return *this;
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); }
+
+ protected:
+ LhsIterator m_lhsIter;
+ RhsIterator m_rhsIter;
+ const BinaryOp& m_functor;
+ };
+
+
+ enum {
+ CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ Flags = XprType::Flags
+ };
+
+ binary_evaluator(const XprType& xpr)
+ : m_functor(xpr.functor()),
+ m_lhsImpl(xpr.lhs()),
+ m_rhsImpl(xpr.rhs())
+ { }
+
+protected:
+ const BinaryOp m_functor;
+ typename evaluator<Lhs>::nestedType m_lhsImpl;
+ typename evaluator<Rhs>::nestedType m_rhsImpl;
+};
+
+// "dense .* sparse"
+template<typename T, typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T>, Lhs, Rhs>, IndexBased, IteratorBased>
+ : evaluator_base<CwiseBinaryOp<scalar_product_op<T>, Lhs, Rhs> >
+{
+protected:
+ typedef scalar_product_op<T> BinaryOp;
+ typedef typename evaluator<Lhs>::type LhsEvaluator;
+ typedef typename evaluator<Rhs>::InnerIterator RhsIterator;
+public:
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
+
+ class ReverseInnerIterator;
+ class InnerIterator
+ {
+ typedef typename traits<XprType>::Scalar Scalar;
+ typedef typename XprType::Index Index;
+ enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
+
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
+ : m_lhsEval(aEval.m_lhsImpl), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor), m_outer(outer)
+ {}
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ ++m_rhsIter;
+ return *this;
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ { return m_functor(m_lhsEval.coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_rhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }
+
+ protected:
+ const LhsEvaluator &m_lhsEval;
+ RhsIterator m_rhsIter;
+ const BinaryOp& m_functor;
+ const Index m_outer;
+ };
+
+
+ enum {
+ CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ Flags = XprType::Flags
+ };
+
+ binary_evaluator(const XprType& xpr)
+ : m_functor(xpr.functor()),
+ m_lhsImpl(xpr.lhs()),
+ m_rhsImpl(xpr.rhs())
+ { }
+
+protected:
+ const BinaryOp m_functor;
+ typename evaluator<Lhs>::nestedType m_lhsImpl;
+ typename evaluator<Rhs>::nestedType m_rhsImpl;
+};
+
+// "sparse .* dense"
+template<typename T, typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T>, Lhs, Rhs>, IteratorBased, IndexBased>
+ : evaluator_base<CwiseBinaryOp<scalar_product_op<T>, Lhs, Rhs> >
+{
+protected:
+ typedef scalar_product_op<T> BinaryOp;
+ typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
+ typedef typename evaluator<Rhs>::type RhsEvaluator;
+public:
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
+
+ class ReverseInnerIterator;
+ class InnerIterator
+ {
+ typedef typename traits<XprType>::Scalar Scalar;
+ typedef typename XprType::Index Index;
+ enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
+
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
+ : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsEval(aEval.m_rhsImpl), m_functor(aEval.m_functor), m_outer(outer)
+ {}
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ ++m_lhsIter;
+ return *this;
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ { return m_functor(m_lhsIter.value(),
+ m_rhsEval.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }
+
+ protected:
+ LhsIterator m_lhsIter;
+ const RhsEvaluator &m_rhsEval;
+ const BinaryOp& m_functor;
+ const Index m_outer;
+ };
+
+
+ enum {
+ CoeffReadCost = evaluator<Lhs>::CoeffReadCost + evaluator<Rhs>::CoeffReadCost + functor_traits<BinaryOp>::Cost,
+ Flags = XprType::Flags
+ };
+
+ binary_evaluator(const XprType& xpr)
+ : m_functor(xpr.functor()),
+ m_lhsImpl(xpr.lhs()),
+ m_rhsImpl(xpr.rhs())
+ { }
+
+protected:
+ const BinaryOp m_functor;
+ typename evaluator<Lhs>::nestedType m_lhsImpl;
+ typename evaluator<Rhs>::nestedType m_rhsImpl;
+};
+
+}
+
+#endif
+
+
+
/***************************************************************************
* Implementation of SparseMatrixBase and SparseCwise functions/operators
***************************************************************************/
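The difference between the generic binary evaluator and the scalar_product_op specializations is visible from the public API: a sum traverses the union of the two sparsity patterns (feeding an explicit zero to the functor on the side that has no entry), while a coefficient-wise product traverses only their intersection. A small sketch:

    #include <Eigen/SparseCore>
    #include <iostream>

    int main()
    {
      Eigen::SparseMatrix<double> A(3, 3), B(3, 3);
      A.insert(0, 0) = 1;  A.insert(2, 0) = 2;
      B.insert(2, 0) = 10; B.insert(1, 1) = 20;
      A.makeCompressed();  B.makeCompressed();

      // Generic binary evaluator: walks the union of the two sorted index lists.
      Eigen::SparseMatrix<double> sum = A + B;                  // 3 stored entries

      // scalar_product_op specialization: walks only the intersection.
      Eigen::SparseMatrix<double> prod = A.cwiseProduct(B);     // 1 stored entry

      std::cout << sum.nonZeros() << " " << prod.nonZeros() << "\n";  // prints: 3 1
    }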
diff --git a/Eigen/src/SparseCore/SparseCwiseUnaryOp.h b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
index 5a50c7803..0e0f9f5f5 100644
--- a/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
+++ b/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -11,6 +11,8 @@
#define EIGEN_SPARSE_CWISE_UNARY_OP_H
namespace Eigen {
+
+#ifndef EIGEN_TEST_EVALUATORS
template<typename UnaryOp, typename MatrixType>
class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>
@@ -18,12 +20,12 @@ class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>
{
public:
- class InnerIterator;
- class ReverseInnerIterator;
-
typedef CwiseUnaryOp<UnaryOp, MatrixType> Derived;
EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+ class InnerIterator;
+ class ReverseInnerIterator;
+
protected:
typedef typename internal::traits<Derived>::_XprTypeNested _MatrixTypeNested;
typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
@@ -138,6 +140,159 @@ class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::ReverseInnerIterator
const ViewOp m_functor;
};
+#else // EIGEN_TEST_EVALUATORS
+
+namespace internal {
+
+template<typename UnaryOp, typename ArgType>
+struct unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>
+ : public evaluator_base<CwiseUnaryOp<UnaryOp,ArgType> >
+{
+ public:
+ typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;
+
+ class InnerIterator;
+// class ReverseInnerIterator;
+
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,
+ Flags = XprType::Flags
+ };
+
+ unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression()) {}
+
+ protected:
+ typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
+// typedef typename evaluator<ArgType>::ReverseInnerIterator EvalReverseIterator;
+
+ const UnaryOp m_functor;
+ typename evaluator<ArgType>::nestedType m_argImpl;
+};
+
+template<typename UnaryOp, typename ArgType>
+class unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::InnerIterator
+ : public unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator
+{
+ typedef typename XprType::Scalar Scalar;
+ typedef typename unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, typename XprType::Index outer)
+ : Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor)
+ {}
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ { Base::operator++(); return *this; }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); }
+
+ protected:
+ const UnaryOp m_functor;
+ private:
+ Scalar& valueRef();
+};
+
+// template<typename UnaryOp, typename ArgType>
+// class unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::ReverseInnerIterator
+// : public unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalReverseIterator
+// {
+// typedef typename XprType::Scalar Scalar;
+// typedef typename unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalReverseIterator Base;
+// public:
+//
+// EIGEN_STRONG_INLINE ReverseInnerIterator(const XprType& unaryOp, typename XprType::Index outer)
+// : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+// {}
+//
+// EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
+// { Base::operator--(); return *this; }
+//
+// EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); }
+//
+// protected:
+// const UnaryOp m_functor;
+// private:
+// Scalar& valueRef();
+// };
+
+
+
+
+
+template<typename ViewOp, typename ArgType>
+struct unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>
+ : public evaluator_base<CwiseUnaryView<ViewOp,ArgType> >
+{
+ public:
+ typedef CwiseUnaryView<ViewOp, ArgType> XprType;
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<ViewOp>::Cost,
+ Flags = XprType::Flags
+ };
+
+ unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression()) {}
+
+ protected:
+ typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
+// typedef typename evaluator<ArgType>::ReverseInnerIterator EvalReverseIterator;
+
+ const ViewOp m_functor;
+ typename evaluator<ArgType>::nestedType m_argImpl;
+};
+
+template<typename ViewOp, typename ArgType>
+class unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::InnerIterator
+ : public unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator
+{
+ typedef typename XprType::Scalar Scalar;
+ typedef typename unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, typename XprType::Index outer)
+ : Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor)
+ {}
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ { Base::operator++(); return *this; }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); }
+ EIGEN_STRONG_INLINE Scalar& valueRef() { return m_functor(Base::valueRef()); }
+
+ protected:
+ const ViewOp m_functor;
+};
+
+// template<typename ViewOp, typename ArgType>
+// class unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::ReverseInnerIterator
+// : public unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalReverseIterator
+// {
+// typedef typename XprType::Scalar Scalar;
+// typedef typename unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalReverseIterator Base;
+// public:
+//
+// EIGEN_STRONG_INLINE ReverseInnerIterator(const XprType& unaryOp, typename XprType::Index outer)
+// : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+// {}
+//
+// EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
+// { Base::operator--(); return *this; }
+//
+// EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); }
+// EIGEN_STRONG_INLINE Scalar& valueRef() { return m_functor(Base::valueRef()); }
+//
+// protected:
+// const ViewOp m_functor;
+// };
+
+
+} // end namespace internal
+
+#endif // EIGEN_TEST_EVALUATORS
+
template<typename Derived>
EIGEN_STRONG_INLINE Derived&
SparseMatrixBase<Derived>::operator*=(const Scalar& other)
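These unary evaluators only wrap the argument's iterator and apply the functor to each stored value, so unary expressions preserve the sparsity pattern of their argument. A hedged public-API sketch:

    #include <Eigen/SparseCore>
    #include <iostream>

    int main()
    {
      Eigen::SparseMatrix<double> A(2, 2);
      A.insert(0, 0) = -1.0;
      A.insert(1, 1) =  4.0;
      A.makeCompressed();

      // The functor is applied to the stored values only; zeros are never visited,
      // so B has the same sparsity pattern as A.
      Eigen::SparseMatrix<double> B = A.cwiseAbs();            // CwiseUnaryOp over A
      A *= 2.0;                                                // operator*= defined below

      std::cout << B.coeff(0, 0) << " " << A.coeff(1, 1) << "\n";   // prints: 1 8
    }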
diff --git a/Eigen/src/SparseCore/SparseDenseProduct.h b/Eigen/src/SparseCore/SparseDenseProduct.h
index d40e966c1..a715b8bde 100644
--- a/Eigen/src/SparseCore/SparseDenseProduct.h
+++ b/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -12,6 +12,155 @@
namespace Eigen {
+namespace internal {
+
+template <> struct product_promote_storage_type<Sparse,Dense, OuterProduct> { typedef Sparse ret; };
+template <> struct product_promote_storage_type<Dense,Sparse, OuterProduct> { typedef Sparse ret; };
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
+ typename AlphaType,
+ int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
+ bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
+struct sparse_time_dense_product_impl;
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>
+{
+ typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+ typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+ typedef typename internal::remove_all<DenseResType>::type Res;
+ typedef typename Lhs::Index Index;
+#ifndef EIGEN_TEST_EVALUATORS
+ typedef typename Lhs::InnerIterator LhsInnerIterator;
+#else
+ typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
+#endif
+ static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
+ {
+#ifndef EIGEN_TEST_EVALUATORS
+ const Lhs &lhsEval(lhs);
+#else
+ typename evaluator<Lhs>::type lhsEval(lhs);
+#endif
+ for(Index c=0; c<rhs.cols(); ++c)
+ {
+ Index n = lhs.outerSize();
+ for(Index j=0; j<n; ++j)
+ {
+ typename Res::Scalar tmp(0);
+ for(LhsInnerIterator it(lhsEval,j); it ;++it)
+ tmp += it.value() * rhs.coeff(it.index(),c);
+ res.coeffRef(j,c) = alpha * tmp;
+ }
+ }
+ }
+};
+
+template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
+struct scalar_product_traits<T1, Ref<T2/*, _Options, _StrideType*/> >
+{
+ enum {
+ Defined = 1
+ };
+ typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
+};
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
+{
+ typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+ typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+ typedef typename internal::remove_all<DenseResType>::type Res;
+ typedef typename Lhs::Index Index;
+#ifndef EIGEN_TEST_EVALUATORS
+ typedef typename Lhs::InnerIterator LhsInnerIterator;
+#else
+ typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
+#endif
+ static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
+ {
+#ifndef EIGEN_TEST_EVALUATORS
+ const Lhs &lhsEval(lhs);
+#else
+ typename evaluator<Lhs>::type lhsEval(lhs);
+#endif
+ for(Index c=0; c<rhs.cols(); ++c)
+ {
+ for(Index j=0; j<lhs.outerSize(); ++j)
+ {
+// typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
+ typename internal::scalar_product_traits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
+ for(LhsInnerIterator it(lhsEval,j); it ;++it)
+ res.coeffRef(it.index(),c) += it.value() * rhs_j;
+ }
+ }
+ }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>
+{
+ typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+ typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+ typedef typename internal::remove_all<DenseResType>::type Res;
+ typedef typename Lhs::Index Index;
+#ifndef EIGEN_TEST_EVALUATORS
+ typedef typename Lhs::InnerIterator LhsInnerIterator;
+#else
+ typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
+#endif
+ static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
+ {
+#ifndef EIGEN_TEST_EVALUATORS
+ const Lhs &lhsEval(lhs);
+#else
+ typename evaluator<Lhs>::type lhsEval(lhs);
+#endif
+ for(Index j=0; j<lhs.outerSize(); ++j)
+ {
+ typename Res::RowXpr res_j(res.row(j));
+ for(LhsInnerIterator it(lhsEval,j); it ;++it)
+ res_j += (alpha*it.value()) * rhs.row(it.index());
+ }
+ }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false>
+{
+ typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+ typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+ typedef typename internal::remove_all<DenseResType>::type Res;
+ typedef typename Lhs::Index Index;
+#ifndef EIGEN_TEST_EVALUATORS
+ typedef typename Lhs::InnerIterator LhsInnerIterator;
+#else
+ typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
+#endif
+ static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
+ {
+#ifndef EIGEN_TEST_EVALUATORS
+ const Lhs &lhsEval(lhs);
+#else
+ typename evaluator<Lhs>::type lhsEval(lhs);
+#endif
+ for(Index j=0; j<lhs.outerSize(); ++j)
+ {
+ typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
+ for(LhsInnerIterator it(lhsEval,j); it ;++it)
+ res.row(it.index()) += (alpha*it.value()) * rhs_j;
+ }
+ }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
+inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
+{
+ sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha);
+}
+
+} // end namespace internal
+
+#ifndef EIGEN_TEST_EVALUATORS
template<typename Lhs, typename Rhs, int InnerSize> struct SparseDenseProductReturnType
{
typedef SparseTimeDenseProduct<Lhs,Rhs> Type;
@@ -159,111 +308,6 @@ struct traits<SparseTimeDenseProduct<Lhs,Rhs> >
typedef MatrixXpr XprKind;
};
-template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
- typename AlphaType,
- int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
- bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
-struct sparse_time_dense_product_impl;
-
-template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
-struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>
-{
- typedef typename internal::remove_all<SparseLhsType>::type Lhs;
- typedef typename internal::remove_all<DenseRhsType>::type Rhs;
- typedef typename internal::remove_all<DenseResType>::type Res;
- typedef typename Lhs::Index Index;
- typedef typename Lhs::InnerIterator LhsInnerIterator;
- static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
- {
- for(Index c=0; c<rhs.cols(); ++c)
- {
- Index n = lhs.outerSize();
- for(Index j=0; j<n; ++j)
- {
- typename Res::Scalar tmp(0);
- for(LhsInnerIterator it(lhs,j); it ;++it)
- tmp += it.value() * rhs.coeff(it.index(),c);
- res.coeffRef(j,c) = alpha * tmp;
- }
- }
- }
-};
-
-template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
-struct scalar_product_traits<T1, Ref<T2/*, _Options, _StrideType*/> >
-{
- enum {
- Defined = 1
- };
- typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
-};
-template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
-struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
-{
- typedef typename internal::remove_all<SparseLhsType>::type Lhs;
- typedef typename internal::remove_all<DenseRhsType>::type Rhs;
- typedef typename internal::remove_all<DenseResType>::type Res;
- typedef typename Lhs::InnerIterator LhsInnerIterator;
- typedef typename Lhs::Index Index;
- static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
- {
- for(Index c=0; c<rhs.cols(); ++c)
- {
- for(Index j=0; j<lhs.outerSize(); ++j)
- {
-// typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
- typename internal::scalar_product_traits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
- for(LhsInnerIterator it(lhs,j); it ;++it)
- res.coeffRef(it.index(),c) += it.value() * rhs_j;
- }
- }
- }
-};
-
-template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
-struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>
-{
- typedef typename internal::remove_all<SparseLhsType>::type Lhs;
- typedef typename internal::remove_all<DenseRhsType>::type Rhs;
- typedef typename internal::remove_all<DenseResType>::type Res;
- typedef typename Lhs::InnerIterator LhsInnerIterator;
- typedef typename Lhs::Index Index;
- static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
- {
- for(Index j=0; j<lhs.outerSize(); ++j)
- {
- typename Res::RowXpr res_j(res.row(j));
- for(LhsInnerIterator it(lhs,j); it ;++it)
- res_j += (alpha*it.value()) * rhs.row(it.index());
- }
- }
-};
-
-template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
-struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false>
-{
- typedef typename internal::remove_all<SparseLhsType>::type Lhs;
- typedef typename internal::remove_all<DenseRhsType>::type Rhs;
- typedef typename internal::remove_all<DenseResType>::type Res;
- typedef typename Lhs::InnerIterator LhsInnerIterator;
- typedef typename Lhs::Index Index;
- static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
- {
- for(Index j=0; j<lhs.outerSize(); ++j)
- {
- typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
- for(LhsInnerIterator it(lhs,j); it ;++it)
- res.row(it.index()) += (alpha*it.value()) * rhs_j;
- }
- }
-};
-
-template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
-inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
-{
- sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha);
-}
-
} // end namespace internal
template<typename Lhs, typename Rhs>
@@ -318,6 +362,169 @@ class DenseTimeSparseProduct
DenseTimeSparseProduct& operator=(const DenseTimeSparseProduct&);
};
+// sparse * dense
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
+SparseMatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
+{
+ return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
+#endif // EIGEN_TEST_EVALUATORS
+
+#ifdef EIGEN_TEST_EVALUATORS
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, int ProductType>
+struct generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
+{
+ template<typename Dest>
+ static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
+ {
+ typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
+ typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
+ LhsNested lhsNested(lhs);
+ RhsNested rhsNested(rhs);
+
+ dst.setZero();
+ internal::sparse_time_dense_product(lhsNested, rhsNested, dst, typename Dest::Scalar(1));
+ }
+};
+
+template<typename Lhs, typename Rhs, int ProductType>
+struct generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
+{
+ template<typename Dest>
+ static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
+ {
+ typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
+ typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
+ LhsNested lhsNested(lhs);
+ RhsNested rhsNested(rhs);
+
+ dst.setZero();
+      // transpose everything
+ Transpose<Dest> dstT(dst);
+ internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, typename Dest::Scalar(1));
+ }
+};
+
+template<typename LhsT, typename RhsT, bool Transpose>
+struct sparse_dense_outer_product_evaluator
+{
+protected:
+ typedef typename conditional<Transpose,RhsT,LhsT>::type Lhs1;
+ typedef typename conditional<Transpose,LhsT,RhsT>::type Rhs;
+ typedef Product<LhsT,RhsT> ProdXprType;
+
+ // if the actual left-hand side is a dense vector,
+  // then build a sparse-view so that we can seamlessly iterate over it.
+ typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
+ Lhs1, SparseView<Lhs1> >::type Lhs;
+ typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
+ Lhs1 const&, SparseView<Lhs1> >::type LhsArg;
+
+ typedef typename evaluator<Lhs>::type LhsEval;
+ typedef typename evaluator<Rhs>::type RhsEval;
+ typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
+ typedef typename ProdXprType::Scalar Scalar;
+ typedef typename ProdXprType::Index Index;
+
+public:
+ enum {
+ Flags = Transpose ? RowMajorBit : 0,
+ CoeffReadCost = Dynamic
+ };
+
+ class InnerIterator : public LhsIterator
+ {
+ public:
+ InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer)
+ : LhsIterator(xprEval.m_lhsXprImpl, 0),
+ m_outer(outer),
+ m_empty(false),
+ m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<Rhs>::StorageKind() ))
+ {}
+
+ EIGEN_STRONG_INLINE Index outer() const { return m_outer; }
+ EIGEN_STRONG_INLINE Index row() const { return Transpose ? m_outer : LhsIterator::index(); }
+ EIGEN_STRONG_INLINE Index col() const { return Transpose ? LhsIterator::index() : m_outer; }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; }
+ EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); }
+
+
+ protected:
+ Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const
+ {
+ return rhs.coeff(outer);
+ }
+
+ Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse())
+ {
+ typename RhsEval::InnerIterator it(rhs, outer);
+ if (it && it.index()==0 && it.value()!=Scalar(0))
+ return it.value();
+ m_empty = true;
+ return Scalar(0);
+ }
+
+ Index m_outer;
+ bool m_empty;
+ Scalar m_factor;
+ };
+
+ sparse_dense_outer_product_evaluator(const Lhs &lhs, const Rhs &rhs)
+ : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
+ {}
+
+ // transpose case
+ sparse_dense_outer_product_evaluator(const Rhs &rhs, const Lhs1 &lhs)
+ : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
+ {}
+
+protected:
+ const LhsArg m_lhs;
+ typename evaluator<Lhs>::nestedType m_lhsXprImpl;
+ typename evaluator<Rhs>::nestedType m_rhsXprImpl;
+};
+
+// sparse * dense outer product
+template<typename Lhs, typename Rhs>
+struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, SparseShape, DenseShape, typename traits<Lhs>::Scalar, typename traits<Rhs>::Scalar>
+ : sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor>
+{
+ typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> Base;
+
+ typedef Product<Lhs, Rhs> XprType;
+ typedef typename XprType::PlainObject PlainObject;
+
+ product_evaluator(const XprType& xpr)
+ : Base(xpr.lhs(), xpr.rhs())
+ {}
+
+};
+
+template<typename Lhs, typename Rhs>
+struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, DenseShape, SparseShape, typename traits<Lhs>::Scalar, typename traits<Rhs>::Scalar>
+ : sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor>
+{
+ typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> Base;
+
+ typedef Product<Lhs, Rhs> XprType;
+ typedef typename XprType::PlainObject PlainObject;
+
+ product_evaluator(const XprType& xpr)
+ : Base(xpr.lhs(), xpr.rhs())
+ {}
+
+};
+
+} // end namespace internal
+
+#endif // EIGEN_TEST_EVALUATORS
+
} // end namespace Eigen
#endif // EIGEN_SPARSEDENSEPRODUCT_H
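For reference, a minimal user-level sketch of the expressions served by the sparse-times-dense kernels and the outer-product evaluator above (hypothetical standalone example, not part of the patch; the sparse outer-product assignment relies on the sparse*dense outer-product overloads this file provides):

#include <Eigen/SparseCore>
#include <Eigen/Dense>

int main()
{
  // Sparse matrix times dense vector: handled by sparse_time_dense_product.
  Eigen::SparseMatrix<double> A(4,3);
  A.insert(0,0) = 1.0;
  A.insert(2,1) = 2.0;
  A.insert(3,2) = -1.0;
  A.makeCompressed();

  Eigen::VectorXd x(3);
  x << 1.0, 0.5, 2.0;
  Eigen::VectorXd y = A * x;              // dense result

  // Outer product of a sparse column vector with a dense row vector:
  // the case covered by sparse_dense_outer_product_evaluator.
  Eigen::SparseVector<double> s(4);
  s.insert(1) = 2.0;
  Eigen::RowVectorXd r(3);
  r << 1.0, 0.0, 3.0;
  Eigen::SparseMatrix<double> O = s * r;  // sparse result, only row 1 is non-zero

  return 0;
}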
diff --git a/Eigen/src/SparseCore/SparseDiagonalProduct.h b/Eigen/src/SparseCore/SparseDiagonalProduct.h
index c056b4914..9f465a828 100644
--- a/Eigen/src/SparseCore/SparseDiagonalProduct.h
+++ b/Eigen/src/SparseCore/SparseDiagonalProduct.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -24,8 +24,10 @@ namespace Eigen {
// for that particular case
// The two other cases are symmetric.
+#ifndef EIGEN_TEST_EVALUATORS
+
namespace internal {
-
+
template<typename Lhs, typename Rhs>
struct traits<SparseDiagonalProduct<Lhs, Rhs> >
{
@@ -102,9 +104,14 @@ class SparseDiagonalProduct
LhsNested m_lhs;
RhsNested m_rhs;
};
+#endif
namespace internal {
+#ifndef EIGEN_TEST_EVALUATORS
+
+
+
template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
class sparse_diagonal_product_inner_iterator_selector
<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseRowMajor>
@@ -181,10 +188,129 @@ class sparse_diagonal_product_inner_iterator_selector
inline Index row() const { return m_outer; }
};
+#else // EIGEN_TEST_EVALUATORS
+enum {
+ SDP_AsScalarProduct,
+ SDP_AsCwiseProduct
+};
+
+template<typename SparseXprType, typename DiagonalCoeffType, int SDP_Tag>
+struct sparse_diagonal_product_evaluator;
+
+template<typename Lhs, typename Rhs, int ProductTag>
+struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, ProductTag, DiagonalShape, SparseShape, typename traits<Lhs>::Scalar, typename traits<Rhs>::Scalar>
+ : public sparse_diagonal_product_evaluator<Rhs, typename Lhs::DiagonalVectorType, Rhs::Flags&RowMajorBit?SDP_AsScalarProduct:SDP_AsCwiseProduct>
+{
+ typedef Product<Lhs, Rhs, DefaultProduct> XprType;
+ typedef evaluator<XprType> type;
+ typedef evaluator<XprType> nestedType;
+ enum { CoeffReadCost = Dynamic, Flags = Rhs::Flags&RowMajorBit }; // FIXME CoeffReadCost & Flags
+
+ typedef sparse_diagonal_product_evaluator<Rhs, typename Lhs::DiagonalVectorType, Rhs::Flags&RowMajorBit?SDP_AsScalarProduct:SDP_AsCwiseProduct> Base;
+ product_evaluator(const XprType& xpr) : Base(xpr.rhs(), xpr.lhs().diagonal()) {}
+};
+
+template<typename Lhs, typename Rhs, int ProductTag>
+struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, ProductTag, SparseShape, DiagonalShape, typename traits<Lhs>::Scalar, typename traits<Rhs>::Scalar>
+ : public sparse_diagonal_product_evaluator<Lhs, Transpose<const typename Rhs::DiagonalVectorType>, Lhs::Flags&RowMajorBit?SDP_AsCwiseProduct:SDP_AsScalarProduct>
+{
+ typedef Product<Lhs, Rhs, DefaultProduct> XprType;
+ typedef evaluator<XprType> type;
+ typedef evaluator<XprType> nestedType;
+ enum { CoeffReadCost = Dynamic, Flags = Lhs::Flags&RowMajorBit }; // FIXME CoeffReadCost & Flags
+
+ typedef sparse_diagonal_product_evaluator<Lhs, Transpose<const typename Rhs::DiagonalVectorType>, Lhs::Flags&RowMajorBit?SDP_AsCwiseProduct:SDP_AsScalarProduct> Base;
+ product_evaluator(const XprType& xpr) : Base(xpr.lhs(), xpr.rhs().diagonal()) {}
+};
+
+template<typename SparseXprType, typename DiagonalCoeffType>
+struct sparse_diagonal_product_evaluator<SparseXprType, DiagonalCoeffType, SDP_AsScalarProduct>
+{
+protected:
+ typedef typename evaluator<SparseXprType>::InnerIterator SparseXprInnerIterator;
+ typedef typename SparseXprType::Scalar Scalar;
+ typedef typename SparseXprType::Index Index;
+
+public:
+ class InnerIterator : public SparseXprInnerIterator
+ {
+ public:
+ InnerIterator(const sparse_diagonal_product_evaluator &xprEval, Index outer)
+ : SparseXprInnerIterator(xprEval.m_sparseXprImpl, outer),
+ m_coeff(xprEval.m_diagCoeffImpl.coeff(outer))
+ {}
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_coeff * SparseXprInnerIterator::value(); }
+ protected:
+ typename DiagonalCoeffType::Scalar m_coeff;
+ };
+
+ sparse_diagonal_product_evaluator(const SparseXprType &sparseXpr, const DiagonalCoeffType &diagCoeff)
+ : m_sparseXprImpl(sparseXpr), m_diagCoeffImpl(diagCoeff)
+ {}
+
+protected:
+ typename evaluator<SparseXprType>::nestedType m_sparseXprImpl;
+ typename evaluator<DiagonalCoeffType>::nestedType m_diagCoeffImpl;
+};
+
+
+template<typename SparseXprType, typename DiagCoeffType>
+struct sparse_diagonal_product_evaluator<SparseXprType, DiagCoeffType, SDP_AsCwiseProduct>
+{
+ typedef typename SparseXprType::Scalar Scalar;
+ typedef typename SparseXprType::Index Index;
+
+ typedef CwiseBinaryOp<scalar_product_op<Scalar>,
+ const typename SparseXprType::ConstInnerVectorReturnType,
+ const DiagCoeffType> CwiseProductType;
+
+ typedef typename evaluator<CwiseProductType>::type CwiseProductEval;
+ typedef typename evaluator<CwiseProductType>::InnerIterator CwiseProductIterator;
+
+ class InnerIterator
+ {
+ public:
+ InnerIterator(const sparse_diagonal_product_evaluator &xprEval, Index outer)
+ : m_cwiseEval(xprEval.m_sparseXprNested.innerVector(outer).cwiseProduct(xprEval.m_diagCoeffNested)),
+ m_cwiseIter(m_cwiseEval, 0),
+ m_outer(outer)
+ {}
+
+ inline Scalar value() const { return m_cwiseIter.value(); }
+ inline Index index() const { return m_cwiseIter.index(); }
+ inline Index outer() const { return m_outer; }
+ inline Index col() const { return SparseXprType::IsRowMajor ? m_cwiseIter.index() : m_outer; }
+ inline Index row() const { return SparseXprType::IsRowMajor ? m_outer : m_cwiseIter.index(); }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ { ++m_cwiseIter; return *this; }
+ inline operator bool() const { return m_cwiseIter; }
+
+ protected:
+ CwiseProductEval m_cwiseEval;
+ CwiseProductIterator m_cwiseIter;
+ Index m_outer;
+ };
+
+ sparse_diagonal_product_evaluator(const SparseXprType &sparseXpr, const DiagCoeffType &diagCoeff)
+ : m_sparseXprNested(sparseXpr), m_diagCoeffNested(diagCoeff)
+ {}
+
+protected:
+ typename nested_eval<SparseXprType,1>::type m_sparseXprNested;
+ typename nested_eval<DiagCoeffType,SparseXprType::IsRowMajor ? SparseXprType::RowsAtCompileTime
+ : SparseXprType::ColsAtCompileTime>::type m_diagCoeffNested;
+};
+
+#endif // EIGEN_TEST_EVALUATORS
+
+
} // end namespace internal
-// SparseMatrixBase functions
+#ifndef EIGEN_TEST_EVALUATORS
+// SparseMatrixBase functions
template<typename Derived>
template<typename OtherDerived>
const SparseDiagonalProduct<Derived,OtherDerived>
@@ -192,6 +318,7 @@ SparseMatrixBase<Derived>::operator*(const DiagonalBase<OtherDerived> &other) co
{
return SparseDiagonalProduct<Derived,OtherDerived>(this->derived(), other.derived());
}
+#endif // EIGEN_TEST_EVALUATORS
} // end namespace Eigen
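A minimal sketch of the diagonal-product expressions these evaluators back (hypothetical example; uses only the public SparseCore API):

#include <Eigen/SparseCore>
#include <Eigen/Dense>

int main()
{
  Eigen::SparseMatrix<double> A(3,3);
  A.insert(0,0) = 1.0;
  A.insert(1,2) = 4.0;
  A.insert(2,1) = -2.0;

  Eigen::VectorXd d(3);
  d << 1.0, 2.0, 3.0;

  // diagonal * sparse scales the rows of A, sparse * diagonal scales its columns;
  // both stay sparse and go through sparse_diagonal_product_evaluator.
  Eigen::SparseMatrix<double> rowScaled = d.asDiagonal() * A;
  Eigen::SparseMatrix<double> colScaled = A * d.asDiagonal();

  return 0;
}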
diff --git a/Eigen/src/SparseCore/SparseDot.h b/Eigen/src/SparseCore/SparseDot.h
index db39c9aec..a63cb003c 100644
--- a/Eigen/src/SparseCore/SparseDot.h
+++ b/Eigen/src/SparseCore/SparseDot.h
@@ -26,7 +26,12 @@ SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
eigen_assert(size() == other.size());
eigen_assert(other.size()>0 && "you are using a non initialized vector");
+#ifndef EIGEN_TEST_EVALUATORS
typename Derived::InnerIterator i(derived(),0);
+#else
+ typename internal::evaluator<Derived>::type thisEval(derived());
+ typename internal::evaluator<Derived>::InnerIterator i(thisEval, 0);
+#endif
Scalar res(0);
while (i)
{
@@ -49,6 +54,7 @@ SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) cons
eigen_assert(size() == other.size());
+#ifndef EIGEN_TEST_EVALUATORS
typedef typename Derived::Nested Nested;
typedef typename OtherDerived::Nested OtherNested;
typedef typename internal::remove_all<Nested>::type NestedCleaned;
@@ -59,6 +65,13 @@ SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) cons
typename NestedCleaned::InnerIterator i(nthis,0);
typename OtherNestedCleaned::InnerIterator j(nother,0);
+#else
+ typename internal::evaluator<Derived>::type thisEval(derived());
+ typename internal::evaluator<Derived>::InnerIterator i(thisEval, 0);
+
+ typename internal::evaluator<OtherDerived>::type otherEval(other.derived());
+ typename internal::evaluator<OtherDerived>::InnerIterator j(otherEval, 0);
+#endif
Scalar res(0);
while (i && j)
{
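The dot() overloads above simply walk the two inner iterators in lock-step; a small usage sketch of the calls they serve (hypothetical example, values chosen arbitrarily):

#include <Eigen/SparseCore>
#include <Eigen/Dense>

int main()
{
  Eigen::SparseVector<double> u(5), v(5);
  u.insert(1) = 2.0;  u.insert(4) = -1.0;
  v.insert(1) = 3.0;  v.insert(2) =  5.0;

  Eigen::VectorXd w = Eigen::VectorXd::LinSpaced(5, 1.0, 5.0); // [1,2,3,4,5]

  double sparse_sparse = u.dot(v); // only the common non-zero (index 1) contributes: 2*3 = 6
  double sparse_dense  = u.dot(w); // 2*2 + (-1)*5 = -1
  return 0;
}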
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h
index 2ed2f3ebd..9e2dae1b3 100644
--- a/Eigen/src/SparseCore/SparseMatrix.h
+++ b/Eigen/src/SparseCore/SparseMatrix.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -52,7 +52,9 @@ struct traits<SparseMatrix<_Scalar, _Options, _Index> >
MaxRowsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic,
Flags = _Options | NestByRefBit | LvalueBit,
+#ifndef EIGEN_TEST_EVALUATORS
CoeffReadCost = NumTraits<Scalar>::ReadCost,
+#endif
SupportedAccessPatterns = InnerRandomAccessPattern
};
};
@@ -74,8 +76,10 @@ struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex>
ColsAtCompileTime = 1,
MaxRowsAtCompileTime = Dynamic,
MaxColsAtCompileTime = 1,
- Flags = 0,
- CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
+ Flags = 0
+#ifndef EIGEN_TEST_EVALUATORS
+ , CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
+#endif
};
};
@@ -649,7 +653,13 @@ class SparseMatrix
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
check_template_parameters();
+#ifndef EIGEN_TEST_EVALUATORS
*this = other.derived();
+#else
+ const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
+ if (needToTranspose) *this = other.derived();
+ else internal::call_assignment_no_alias(*this, other.derived());
+#endif
}
/** Constructs a sparse matrix from the sparse selfadjoint view \a other */
@@ -658,7 +668,11 @@ class SparseMatrix
: m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
+#ifndef EIGEN_TEST_EVALUATORS
*this = other;
+#else
+ Base::operator=(other);
+#endif
}
/** Copy constructor (it performs a deep copy) */
@@ -722,7 +736,8 @@ class SparseMatrix
return *this;
}
- #ifndef EIGEN_PARSED_BY_DOXYGEN
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+#ifndef EIGEN_TEST_EVALUATORS
template<typename Lhs, typename Rhs>
inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
{ return Base::operator=(product); }
@@ -733,11 +748,12 @@ class SparseMatrix
initAssignment(other);
return Base::operator=(other.derived());
}
-
+#endif // EIGEN_TEST_EVALUATORS
+
template<typename OtherDerived>
inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
{ return Base::operator=(other.derived()); }
- #endif
+#endif // EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
@@ -1055,6 +1071,7 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
m_data.resize(m_outerIndex[m_outerSize]);
}
+#ifndef EIGEN_TEST_EVALUATORS
template<typename Scalar, int _Options, typename _Index>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
@@ -1116,6 +1133,73 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Opt
return Base::operator=(other.derived());
}
}
+#else
+template<typename Scalar, int _Options, typename _Index>
+template<typename OtherDerived>
+EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
+{
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
+ if (needToTranspose)
+ {
+ // two passes algorithm:
+ // 1 - compute the number of coeffs per dest inner vector
+ // 2 - do the actual copy/eval
+ // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
+ typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
+ typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
+ typedef internal::evaluator<_OtherCopy> OtherCopyEval;
+ OtherCopy otherCopy(other.derived());
+ OtherCopyEval otherCopyEval(otherCopy);
+
+ SparseMatrix dest(other.rows(),other.cols());
+ Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();
+
+ // pass 1
+ // FIXME the above copy could be merged with that pass
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
+ for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
+ ++dest.m_outerIndex[it.index()];
+
+ // prefix sum
+ Index count = 0;
+ Matrix<Index,Dynamic,1> positions(dest.outerSize());
+ for (Index j=0; j<dest.outerSize(); ++j)
+ {
+ Index tmp = dest.m_outerIndex[j];
+ dest.m_outerIndex[j] = count;
+ positions[j] = count;
+ count += tmp;
+ }
+ dest.m_outerIndex[dest.outerSize()] = count;
+ // alloc
+ dest.m_data.resize(count);
+ // pass 2
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
+ {
+ for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
+ {
+ Index pos = positions[it.index()]++;
+ dest.m_data.index(pos) = j;
+ dest.m_data.value(pos) = it.value();
+ }
+ }
+ this->swap(dest);
+ return *this;
+ }
+ else
+ {
+ if(other.isRValue())
+ {
+ initAssignment(other.derived());
+ }
+ // there is no special optimization
+ return Base::operator=(other.derived());
+ }
+}
+#endif
template<typename _Scalar, int _Options, typename _Index>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
@@ -1256,6 +1340,38 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse
return (m_data.value(p) = 0);
}
+#ifdef EIGEN_ENABLE_EVALUATORS
+namespace internal {
+
+template<typename _Scalar, int _Options, typename _Index>
+struct evaluator<SparseMatrix<_Scalar,_Options,_Index> >
+ : evaluator_base<SparseMatrix<_Scalar,_Options,_Index> >
+{
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef SparseMatrix<_Scalar,_Options,_Index> SparseMatrixType;
+ typedef typename SparseMatrixType::InnerIterator InnerIterator;
+ typedef typename SparseMatrixType::ReverseInnerIterator ReverseInnerIterator;
+
+ enum {
+ CoeffReadCost = NumTraits<_Scalar>::ReadCost,
+ Flags = SparseMatrixType::Flags
+ };
+
+ evaluator() : m_matrix(0) {}
+ evaluator(const SparseMatrixType &mat) : m_matrix(&mat) {}
+
+ operator SparseMatrixType&() { return m_matrix->const_cast_derived(); }
+ operator const SparseMatrixType&() const { return *m_matrix; }
+
+ Scalar coeff(Index row, Index col) const { return m_matrix->coeff(row,col); }
+
+ const SparseMatrixType *m_matrix;
+};
+
+}
+#endif
+
} // end namespace Eigen
#endif // EIGEN_SPARSEMATRIX_H
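The evaluator<SparseMatrix> specialization above is what the new code paths iterate over. A rough sketch of that traversal pattern, mirroring the loops used in SparseDot and SparseRedux (internal dev-branch API guarded by the evaluator macros; the helper name is made up for illustration):

#include <Eigen/SparseCore>

// Hypothetical helper: sum all stored entries of a sparse matrix using the
// evaluator-based iteration pattern introduced by this patch.
double sumOfEntries(const Eigen::SparseMatrix<double>& mat)
{
  typedef Eigen::SparseMatrix<double> SpMat;
  Eigen::internal::evaluator<SpMat>::type matEval(mat);
  double res = 0;
  for (SpMat::Index j = 0; j < mat.outerSize(); ++j)
    for (Eigen::internal::evaluator<SpMat>::InnerIterator it(matEval, j); it; ++it)
      res += it.value();
  return res;
}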
diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h
index fb5025049..aff4c8b6f 100644
--- a/Eigen/src/SparseCore/SparseMatrixBase.h
+++ b/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -39,11 +39,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
typedef EigenBase<Derived> Base;
template<typename OtherDerived>
- Derived& operator=(const EigenBase<OtherDerived> &other)
- {
- other.derived().evalTo(derived());
- return derived();
- }
+ Derived& operator=(const EigenBase<OtherDerived> &other);
enum {
@@ -83,10 +79,12 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
* constructed from this one. See the \ref flags "list of flags".
*/
+#ifndef EIGEN_TEST_EVALUATORS
CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
/**< This is a rough measure of how expensive it is to read one coefficient from
* this expression.
*/
+#endif
IsRowMajor = Flags&RowMajorBit ? 1 : 0,
@@ -104,10 +102,9 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
Transpose<const Derived>
>::type AdjointReturnType;
-
+  // FIXME storage order does not match evaluator storage order
typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor, Index> PlainObject;
-
#ifndef EIGEN_PARSED_BY_DOXYGEN
/** This is the "real scalar" type; if the \a Scalar type is already real numbers
* (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
@@ -175,92 +172,27 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
template<typename OtherDerived>
- Derived& operator=(const ReturnByValue<OtherDerived>& other)
- {
- other.evalTo(derived());
- return derived();
- }
-
+ Derived& operator=(const ReturnByValue<OtherDerived>& other);
template<typename OtherDerived>
- inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other)
- {
- return assign(other.derived());
- }
+ inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other);
- inline Derived& operator=(const Derived& other)
- {
-// if (other.isRValue())
-// derived().swap(other.const_cast_derived());
-// else
- return assign(other.derived());
- }
+ inline Derived& operator=(const Derived& other);
protected:
template<typename OtherDerived>
- inline Derived& assign(const OtherDerived& other)
- {
- const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
- const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? Index(other.rows()) : Index(other.cols());
- if ((!transpose) && other.isRValue())
- {
- // eval without temporary
- derived().resize(Index(other.rows()), Index(other.cols()));
- derived().setZero();
- derived().reserve((std::max)(this->rows(),this->cols())*2);
- for (Index j=0; j<outerSize; ++j)
- {
- derived().startVec(j);
- for (typename OtherDerived::InnerIterator it(other, typename OtherDerived::Index(j)); it; ++it)
- {
- Scalar v = it.value();
- derived().insertBackByOuterInner(j,Index(it.index())) = v;
- }
- }
- derived().finalize();
- }
- else
- {
- assignGeneric(other);
- }
- return derived();
- }
+ inline Derived& assign(const OtherDerived& other);
template<typename OtherDerived>
- inline void assignGeneric(const OtherDerived& other)
- {
- //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
- eigen_assert(( ((internal::traits<Derived>::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
- (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) &&
- "the transpose operation is supposed to be handled in SparseMatrix::operator=");
-
- enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) };
-
- const Index outerSize = Index(other.outerSize());
- //typedef typename internal::conditional<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::type TempType;
- // thanks to shallow copies, we always eval to a tempary
- Derived temp(Index(other.rows()), Index(other.cols()));
-
- temp.reserve((std::max)(this->rows(),this->cols())*2);
- for (Index j=0; j<outerSize; ++j)
- {
- temp.startVec(j);
- for (typename OtherDerived::InnerIterator it(other.derived(), typename OtherDerived::Index(j)); it; ++it)
- {
- Scalar v = it.value();
- temp.insertBackByOuterInner(Flip?Index(it.index()):j,Flip?j:Index(it.index())) = v;
- }
- }
- temp.finalize();
-
- derived() = temp.markAsRValue();
- }
+ inline void assignGeneric(const OtherDerived& other);
public:
+#ifndef EIGEN_TEST_EVALUATORS
template<typename Lhs, typename Rhs>
inline Derived& operator=(const SparseSparseProduct<Lhs,Rhs>& product);
+#endif
friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m)
{
@@ -333,11 +265,12 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
cwiseProduct(const MatrixBase<OtherDerived> &other) const;
+#ifndef EIGEN_TEST_EVALUATORS
// sparse * sparse
template<typename OtherDerived>
const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
operator*(const SparseMatrixBase<OtherDerived> &other) const;
-
+
// sparse * diagonal
template<typename OtherDerived>
const SparseDiagonalProduct<Derived,OtherDerived>
@@ -348,7 +281,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
const SparseDiagonalProduct<OtherDerived,Derived>
operator*(const DiagonalBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
{ return SparseDiagonalProduct<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
-
+
/** dense * sparse (return a dense object unless it is an outer product) */
template<typename OtherDerived> friend
const typename DenseSparseProductReturnType<OtherDerived,Derived>::Type
@@ -361,6 +294,37 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
operator*(const MatrixBase<OtherDerived> &other) const
{ return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived()); }
+#else // EIGEN_TEST_EVALUATORS
+ // sparse * diagonal
+ template<typename OtherDerived>
+ const Product<Derived,OtherDerived>
+ operator*(const DiagonalBase<OtherDerived> &other) const
+ { return Product<Derived,OtherDerived>(derived(), other.derived()); }
+
+ // diagonal * sparse
+ template<typename OtherDerived> friend
+ const Product<OtherDerived,Derived>
+ operator*(const DiagonalBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
+ { return Product<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
+
+ // sparse * sparse
+ template<typename OtherDerived>
+ const Product<Derived,OtherDerived>
+ operator*(const SparseMatrixBase<OtherDerived> &other) const;
+
+ // sparse * dense
+ template<typename OtherDerived>
+ const Product<Derived,OtherDerived>
+ operator*(const MatrixBase<OtherDerived> &other) const
+ { return Product<Derived,OtherDerived>(derived(), other.derived()); }
+
+ // dense * sparse
+ template<typename OtherDerived> friend
+ const Product<OtherDerived,Derived>
+ operator*(const MatrixBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
+ { return Product<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
+#endif // EIGEN_TEST_EVALUATORS
+
/** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */
SparseSymmetricPermutationProduct<Derived,Upper|Lower> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
{
@@ -371,7 +335,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
Derived& operator*=(const SparseMatrixBase<OtherDerived>& other);
template<int Mode>
- inline const SparseTriangularView<Derived, Mode> triangularView() const;
+ inline const TriangularView<Derived, Mode> triangularView() const;
template<unsigned int UpLo> inline const SparseSelfAdjointView<Derived, UpLo> selfadjointView() const;
template<unsigned int UpLo> inline SparseSelfAdjointView<Derived, UpLo> selfadjointView();
@@ -396,6 +360,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
Block<Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize);
const Block<const Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize) const;
+#ifndef EIGEN_TEST_EVALUATORS
/** \internal use operator= */
template<typename DenseDerived>
void evalTo(MatrixBase<DenseDerived>& dst) const
@@ -405,6 +370,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
for (typename Derived::InnerIterator i(derived(),typename Derived::Index(j)); i; ++i)
dst.coeffRef(i.row(),i.col()) = i.value();
}
+#endif
Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
{
@@ -430,6 +396,9 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
{ return typename internal::eval<Derived>::type(derived()); }
Scalar sum() const;
+
+ inline const SparseView<Derived>
+ pruned(const Scalar& reference = Scalar(0), const RealScalar& epsilon = NumTraits<Scalar>::dummy_precision()) const;
protected:
diff --git a/Eigen/src/SparseCore/SparsePermutation.h b/Eigen/src/SparseCore/SparsePermutation.h
index b85be93f6..a888ae9e1 100644
--- a/Eigen/src/SparseCore/SparsePermutation.h
+++ b/Eigen/src/SparseCore/SparsePermutation.h
@@ -103,7 +103,7 @@ struct permut_sparsematrix_product_retval
}
-
+#ifndef EIGEN_TEST_EVALUATORS
/** \returns the matrix with the permutation applied to the columns
*/
@@ -143,6 +143,139 @@ operator*(const Transpose<PermutationBase<PermDerived> >& tperm, const SparseMat
return internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheLeft, true>(tperm.nestedPermutation(), matrix.derived());
}
+#else // EIGEN_TEST_EVALUATORS
+
+namespace internal {
+
+template <int ProductTag> struct product_promote_storage_type<Sparse, PermutationStorage, ProductTag> { typedef Sparse ret; };
+template <int ProductTag> struct product_promote_storage_type<PermutationStorage, Sparse, ProductTag> { typedef Sparse ret; };
+
+// TODO, the following need cleaning, this is just a copy-paste of the dense case
+
+template<typename Lhs, typename Rhs, int ProductTag>
+struct generic_product_impl<Lhs, Rhs, PermutationShape, SparseShape, ProductTag>
+{
+ template<typename Dest>
+ static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
+ {
+ permut_sparsematrix_product_retval<Lhs, Rhs, OnTheLeft, false> pmpr(lhs, rhs);
+ pmpr.evalTo(dst);
+ }
+};
+
+template<typename Lhs, typename Rhs, int ProductTag>
+struct generic_product_impl<Lhs, Rhs, SparseShape, PermutationShape, ProductTag>
+{
+ template<typename Dest>
+ static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
+ {
+ permut_sparsematrix_product_retval<Rhs, Lhs, OnTheRight, false> pmpr(rhs, lhs);
+ pmpr.evalTo(dst);
+ }
+};
+
+template<typename Lhs, typename Rhs, int ProductTag>
+struct generic_product_impl<Transpose<Lhs>, Rhs, PermutationShape, SparseShape, ProductTag>
+{
+ template<typename Dest>
+ static void evalTo(Dest& dst, const Transpose<Lhs>& lhs, const Rhs& rhs)
+ {
+ permut_sparsematrix_product_retval<Lhs, Rhs, OnTheLeft, true> pmpr(lhs.nestedPermutation(), rhs);
+ pmpr.evalTo(dst);
+ }
+};
+
+template<typename Lhs, typename Rhs, int ProductTag>
+struct generic_product_impl<Lhs, Transpose<Rhs>, SparseShape, PermutationShape, ProductTag>
+{
+ template<typename Dest>
+ static void evalTo(Dest& dst, const Lhs& lhs, const Transpose<Rhs>& rhs)
+ {
+ permut_sparsematrix_product_retval<Rhs, Lhs, OnTheRight, true> pmpr(rhs.nestedPermutation(), lhs);
+ pmpr.evalTo(dst);
+ }
+};
+
+// TODO, the following two overloads are only needed to define the right temporary type through
+// typename traits<permut_sparsematrix_product_retval<Rhs,Lhs,OnTheRight,false> >::ReturnType
+// while it should be correctly handled by traits<Product<> >::PlainObject
+
+template<typename Lhs, typename Rhs, int ProductTag>
+struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, ProductTag, PermutationShape, SparseShape, typename traits<Lhs>::Scalar, typename traits<Rhs>::Scalar>
+ : public evaluator<typename traits<permut_sparsematrix_product_retval<Lhs,Rhs,OnTheRight,false> >::ReturnType>::type
+{
+ typedef Product<Lhs, Rhs, DefaultProduct> XprType;
+ typedef typename traits<permut_sparsematrix_product_retval<Lhs,Rhs,OnTheRight,false> >::ReturnType PlainObject;
+ typedef typename evaluator<PlainObject>::type Base;
+
+ product_evaluator(const XprType& xpr)
+ : m_result(xpr.rows(), xpr.cols())
+ {
+ ::new (static_cast<Base*>(this)) Base(m_result);
+ generic_product_impl<Lhs, Rhs, PermutationShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());
+ }
+
+protected:
+ PlainObject m_result;
+};
+
+template<typename Lhs, typename Rhs, int ProductTag>
+struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, ProductTag, SparseShape, PermutationShape, typename traits<Lhs>::Scalar, typename traits<Rhs>::Scalar>
+ : public evaluator<typename traits<permut_sparsematrix_product_retval<Rhs,Lhs,OnTheRight,false> >::ReturnType>::type
+{
+ typedef Product<Lhs, Rhs, DefaultProduct> XprType;
+ typedef typename traits<permut_sparsematrix_product_retval<Rhs,Lhs,OnTheRight,false> >::ReturnType PlainObject;
+ typedef typename evaluator<PlainObject>::type Base;
+
+ product_evaluator(const XprType& xpr)
+ : m_result(xpr.rows(), xpr.cols())
+ {
+ ::new (static_cast<Base*>(this)) Base(m_result);
+ generic_product_impl<Lhs, Rhs, SparseShape, PermutationShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());
+ }
+
+protected:
+ PlainObject m_result;
+};
+
+} // end namespace internal
+
+/** \returns the matrix with the permutation applied to the columns
+ */
+template<typename SparseDerived, typename PermDerived>
+inline const Product<SparseDerived, PermDerived>
+operator*(const SparseMatrixBase<SparseDerived>& matrix, const PermutationBase<PermDerived>& perm)
+{ return Product<SparseDerived, PermDerived>(matrix.derived(), perm.derived()); }
+
+/** \returns the matrix with the permutation applied to the rows
+ */
+template<typename SparseDerived, typename PermDerived>
+inline const Product<PermDerived, SparseDerived>
+operator*( const PermutationBase<PermDerived>& perm, const SparseMatrixBase<SparseDerived>& matrix)
+{ return Product<PermDerived, SparseDerived>(perm.derived(), matrix.derived()); }
+
+
+// TODO, the following specializations should not be needed as Transpose<Permutation*> should be a PermutationBase.
+/** \returns the matrix with the inverse permutation applied to the columns.
+ */
+template<typename SparseDerived, typename PermDerived>
+inline const Product<SparseDerived, Transpose<PermutationBase<PermDerived> > >
+operator*(const SparseMatrixBase<SparseDerived>& matrix, const Transpose<PermutationBase<PermDerived> >& tperm)
+{
+ return Product<SparseDerived, Transpose<PermutationBase<PermDerived> > >(matrix.derived(), tperm);
+}
+
+/** \returns the matrix with the inverse permutation applied to the rows.
+ */
+template<typename SparseDerived, typename PermDerived>
+inline const Product<Transpose<PermutationBase<PermDerived> >, SparseDerived>
+operator*(const Transpose<PermutationBase<PermDerived> >& tperm, const SparseMatrixBase<SparseDerived>& matrix)
+{
+ return Product<Transpose<PermutationBase<PermDerived> >, SparseDerived>(tperm, matrix.derived());
+}
+
+#endif // EIGEN_TEST_EVALUATORS
+
} // end namespace Eigen
#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
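At the user level, the permutation products defined above cover expressions such as the following (a minimal sketch, not part of the patch):

#include <Eigen/SparseCore>

int main()
{
  Eigen::SparseMatrix<double> A(3,3);
  A.insert(0,1) = 1.0;
  A.insert(2,0) = 2.0;

  Eigen::PermutationMatrix<Eigen::Dynamic,Eigen::Dynamic> P(3);
  P.indices() << 2, 0, 1;

  Eigen::SparseMatrix<double> rowPermuted = P * A; // permutes the rows of A
  Eigen::SparseMatrix<double> colPermuted = A * P; // permutes the columns of A
  return 0;
}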
diff --git a/Eigen/src/SparseCore/SparseProduct.h b/Eigen/src/SparseCore/SparseProduct.h
index cf7663070..18f40b9d9 100644
--- a/Eigen/src/SparseCore/SparseProduct.h
+++ b/Eigen/src/SparseCore/SparseProduct.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -12,6 +12,8 @@
namespace Eigen {
+#ifndef EIGEN_TEST_EVALUATORS
+
template<typename Lhs, typename Rhs>
struct SparseSparseProductReturnType
{
@@ -183,6 +185,79 @@ SparseMatrixBase<Derived>::operator*(const SparseMatrixBase<OtherDerived> &other
return typename SparseSparseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
}
+#else // EIGEN_TEST_EVALUATORS
+
+
+/** \returns an expression of the product of two sparse matrices.
+ * By default a conservative product preserving the symbolic non-zeros is performed.
+ * Automatic pruning of small values can be achieved by calling the pruned() function,
+ * in which case a totally different product algorithm is employed:
+ * \code
+ * C = (A*B).pruned(); // suppress numerical zeros (exact)
+ * C = (A*B).pruned(ref);
+ * C = (A*B).pruned(ref,epsilon);
+ * \endcode
+ * where \c ref is a meaningful non-zero reference value.
+ * */
+template<typename Derived>
+template<typename OtherDerived>
+inline const Product<Derived,OtherDerived>
+SparseMatrixBase<Derived>::operator*(const SparseMatrixBase<OtherDerived> &other) const
+{
+ return Product<Derived,OtherDerived>(derived(), other.derived());
+}
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, int ProductType>
+struct generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>
+{
+ template<typename Dest>
+ static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
+ {
+ typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
+ typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
+ LhsNested lhsNested(lhs);
+ RhsNested rhsNested(rhs);
+ internal::conservative_sparse_sparse_product_selector<typename remove_all<LhsNested>::type,
+ typename remove_all<RhsNested>::type, Dest>::run(lhsNested,rhsNested,dst);
+ }
+};
+
+template<typename Lhs, typename Rhs, int Options>
+struct evaluator<SparseView<Product<Lhs, Rhs, Options> > >
+ : public evaluator<typename Product<Lhs, Rhs, DefaultProduct>::PlainObject>::type
+{
+ typedef SparseView<Product<Lhs, Rhs, Options> > XprType;
+ typedef typename XprType::PlainObject PlainObject;
+ typedef typename evaluator<PlainObject>::type Base;
+
+ typedef evaluator type;
+ typedef evaluator nestedType;
+
+ evaluator(const XprType& xpr)
+ : m_result(xpr.rows(), xpr.cols())
+ {
+ using std::abs;
+ ::new (static_cast<Base*>(this)) Base(m_result);
+ typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
+ typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
+ LhsNested lhsNested(xpr.nestedExpression().lhs());
+ RhsNested rhsNested(xpr.nestedExpression().rhs());
+
+ internal::sparse_sparse_product_with_pruning_selector<typename remove_all<LhsNested>::type,
+ typename remove_all<RhsNested>::type, PlainObject>::run(lhsNested,rhsNested,m_result,
+ abs(xpr.reference())*xpr.epsilon());
+ }
+
+protected:
+ PlainObject m_result;
+};
+
+} // end namespace internal
+
+#endif // EIGEN_TEST_EVALUATORS
+
} // end namespace Eigen
#endif // EIGEN_SPARSEPRODUCT_H
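A usage sketch of the conservative product and the pruned() variants documented above (hypothetical sizes and values):

#include <Eigen/SparseCore>

int main()
{
  Eigen::SparseMatrix<double> A(100,100), B(100,100), C;
  A.insert(0,0) =  2.0;  A.insert(5,7) = -3.0;
  B.insert(0,3) =  4.0;  B.insert(7,7) =  0.5;

  C = A * B;                      // conservative product: keeps symbolic non-zeros
  C = (A * B).pruned();           // suppresses exact numerical zeros
  C = (A * B).pruned(1.0, 1e-12); // prunes entries smaller than ref*epsilon in magnitude
  return 0;
}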
diff --git a/Eigen/src/SparseCore/SparseRedux.h b/Eigen/src/SparseCore/SparseRedux.h
index f3da93a71..cf78d0e91 100644
--- a/Eigen/src/SparseCore/SparseRedux.h
+++ b/Eigen/src/SparseCore/SparseRedux.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -18,8 +18,14 @@ SparseMatrixBase<Derived>::sum() const
{
eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
Scalar res(0);
+#ifndef EIGEN_TEST_EVALUATORS
for (Index j=0; j<outerSize(); ++j)
for (typename Derived::InnerIterator iter(derived(),j); iter; ++iter)
+#else
+ typename internal::evaluator<Derived>::type thisEval(derived());
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename internal::evaluator<Derived>::InnerIterator iter(thisEval,j); iter; ++iter)
+#endif
res += iter.value();
return res;
}
diff --git a/Eigen/src/SparseCore/SparseSelfAdjointView.h b/Eigen/src/SparseCore/SparseSelfAdjointView.h
index 56c922929..4235d6c4c 100644
--- a/Eigen/src/SparseCore/SparseSelfAdjointView.h
+++ b/Eigen/src/SparseCore/SparseSelfAdjointView.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -12,13 +12,23 @@
namespace Eigen {
+#ifndef EIGEN_TEST_EVALUATORS
+
+template<typename Lhs, typename Rhs, int Mode>
+class SparseSelfAdjointTimeDenseProduct;
+
+template<typename Lhs, typename Rhs, int Mode>
+class DenseTimeSparseSelfAdjointProduct;
+
+#endif // EIGEN_TEST_EVALUATORS
+
/** \ingroup SparseCore_Module
* \class SparseSelfAdjointView
*
* \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
*
* \param MatrixType the type of the dense matrix storing the coefficients
- * \param UpLo can be either \c #Lower or \c #Upper
+ * \param Mode can be either \c #Lower or \c #Upper
*
* This class is an expression of a sefladjoint matrix from a triangular part of a matrix
* with given dense storage of the coefficients. It is the return type of MatrixBase::selfadjointView()
@@ -26,37 +36,33 @@ namespace Eigen {
*
* \sa SparseMatrixBase::selfadjointView()
*/
-template<typename Lhs, typename Rhs, int UpLo>
-class SparseSelfAdjointTimeDenseProduct;
-
-template<typename Lhs, typename Rhs, int UpLo>
-class DenseTimeSparseSelfAdjointProduct;
-
namespace internal {
-template<typename MatrixType, unsigned int UpLo>
-struct traits<SparseSelfAdjointView<MatrixType,UpLo> > : traits<MatrixType> {
+template<typename MatrixType, unsigned int Mode>
+struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
};
-template<int SrcUpLo,int DstUpLo,typename MatrixType,int DestOrder>
+template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
-template<int UpLo,typename MatrixType,int DestOrder>
+template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
}
-template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
- : public EigenBase<SparseSelfAdjointView<MatrixType,UpLo> >
+template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
+ : public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
{
public:
+
+ enum { Mode = _Mode };
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::Index Index;
typedef Matrix<Index,Dynamic,1> VectorI;
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
-
+
inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
{
eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices");
@@ -74,40 +80,76 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
* Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product.
* Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product.
*/
+#ifndef EIGEN_TEST_EVALUATORS
template<typename OtherDerived>
SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived>
operator*(const SparseMatrixBase<OtherDerived>& rhs) const
{
return SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived>(*this, rhs.derived());
}
+#else
+ template<typename OtherDerived>
+ Product<SparseSelfAdjointView, OtherDerived>
+ operator*(const SparseMatrixBase<OtherDerived>& rhs) const
+ {
+ return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());
+ }
+#endif
/** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs.
*
* Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product.
* Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product.
*/
+#ifndef EIGEN_TEST_EVALUATORS
template<typename OtherDerived> friend
SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject >
operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
{
return SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject>(lhs.derived(), rhs);
}
+#else // EIGEN_TEST_EVALUATORS
+ template<typename OtherDerived> friend
+ Product<OtherDerived, SparseSelfAdjointView>
+ operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
+ {
+ return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);
+ }
+#endif // EIGEN_TEST_EVALUATORS
/** Efficient sparse self-adjoint matrix times dense vector/matrix product */
+#ifndef EIGEN_TEST_EVALUATORS
+ template<typename OtherDerived>
+ SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,Mode>
+ operator*(const MatrixBase<OtherDerived>& rhs) const
+ {
+ return SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,Mode>(m_matrix, rhs.derived());
+ }
+#else
template<typename OtherDerived>
- SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>
+ Product<SparseSelfAdjointView,OtherDerived>
operator*(const MatrixBase<OtherDerived>& rhs) const
{
- return SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>(m_matrix, rhs.derived());
+ return Product<SparseSelfAdjointView,OtherDerived>(*this, rhs.derived());
}
+#endif
/** Efficient dense vector/matrix times sparse self-adjoint matrix product */
+#ifndef EIGEN_TEST_EVALUATORS
template<typename OtherDerived> friend
- DenseTimeSparseSelfAdjointProduct<OtherDerived,MatrixType,UpLo>
+ DenseTimeSparseSelfAdjointProduct<OtherDerived,MatrixType,Mode>
operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
{
- return DenseTimeSparseSelfAdjointProduct<OtherDerived,_MatrixTypeNested,UpLo>(lhs.derived(), rhs.m_matrix);
+ return DenseTimeSparseSelfAdjointProduct<OtherDerived,_MatrixTypeNested,Mode>(lhs.derived(), rhs.m_matrix);
}
+#else
+ template<typename OtherDerived> friend
+ Product<OtherDerived,SparseSelfAdjointView>
+ operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
+ {
+ return Product<OtherDerived,SparseSelfAdjointView>(lhs.derived(), rhs);
+ }
+#endif
/** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
* \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
@@ -123,30 +165,31 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
/** \internal triggered by sparse_matrix = SparseSelfadjointView; */
template<typename DestScalar,int StorageOrder> void evalTo(SparseMatrix<DestScalar,StorageOrder,Index>& _dest) const
{
- internal::permute_symm_to_fullsymm<UpLo>(m_matrix, _dest);
+ internal::permute_symm_to_fullsymm<Mode>(m_matrix, _dest);
}
template<typename DestScalar> void evalTo(DynamicSparseMatrix<DestScalar,ColMajor,Index>& _dest) const
{
// TODO directly evaluate into _dest;
SparseMatrix<DestScalar,ColMajor,Index> tmp(_dest.rows(),_dest.cols());
- internal::permute_symm_to_fullsymm<UpLo>(m_matrix, tmp);
+ internal::permute_symm_to_fullsymm<Mode>(m_matrix, tmp);
_dest = tmp;
}
/** \returns an expression of P H P^-1 */
- SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
+// #ifndef EIGEN_TEST_EVALUATORS
+ SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
{
- return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm);
+ return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm);
}
-
- template<typename SrcMatrixType,int SrcUpLo>
- SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcUpLo>& permutedMatrix)
+
+ template<typename SrcMatrixType,int SrcMode>
+ SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcMode>& permutedMatrix)
{
permutedMatrix.evalTo(*this);
return *this;
}
-
+// #endif // EIGEN_TEST_EVALUATORS
SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
{
@@ -154,22 +197,18 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
return *this = src.twistedBy(pnull);
}
- template<typename SrcMatrixType,unsigned int SrcUpLo>
- SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcUpLo>& src)
+ template<typename SrcMatrixType,unsigned int SrcMode>
+ SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
{
PermutationMatrix<Dynamic> pnull;
return *this = src.twistedBy(pnull);
}
-
- // const SparseLLT<PlainObject, UpLo> llt() const;
- // const SparseLDLT<PlainObject, UpLo> ldlt() const;
-
protected:
typename MatrixType::Nested m_matrix;
- mutable VectorI m_countPerRow;
- mutable VectorI m_countPerCol;
+ //mutable VectorI m_countPerRow;
+ //mutable VectorI m_countPerCol;
};
/***************************************************************************
@@ -177,15 +216,15 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
***************************************************************************/
template<typename Derived>
-template<unsigned int UpLo>
-const SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView() const
+template<unsigned int Mode>
+const SparseSelfAdjointView<Derived, Mode> SparseMatrixBase<Derived>::selfadjointView() const
{
return derived();
}
template<typename Derived>
-template<unsigned int UpLo>
-SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView()
+template<unsigned int Mode>
+SparseSelfAdjointView<Derived, Mode> SparseMatrixBase<Derived>::selfadjointView()
{
return derived();
}
@@ -194,16 +233,16 @@ SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView(
* Implementation of SparseSelfAdjointView methods
***************************************************************************/
-template<typename MatrixType, unsigned int UpLo>
+template<typename MatrixType, unsigned int Mode>
template<typename DerivedU>
-SparseSelfAdjointView<MatrixType,UpLo>&
-SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
+SparseSelfAdjointView<MatrixType,Mode>&
+SparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
{
- SparseMatrix<Scalar,MatrixType::Flags&RowMajorBit?RowMajor:ColMajor> tmp = u * u.adjoint();
+ SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();
if(alpha==Scalar(0))
- m_matrix.const_cast_derived() = tmp.template triangularView<UpLo>();
+ m_matrix.const_cast_derived() = tmp.template triangularView<Mode>();
else
- m_matrix.const_cast_derived() += alpha * tmp.template triangularView<UpLo>();
+ m_matrix.const_cast_derived() += alpha * tmp.template triangularView<Mode>();
return *this;
}
@@ -212,18 +251,19 @@ SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<Derive
* Implementation of sparse self-adjoint time dense matrix
***************************************************************************/
+#ifndef EIGEN_TEST_EVALUATORS
namespace internal {
-template<typename Lhs, typename Rhs, int UpLo>
-struct traits<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo> >
- : traits<ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
+template<typename Lhs, typename Rhs, int Mode>
+struct traits<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,Mode> >
+ : traits<ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,Mode>, Lhs, Rhs> >
{
typedef Dense StorageKind;
};
}
-template<typename Lhs, typename Rhs, int UpLo>
+template<typename Lhs, typename Rhs, int Mode>
class SparseSelfAdjointTimeDenseProduct
- : public ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+ : public ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,Mode>, Lhs, Rhs>
{
public:
EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseSelfAdjointTimeDenseProduct)
@@ -241,9 +281,9 @@ class SparseSelfAdjointTimeDenseProduct
enum {
LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit,
ProcessFirstHalf =
- ((UpLo&(Upper|Lower))==(Upper|Lower))
- || ( (UpLo&Upper) && !LhsIsRowMajor)
- || ( (UpLo&Lower) && LhsIsRowMajor),
+ ((Mode&(Upper|Lower))==(Upper|Lower))
+ || ( (Mode&Upper) && !LhsIsRowMajor)
+ || ( (Mode&Lower) && LhsIsRowMajor),
ProcessSecondHalf = !ProcessFirstHalf
};
for (typename _Lhs::Index j=0; j<m_lhs.outerSize(); ++j)
@@ -276,15 +316,15 @@ class SparseSelfAdjointTimeDenseProduct
};
namespace internal {
-template<typename Lhs, typename Rhs, int UpLo>
-struct traits<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo> >
- : traits<ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
+template<typename Lhs, typename Rhs, int Mode>
+struct traits<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,Mode> >
+ : traits<ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,Mode>, Lhs, Rhs> >
{};
}
-template<typename Lhs, typename Rhs, int UpLo>
+template<typename Lhs, typename Rhs, int Mode>
class DenseTimeSparseSelfAdjointProduct
- : public ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+ : public ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,Mode>, Lhs, Rhs>
{
public:
EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseSelfAdjointProduct)
@@ -301,16 +341,159 @@ class DenseTimeSparseSelfAdjointProduct
DenseTimeSparseSelfAdjointProduct& operator=(const DenseTimeSparseSelfAdjointProduct&);
};
+#else // EIGEN_TEST_EVALUATORS
+
+namespace internal {
+
+template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
+inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
+{
+ EIGEN_ONLY_USED_FOR_DEBUG(alpha);
+ // TODO use alpha
+ eigen_assert(alpha==AlphaType(1) && "alpha != 1 is not implemented yet, sorry");
+
+ typedef typename evaluator<SparseLhsType>::type LhsEval;
+ typedef typename evaluator<SparseLhsType>::InnerIterator LhsIterator;
+ typedef typename SparseLhsType::Index Index;
+ typedef typename SparseLhsType::Scalar LhsScalar;
+
+ enum {
+ LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
+ ProcessFirstHalf =
+ ((Mode&(Upper|Lower))==(Upper|Lower))
+ || ( (Mode&Upper) && !LhsIsRowMajor)
+ || ( (Mode&Lower) && LhsIsRowMajor),
+ ProcessSecondHalf = !ProcessFirstHalf
+ };
+
+ LhsEval lhsEval(lhs);
+
+ for (Index j=0; j<lhs.outerSize(); ++j)
+ {
+ LhsIterator i(lhsEval,j);
+ if (ProcessSecondHalf)
+ {
+ while (i && i.index()<j) ++i;
+ if(i && i.index()==j)
+ {
+ res.row(j) += i.value() * rhs.row(j);
+ ++i;
+ }
+ }
+ for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
+ {
+ Index a = LhsIsRowMajor ? j : i.index();
+ Index b = LhsIsRowMajor ? i.index() : j;
+ LhsScalar v = i.value();
+ res.row(a) += (v) * rhs.row(b);
+ res.row(b) += numext::conj(v) * rhs.row(a);
+ }
+ if (ProcessFirstHalf && i && (i.index()==j))
+ res.row(j) += i.value() * rhs.row(j);
+ }
+}
+
+// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
+// in the future selfadjoint-ness should be defined by the expression traits
+// such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
+template<typename MatrixType, unsigned int Mode>
+struct evaluator_traits<SparseSelfAdjointView<MatrixType,Mode> >
+{
+ typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
+ typedef SparseSelfAdjointShape Shape;
+
+ static const int AssumeAliasing = 0;
+};
+
+template<typename LhsView, typename Rhs, int ProductType>
+struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
+{
+ template<typename Dest>
+ static void evalTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs)
+ {
+ typedef typename LhsView::_MatrixTypeNested Lhs;
+ typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
+ typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
+ LhsNested lhsNested(lhsView.matrix());
+ RhsNested rhsNested(rhs);
+
+ dst.setZero();
+ internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, typename Dest::Scalar(1));
+ }
+};
+
+template<typename Lhs, typename RhsView, int ProductType>
+struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
+{
+ template<typename Dest>
+ static void evalTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView)
+ {
+ typedef typename RhsView::_MatrixTypeNested Rhs;
+ typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
+ typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
+ LhsNested lhsNested(lhs);
+ RhsNested rhsNested(rhsView.matrix());
+
+ dst.setZero();
+    // transpose everything
+ Transpose<Dest> dstT(dst);
+ internal::sparse_selfadjoint_time_dense_product<RhsView::Mode>(rhsNested.transpose(), lhsNested.transpose(), dstT, typename Dest::Scalar(1));
+ }
+};
+
+// NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix
+// TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore
+
+template<typename LhsView, typename Rhs, int ProductTag>
+struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, SparseShape, typename traits<LhsView>::Scalar, typename traits<Rhs>::Scalar>
+ : public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>::type
+{
+ typedef Product<LhsView, Rhs, DefaultProduct> XprType;
+ typedef typename XprType::PlainObject PlainObject;
+ typedef typename evaluator<PlainObject>::type Base;
+
+ product_evaluator(const XprType& xpr)
+ : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
+ {
+ ::new (static_cast<Base*>(this)) Base(m_result);
+ generic_product_impl<typename Rhs::PlainObject, Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs, xpr.rhs());
+ }
+
+protected:
+ typename Rhs::PlainObject m_lhs;
+ PlainObject m_result;
+};
+
+template<typename Lhs, typename RhsView, int ProductTag>
+struct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, SparseShape, SparseSelfAdjointShape, typename traits<Lhs>::Scalar, typename traits<RhsView>::Scalar>
+ : public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>::type
+{
+ typedef Product<Lhs, RhsView, DefaultProduct> XprType;
+ typedef typename XprType::PlainObject PlainObject;
+ typedef typename evaluator<PlainObject>::type Base;
+
+ product_evaluator(const XprType& xpr)
+ : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
+ {
+ ::new (static_cast<Base*>(this)) Base(m_result);
+ generic_product_impl<Lhs, typename Lhs::PlainObject, SparseShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), m_rhs);
+ }
+
+protected:
+ typename Lhs::PlainObject m_rhs;
+ PlainObject m_result;
+};
+
+} // namespace internal
+
+#endif // EIGEN_TEST_EVALUATORS
+
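The selfadjoint-times-dense kernel above serves expressions like the following, where only one triangular half of the symmetric matrix is stored (a minimal sketch, not part of the patch):

#include <Eigen/SparseCore>
#include <Eigen/Dense>

int main()
{
  // Store only the lower triangle of a symmetric matrix.
  Eigen::SparseMatrix<double> L(3,3);
  L.insert(0,0) = 4.0;
  L.insert(1,0) = 1.0;  L.insert(1,1) = 3.0;
  L.insert(2,1) = 2.0;  L.insert(2,2) = 5.0;

  Eigen::MatrixXd X = Eigen::MatrixXd::Identity(3,3);

  // Both halves are taken into account even though only Lower is stored.
  Eigen::MatrixXd Y1 = L.selfadjointView<Eigen::Lower>() * X;
  Eigen::MatrixXd Y2 = X * L.selfadjointView<Eigen::Lower>();
  return 0;
}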
/***************************************************************************
* Implementation of symmetric copies and permutations
***************************************************************************/
namespace internal {
-
-template<typename MatrixType, int UpLo>
-struct traits<SparseSymmetricPermutationProduct<MatrixType,UpLo> > : traits<MatrixType> {
-};
-template<int UpLo,typename MatrixType,int DestOrder>
+template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
{
typedef typename MatrixType::Index Index;
@@ -337,11 +520,11 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
Index r = it.row();
Index c = it.col();
Index ip = perm ? perm[i] : i;
- if(UpLo==(Upper|Lower))
+ if(Mode==(Upper|Lower))
count[StorageOrderMatch ? jp : ip]++;
else if(r==c)
count[ip]++;
- else if(( UpLo==Lower && r>c) || ( UpLo==Upper && r<c))
+ else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
{
count[ip]++;
count[jp]++;
@@ -370,7 +553,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
Index jp = perm ? perm[j] : j;
Index ip = perm ? perm[i] : i;
- if(UpLo==(Upper|Lower))
+ if(Mode==(Upper|Lower))
{
Index k = count[StorageOrderMatch ? jp : ip]++;
dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
@@ -382,7 +565,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
dest.innerIndexPtr()[k] = ip;
dest.valuePtr()[k] = it.value();
}
- else if(( (UpLo&Lower)==Lower && r>c) || ( (UpLo&Upper)==Upper && r<c))
+ else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
{
if(!StorageOrderMatch)
std::swap(ip,jp);
@@ -397,7 +580,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri
}
}
-template<int _SrcUpLo,int _DstUpLo,typename MatrixType,int DstOrder>
+template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
{
typedef typename MatrixType::Index Index;
@@ -407,8 +590,8 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
enum {
SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
StorageOrderMatch = int(SrcOrder) == int(DstOrder),
- DstUpLo = DstOrder==RowMajor ? (_DstUpLo==Upper ? Lower : Upper) : _DstUpLo,
- SrcUpLo = SrcOrder==RowMajor ? (_SrcUpLo==Upper ? Lower : Upper) : _SrcUpLo
+ DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
+ SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
};
Index size = mat.rows();
@@ -421,11 +604,11 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
{
Index i = it.index();
- if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
+ if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
continue;
Index ip = perm ? perm[i] : i;
- count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+ count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
}
}
dest.outerIndexPtr()[0] = 0;
@@ -441,17 +624,17 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
{
Index i = it.index();
- if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
+ if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
continue;
Index jp = perm ? perm[j] : j;
Index ip = perm? perm[i] : i;
- Index k = count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
- dest.innerIndexPtr()[k] = int(DstUpLo)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
+ Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+ dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
if(!StorageOrderMatch) std::swap(ip,jp);
- if( ((int(DstUpLo)==int(Lower) && ip<jp) || (int(DstUpLo)==int(Upper) && ip>jp)))
+ if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
dest.valuePtr()[k] = numext::conj(it.value());
else
dest.valuePtr()[k] = it.value();
@@ -461,9 +644,19 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
}
-template<typename MatrixType,int UpLo>
+// #ifndef EIGEN_TEST_EVALUATORS
+
+namespace internal {
+
+template<typename MatrixType, int Mode>
+struct traits<SparseSymmetricPermutationProduct<MatrixType,Mode> > : traits<MatrixType> {
+};
+
+}
+
+template<typename MatrixType,int Mode>
class SparseSymmetricPermutationProduct
- : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,UpLo> >
+ : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
{
public:
typedef typename MatrixType::Scalar Scalar;
@@ -485,15 +678,15 @@ class SparseSymmetricPermutationProduct
template<typename DestScalar, int Options, typename DstIndex>
void evalTo(SparseMatrix<DestScalar,Options,DstIndex>& _dest) const
{
-// internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
+// internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
SparseMatrix<DestScalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
- internal::permute_symm_to_fullsymm<UpLo>(m_matrix,tmp,m_perm.indices().data());
+ internal::permute_symm_to_fullsymm<Mode>(m_matrix,tmp,m_perm.indices().data());
_dest = tmp;
}
- template<typename DestType,unsigned int DestUpLo> void evalTo(SparseSelfAdjointView<DestType,DestUpLo>& dest) const
+ template<typename DestType,unsigned int DestMode> void evalTo(SparseSelfAdjointView<DestType,DestMode>& dest) const
{
- internal::permute_symm_to_symm<UpLo,DestUpLo>(m_matrix,dest.matrix(),m_perm.indices().data());
+ internal::permute_symm_to_symm<Mode,DestMode>(m_matrix,dest.matrix(),m_perm.indices().data());
}
protected:
@@ -502,6 +695,10 @@ class SparseSymmetricPermutationProduct
};
+// #else // EIGEN_TEST_EVALUATORS
+
+// #endif // EIGEN_TEST_EVALUATORS
+
} // end namespace Eigen
#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
diff --git a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
index fcc18f5c9..b42b33e55 100644
--- a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
+++ b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -46,6 +46,11 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
res.resize(cols, rows);
else
res.resize(rows, cols);
+
+ #ifdef EIGEN_TEST_EVALUATORS
+ typename evaluator<Lhs>::type lhsEval(lhs);
+ typename evaluator<Rhs>::type rhsEval(rhs);
+ #endif
res.reserve(estimated_nnz_prod);
double ratioColRes = double(estimated_nnz_prod)/double(lhs.rows()*rhs.cols());
@@ -56,12 +61,20 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
// let's do a more accurate determination of the nnz ratio for the current column j of res
tempVector.init(ratioColRes);
tempVector.setZero();
+#ifndef EIGEN_TEST_EVALUATORS
for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+#else
+ for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
+#endif
{
// FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
tempVector.restart();
Scalar x = rhsIt.value();
+#ifndef EIGEN_TEST_EVALUATORS
for (typename Lhs::InnerIterator lhsIt(lhs, rhsIt.index()); lhsIt; ++lhsIt)
+#else
+ for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, rhsIt.index()); lhsIt; ++lhsIt)
+#endif
{
tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
}
@@ -140,8 +153,58 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,R
}
};
+#ifndef EIGEN_TEST_EVALUATORS
// NOTE the two other cases (col row *) must never occur since they are caught
// by ProductReturnType, which transforms them to (col col *) by evaluating the rhs.
+#else
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>
+{
+ typedef typename ResultType::RealScalar RealScalar;
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename Lhs::Index> RowMajorMatrixLhs;
+ RowMajorMatrixLhs rowLhs(lhs);
+    sparse_sparse_product_with_pruning_selector<RowMajorMatrixLhs,Rhs,ResultType,RowMajor,RowMajor>::run(rowLhs,rhs,res,tolerance);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>
+{
+ typedef typename ResultType::RealScalar RealScalar;
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename Lhs::Index> RowMajorMatrixRhs;
+ RowMajorMatrixRhs rowRhs(rhs);
+    sparse_sparse_product_with_pruning_selector<Lhs,RowMajorMatrixRhs,ResultType,RowMajor,RowMajor,RowMajor>::run(lhs,rowRhs,res,tolerance);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>
+{
+ typedef typename ResultType::RealScalar RealScalar;
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixRhs;
+ ColMajorMatrixRhs colRhs(rhs);
+ internal::sparse_sparse_product_with_pruning_impl<Lhs,ColMajorMatrixRhs,ResultType>(lhs, colRhs, res, tolerance);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>
+{
+ typedef typename ResultType::RealScalar RealScalar;
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixLhs;
+ ColMajorMatrixLhs colLhs(lhs);
+ internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,Rhs,ResultType>(colLhs, rhs, res, tolerance);
+ }
+};
+#endif
} // end namespace internal
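As a point of reference, a small sketch of the pruned product these selectors back, with illustrative matrices; mixing a column-major lhs with a row-major rhs (or a row-major result) exercises the specializations added under EIGEN_TEST_EVALUATORS.

  #include <Eigen/SparseCore>
  using namespace Eigen;

  int main()
  {
    SparseMatrix<double,ColMajor> A(10,10);
    SparseMatrix<double,RowMajor> B(10,10);
    A.insert(0,0) = 1.0;
    B.insert(0,0) = 1e-20;
    A.makeCompressed(); B.makeCompressed();

    // Pruned product: entries that are much smaller than reference*epsilon
    // are dropped while the product is being accumulated.
    SparseMatrix<double,RowMajor> C = (A * B).pruned(1.0, 1e-12);
    return 0;
  }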
diff --git a/Eigen/src/SparseCore/SparseTranspose.h b/Eigen/src/SparseCore/SparseTranspose.h
index 7c300ee8d..f5eff6133 100644
--- a/Eigen/src/SparseCore/SparseTranspose.h
+++ b/Eigen/src/SparseCore/SparseTranspose.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -12,6 +12,7 @@
namespace Eigen {
+#ifndef EIGEN_TEST_EVALUATORS
template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
: public SparseMatrixBase<Transpose<MatrixType> >
{
@@ -58,6 +59,68 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInn
Index col() const { return Base::row(); }
};
+#else // EIGEN_TEST_EVALUATORS
+
+// Implement nonZeros() for transpose. I'm not sure this is the best approach;
+// perhaps it should be implemented in Transpose<> itself.
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
+ : public SparseMatrixBase<Transpose<MatrixType> >
+{
+ protected:
+ typedef SparseMatrixBase<Transpose<MatrixType> > Base;
+ public:
+ inline typename MatrixType::Index nonZeros() const { return Base::derived().nestedExpression().nonZeros(); }
+};
+
+namespace internal {
+
+template<typename ArgType>
+struct unary_evaluator<Transpose<ArgType>, IteratorBased>
+ : public evaluator_base<Transpose<ArgType> >
+{
+ typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
+ typedef typename evaluator<ArgType>::ReverseInnerIterator EvalReverseIterator;
+ public:
+ typedef Transpose<ArgType> XprType;
+ typedef typename XprType::Index Index;
+
+ class InnerIterator : public EvalIterator
+ {
+ public:
+ EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, typename XprType::Index outer)
+ : EvalIterator(unaryOp.m_argImpl,outer)
+ {}
+
+ Index row() const { return EvalIterator::col(); }
+ Index col() const { return EvalIterator::row(); }
+ };
+
+ class ReverseInnerIterator : public EvalReverseIterator
+ {
+ public:
+ EIGEN_STRONG_INLINE ReverseInnerIterator(const unary_evaluator& unaryOp, typename XprType::Index outer)
+ : EvalReverseIterator(unaryOp.m_argImpl,outer)
+ {}
+
+ Index row() const { return EvalReverseIterator::col(); }
+ Index col() const { return EvalReverseIterator::row(); }
+ };
+
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+ Flags = XprType::Flags
+ };
+
+ unary_evaluator(const XprType& op) :m_argImpl(op.nestedExpression()) {}
+
+ protected:
+ typename evaluator<ArgType>::nestedType m_argImpl;
+};
+
+} // end namespace internal
+
+#endif // EIGEN_TEST_EVALUATORS
+
} // end namespace Eigen
#endif // EIGEN_SPARSETRANSPOSE_H
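A tiny sketch, with made-up data, of what the transpose support above provides: nonZeros() is forwarded to the nested expression, and evaluating the transpose goes through the inner iterator that swaps row() and col().

  #include <Eigen/SparseCore>
  #include <iostream>
  using namespace Eigen;

  int main()
  {
    SparseMatrix<double> A(2,3);
    A.insert(0,2) = 5.0;
    A.makeCompressed();

    std::cout << A.transpose().nonZeros() << "\n";  // 1, forwarded from A

    SparseMatrix<double> At = A.transpose();        // uses the transpose inner iterator
    std::cout << At.coeff(2,0) << "\n";             // 5
    return 0;
  }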
diff --git a/Eigen/src/SparseCore/SparseTriangularView.h b/Eigen/src/SparseCore/SparseTriangularView.h
index 333127b78..87f4ab18d 100644
--- a/Eigen/src/SparseCore/SparseTriangularView.h
+++ b/Eigen/src/SparseCore/SparseTriangularView.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
@@ -13,17 +13,8 @@
namespace Eigen {
-namespace internal {
-
-template<typename MatrixType, int Mode>
-struct traits<SparseTriangularView<MatrixType,Mode> >
-: public traits<MatrixType>
-{};
-
-} // namespace internal
-
-template<typename MatrixType, int Mode> class SparseTriangularView
- : public SparseMatrixBase<SparseTriangularView<MatrixType,Mode> >
+template<typename MatrixType, unsigned int Mode> class TriangularViewImpl<MatrixType,Mode,Sparse>
+ : public SparseMatrixBase<TriangularView<MatrixType,Mode> >
{
enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit))
|| ((Mode&Upper) && (MatrixType::Flags&RowMajorBit)),
@@ -31,46 +22,52 @@ template<typename MatrixType, int Mode> class SparseTriangularView
SkipDiag = (Mode&ZeroDiag) ? 1 : 0,
HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
};
+
+ typedef TriangularView<MatrixType,Mode> TriangularViewType;
+
+protected:
+ // dummy solve function to make TriangularView happy.
+ void solve() const;
public:
- EIGEN_SPARSE_PUBLIC_INTERFACE(SparseTriangularView)
-
+ EIGEN_SPARSE_PUBLIC_INTERFACE(TriangularViewType)
+
class InnerIterator;
class ReverseInnerIterator;
- inline Index rows() const { return m_matrix.rows(); }
- inline Index cols() const { return m_matrix.cols(); }
-
typedef typename MatrixType::Nested MatrixTypeNested;
typedef typename internal::remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;
typedef typename internal::remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
- inline SparseTriangularView(const MatrixType& matrix) : m_matrix(matrix) {}
-
- /** \internal */
- inline const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; }
-
+#ifndef EIGEN_TEST_EVALUATORS
template<typename OtherDerived>
typename internal::plain_matrix_type_column_major<OtherDerived>::type
solve(const MatrixBase<OtherDerived>& other) const;
+#else // EIGEN_TEST_EVALUATORS
+ template<typename RhsType, typename DstType>
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE void _solve_impl(const RhsType &rhs, DstType &dst) const {
+ if(!(internal::is_same<RhsType,DstType>::value && internal::extract_data(dst) == internal::extract_data(rhs)))
+ dst = rhs;
+ this->template solveInPlace(dst);
+ }
+#endif // EIGEN_TEST_EVALUATORS
template<typename OtherDerived> void solveInPlace(MatrixBase<OtherDerived>& other) const;
template<typename OtherDerived> void solveInPlace(SparseMatrixBase<OtherDerived>& other) const;
-
- protected:
- MatrixTypeNested m_matrix;
+
};
-template<typename MatrixType, int Mode>
-class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator
+template<typename MatrixType, unsigned int Mode>
+class TriangularViewImpl<MatrixType,Mode,Sparse>::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator
{
typedef typename MatrixTypeNestedCleaned::InnerIterator Base;
- typedef typename SparseTriangularView::Index Index;
+ typedef typename TriangularViewType::Index Index;
public:
- EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer)
- : Base(view.nestedExpression(), outer), m_returnOne(false)
+ EIGEN_STRONG_INLINE InnerIterator(const TriangularViewImpl& view, Index outer)
+ : Base(view.derived().nestedExpression(), outer), m_returnOne(false)
{
if(SkipFirst)
{
@@ -132,15 +129,15 @@ class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNe
bool m_returnOne;
};
-template<typename MatrixType, int Mode>
-class SparseTriangularView<MatrixType,Mode>::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator
+template<typename MatrixType, unsigned int Mode>
+class TriangularViewImpl<MatrixType,Mode,Sparse>::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator
{
typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base;
- typedef typename SparseTriangularView::Index Index;
+ typedef typename TriangularViewImpl::Index Index;
public:
- EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseTriangularView& view, Index outer)
- : Base(view.nestedExpression(), outer)
+ EIGEN_STRONG_INLINE ReverseInnerIterator(const TriangularViewType& view, Index outer)
+ : Base(view.derived().nestedExpression(), outer)
{
eigen_assert((!HasUnitDiag) && "ReverseInnerIterator does not support yet triangular views with a unit diagonal");
if(SkipLast) {
@@ -166,9 +163,118 @@ class SparseTriangularView<MatrixType,Mode>::ReverseInnerIterator : public Matri
}
};
+#ifdef EIGEN_TEST_EVALUATORS
+namespace internal {
+
+template<typename ArgType, unsigned int Mode>
+struct unary_evaluator<TriangularView<ArgType,Mode>, IteratorBased>
+ : evaluator_base<TriangularView<ArgType,Mode> >
+{
+ typedef TriangularView<ArgType,Mode> XprType;
+
+protected:
+
+ typedef typename XprType::Scalar Scalar;
+ typedef typename XprType::Index Index;
+ typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
+
+ enum { SkipFirst = ((Mode&Lower) && !(ArgType::Flags&RowMajorBit))
+ || ((Mode&Upper) && (ArgType::Flags&RowMajorBit)),
+ SkipLast = !SkipFirst,
+ SkipDiag = (Mode&ZeroDiag) ? 1 : 0,
+ HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
+ };
+
+public:
+
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+ Flags = XprType::Flags
+ };
+
+ unary_evaluator(const XprType &xpr) : m_argImpl(xpr.nestedExpression()) {}
+
+ class InnerIterator : public EvalIterator
+ {
+ typedef EvalIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& xprEval, Index outer)
+ : Base(xprEval.m_argImpl,outer), m_returnOne(false)
+ {
+ if(SkipFirst)
+ {
+ while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()<outer))
+ Base::operator++();
+ if(HasUnitDiag)
+ m_returnOne = true;
+ }
+ else if(HasUnitDiag && ((!Base::operator bool()) || Base::index()>=Base::outer()))
+ {
+ if((!SkipFirst) && Base::operator bool())
+ Base::operator++();
+ m_returnOne = true;
+ }
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ if(HasUnitDiag && m_returnOne)
+ m_returnOne = false;
+ else
+ {
+ Base::operator++();
+ if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer()))
+ {
+ if((!SkipFirst) && Base::operator bool())
+ Base::operator++();
+ m_returnOne = true;
+ }
+ }
+ return *this;
+ }
+
+ EIGEN_STRONG_INLINE operator bool() const
+ {
+ if(HasUnitDiag && m_returnOne)
+ return true;
+ if(SkipFirst) return Base::operator bool();
+ else
+ {
+ if (SkipDiag) return (Base::operator bool() && this->index() < this->outer());
+ else return (Base::operator bool() && this->index() <= this->outer());
+ }
+ }
+
+// inline Index row() const { return (ArgType::Flags&RowMajorBit ? Base::outer() : this->index()); }
+// inline Index col() const { return (ArgType::Flags&RowMajorBit ? this->index() : Base::outer()); }
+ inline Index index() const
+ {
+ if(HasUnitDiag && m_returnOne) return Base::outer();
+ else return Base::index();
+ }
+ inline Scalar value() const
+ {
+ if(HasUnitDiag && m_returnOne) return Scalar(1);
+ else return Base::value();
+ }
+
+ protected:
+ bool m_returnOne;
+ private:
+ Scalar& valueRef();
+ };
+
+protected:
+ typename evaluator<ArgType>::type m_argImpl;
+};
+
+} // end namespace internal
+#endif
+
template<typename Derived>
template<int Mode>
-inline const SparseTriangularView<Derived, Mode>
+inline const TriangularView<Derived, Mode>
SparseMatrixBase<Derived>::triangularView() const
{
return derived();
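For illustration, a minimal sketch (with a made-up matrix) of the sparse triangularView() entry point whose return type is changed above from SparseTriangularView to TriangularView:

  #include <Eigen/SparseCore>
  #include <iostream>
  using namespace Eigen;

  int main()
  {
    SparseMatrix<double> A(3,3);
    A.insert(0,0) = 1.0; A.insert(2,0) = 4.0; A.insert(1,1) = 2.0; A.insert(0,2) = 7.0;
    A.makeCompressed();

    // Copying the view keeps only the entries of the selected triangle.
    SparseMatrix<double> L = A.triangularView<Lower>();
    std::cout << L.nonZeros() << "\n";  // 3: the (0,2) entry is outside the lower triangle
    return 0;
  }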
diff --git a/Eigen/src/SparseCore/SparseUtil.h b/Eigen/src/SparseCore/SparseUtil.h
index 02c19d18f..0183907a1 100644
--- a/Eigen/src/SparseCore/SparseUtil.h
+++ b/Eigen/src/SparseCore/SparseUtil.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -43,6 +43,8 @@ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
+#ifndef EIGEN_TEST_EVALUATORS
+
#define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \
typedef BaseClass Base; \
typedef typename Eigen::internal::traits<Derived >::Scalar Scalar; \
@@ -52,13 +54,32 @@ EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
typedef typename Eigen::internal::traits<Derived >::Index Index; \
enum { RowsAtCompileTime = Eigen::internal::traits<Derived >::RowsAtCompileTime, \
ColsAtCompileTime = Eigen::internal::traits<Derived >::ColsAtCompileTime, \
- Flags = Eigen::internal::traits<Derived >::Flags, \
+ Flags = Eigen::internal::traits<Derived>::Flags, \
CoeffReadCost = Eigen::internal::traits<Derived >::CoeffReadCost, \
SizeAtCompileTime = Base::SizeAtCompileTime, \
IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
using Base::derived; \
using Base::const_cast_derived;
+#else // EIGEN_TEST_EVALUATORS
+
+#define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \
+ typedef BaseClass Base; \
+ typedef typename Eigen::internal::traits<Derived >::Scalar Scalar; \
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
+ typedef typename Eigen::internal::nested<Derived >::type Nested; \
+ typedef typename Eigen::internal::traits<Derived >::StorageKind StorageKind; \
+ typedef typename Eigen::internal::traits<Derived >::Index Index; \
+ enum { RowsAtCompileTime = Eigen::internal::traits<Derived >::RowsAtCompileTime, \
+ ColsAtCompileTime = Eigen::internal::traits<Derived >::ColsAtCompileTime, \
+ Flags = Eigen::internal::traits<Derived>::Flags, \
+ SizeAtCompileTime = Base::SizeAtCompileTime, \
+ IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
+ using Base::derived; \
+ using Base::const_cast_derived;
+
+#endif // EIGEN_TEST_EVALUATORS
+
#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \
_EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase<Derived >)
@@ -73,7 +94,6 @@ template<typename _Scalar, int _Flags = 0, typename _Index = int> class Dynamic
template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseVector;
template<typename _Scalar, int _Flags = 0, typename _Index = int> class MappedSparseMatrix;
-template<typename MatrixType, int Mode> class SparseTriangularView;
template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView;
template<typename Lhs, typename Rhs> class SparseDiagonalProduct;
template<typename MatrixType> class SparseView;
@@ -131,11 +151,23 @@ template<typename T> struct plain_matrix_type<T,Sparse>
{
typedef typename traits<T>::Scalar _Scalar;
typedef typename traits<T>::Index _Index;
- enum { _Options = ((traits<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
+ enum { _Options = ((evaluator<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
public:
typedef SparseMatrix<_Scalar, _Options, _Index> type;
};
+template<typename Derived>
+struct generic_xpr_base<Derived, MatrixXpr, Sparse>
+{
+ typedef SparseMatrixBase<Derived> type;
+};
+
+struct SparseTriangularShape { static std::string debugName() { return "SparseTriangularShape"; } };
+struct SparseSelfAdjointShape { static std::string debugName() { return "SparseSelfAdjointShape"; } };
+
+template<> struct glue_shapes<SparseShape,SelfAdjointShape> { typedef SparseSelfAdjointShape type; };
+template<> struct glue_shapes<SparseShape,TriangularShape > { typedef SparseTriangularShape type; };
+
} // end namespace internal
/** \ingroup SparseCore_Module
diff --git a/Eigen/src/SparseCore/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h
index 0b1b389ce..0f9aa9dd1 100644
--- a/Eigen/src/SparseCore/SparseVector.h
+++ b/Eigen/src/SparseCore/SparseVector.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -422,6 +422,7 @@ class SparseVector<Scalar,_Options,_Index>::ReverseInnerIterator
namespace internal {
+#ifndef EIGEN_TEST_EVALUATORS
template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_Inner> {
static void run(Dest& dst, const Src& src) {
@@ -443,6 +444,55 @@ struct sparse_vector_assign_selector<Dest,Src,SVA_Outer> {
}
}
};
+#else // EIGEN_TEST_EVALUATORS
+
+template<typename _Scalar, int _Options, typename _Index>
+struct evaluator<SparseVector<_Scalar,_Options,_Index> >
+ : evaluator_base<SparseVector<_Scalar,_Options,_Index> >
+{
+ typedef SparseVector<_Scalar,_Options,_Index> SparseVectorType;
+ typedef typename SparseVectorType::InnerIterator InnerIterator;
+ typedef typename SparseVectorType::ReverseInnerIterator ReverseInnerIterator;
+
+ enum {
+ CoeffReadCost = NumTraits<_Scalar>::ReadCost,
+ Flags = SparseVectorType::Flags
+ };
+
+ evaluator(const SparseVectorType &mat) : m_matrix(mat) {}
+
+ operator SparseVectorType&() { return m_matrix.const_cast_derived(); }
+ operator const SparseVectorType&() const { return m_matrix; }
+
+ const SparseVectorType &m_matrix;
+};
+
+template< typename Dest, typename Src>
+struct sparse_vector_assign_selector<Dest,Src,SVA_Inner> {
+ static void run(Dest& dst, const Src& src) {
+ eigen_internal_assert(src.innerSize()==src.size());
+ typedef typename internal::evaluator<Src>::type SrcEvaluatorType;
+ SrcEvaluatorType srcEval(src);
+ for(typename SrcEvaluatorType::InnerIterator it(srcEval, 0); it; ++it)
+ dst.insert(it.index()) = it.value();
+ }
+};
+
+template< typename Dest, typename Src>
+struct sparse_vector_assign_selector<Dest,Src,SVA_Outer> {
+ static void run(Dest& dst, const Src& src) {
+ eigen_internal_assert(src.outerSize()==src.size());
+ typedef typename internal::evaluator<Src>::type SrcEvaluatorType;
+ SrcEvaluatorType srcEval(src);
+ for(typename Dest::Index i=0; i<src.size(); ++i)
+ {
+ typename SrcEvaluatorType::InnerIterator it(srcEval, i);
+ if(it)
+ dst.insert(i) = it.value();
+ }
+ }
+};
+#endif // EIGEN_TEST_EVALUATORS
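A short sketch of the assignments these selectors implement, using an illustrative matrix: copying an inner vector (a column of a column-major matrix) takes the SVA_Inner path, while copying an outer vector (a row of the same matrix) takes the SVA_Outer path.

  #include <Eigen/SparseCore>
  using namespace Eigen;

  int main()
  {
    SparseMatrix<double> M(4,4);          // column-major by default
    M.insert(1,0) = 3.0; M.insert(0,2) = 5.0;
    M.makeCompressed();

    SparseVector<double> v = M.col(0);    // inner vector of M  -> SVA_Inner selector
    SparseVector<double> w = M.row(0);    // outer direction    -> SVA_Outer selector
    return 0;
  }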
template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_RuntimeSwitch> {
diff --git a/Eigen/src/SparseCore/SparseView.h b/Eigen/src/SparseCore/SparseView.h
index fd8450463..c1c50f6e9 100644
--- a/Eigen/src/SparseCore/SparseView.h
+++ b/Eigen/src/SparseCore/SparseView.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Daniel Lowengrub <lowdanie@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
@@ -34,25 +34,36 @@ class SparseView : public SparseMatrixBase<SparseView<MatrixType> >
typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
public:
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView)
+ typedef typename internal::remove_all<MatrixType>::type NestedExpression;
SparseView(const MatrixType& mat, const Scalar& m_reference = Scalar(0),
- typename NumTraits<Scalar>::Real m_epsilon = NumTraits<Scalar>::dummy_precision()) :
+ RealScalar m_epsilon = NumTraits<Scalar>::dummy_precision()) :
m_matrix(mat), m_reference(m_reference), m_epsilon(m_epsilon) {}
+#ifndef EIGEN_TEST_EVALUATORS
class InnerIterator;
+#endif // EIGEN_TEST_EVALUATORS
inline Index rows() const { return m_matrix.rows(); }
inline Index cols() const { return m_matrix.cols(); }
inline Index innerSize() const { return m_matrix.innerSize(); }
inline Index outerSize() const { return m_matrix.outerSize(); }
-
+
+ /** \returns the nested expression */
+ const typename internal::remove_all<MatrixTypeNested>::type&
+ nestedExpression() const { return m_matrix; }
+
+ Scalar reference() const { return m_reference; }
+ RealScalar epsilon() const { return m_epsilon; }
+
protected:
MatrixTypeNested m_matrix;
Scalar m_reference;
- typename NumTraits<Scalar>::Real m_epsilon;
+ RealScalar m_epsilon;
};
+#ifndef EIGEN_TEST_EVALUATORS
template<typename MatrixType>
class SparseView<MatrixType>::InnerIterator : public _MatrixTypeNested::InnerIterator
{
@@ -80,18 +91,172 @@ protected:
private:
void incrementToNonZero()
{
- while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.m_reference, m_view.m_epsilon))
+ while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.reference(), m_view.epsilon()))
{
IterBase::operator++();
}
}
};
+#else // EIGEN_TEST_EVALUATORS
+
+namespace internal {
+
+// TODO find a way to unify the two following variants
+// This is tricky: implementing an inner iterator on top of an IndexBased evaluator is
+// not easy because the evaluators do not expose the sizes of the underlying expression.
+
+template<typename ArgType>
+struct unary_evaluator<SparseView<ArgType>, IteratorBased>
+ : public evaluator_base<SparseView<ArgType> >
+{
+ typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
+ public:
+ typedef SparseView<ArgType> XprType;
+
+ class InnerIterator : public EvalIterator
+ {
+ typedef typename XprType::Scalar Scalar;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, typename XprType::Index outer)
+ : EvalIterator(sve.m_argImpl,outer), m_view(sve.m_view)
+ {
+ incrementToNonZero();
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ EvalIterator::operator++();
+ incrementToNonZero();
+ return *this;
+ }
+
+ using EvalIterator::value;
+
+ protected:
+ const XprType &m_view;
+
+ private:
+ void incrementToNonZero()
+ {
+ while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.reference(), m_view.epsilon()))
+ {
+ EvalIterator::operator++();
+ }
+ }
+ };
+
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+ Flags = XprType::Flags
+ };
+
+ unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {}
+
+ protected:
+ typename evaluator<ArgType>::nestedType m_argImpl;
+ const XprType &m_view;
+};
+
+template<typename ArgType>
+struct unary_evaluator<SparseView<ArgType>, IndexBased>
+ : public evaluator_base<SparseView<ArgType> >
+{
+ public:
+ typedef SparseView<ArgType> XprType;
+ protected:
+ enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit };
+ typedef typename XprType::Index Index;
+ typedef typename XprType::Scalar Scalar;
+ public:
+
+ class InnerIterator
+ {
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, typename XprType::Index outer)
+ : m_sve(sve), m_inner(0), m_outer(outer), m_end(sve.m_view.innerSize())
+ {
+ incrementToNonZero();
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ m_inner++;
+ incrementToNonZero();
+ return *this;
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ {
+ return (IsRowMajor) ? m_sve.m_argImpl.coeff(m_outer, m_inner)
+ : m_sve.m_argImpl.coeff(m_inner, m_outer);
+ }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_inner; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }
+
+ protected:
+ const unary_evaluator &m_sve;
+ Index m_inner;
+ const Index m_outer;
+ const Index m_end;
+
+ private:
+ void incrementToNonZero()
+ {
+ while((bool(*this)) && internal::isMuchSmallerThan(value(), m_sve.m_view.reference(), m_sve.m_view.epsilon()))
+ {
+ m_inner++;
+ }
+ }
+ };
+
+ enum {
+ CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
+ Flags = XprType::Flags
+ };
+
+ unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {}
+
+ protected:
+ typename evaluator<ArgType>::nestedType m_argImpl;
+ const XprType &m_view;
+};
+
+} // end namespace internal
+
+#endif // EIGEN_TEST_EVALUATORS
+
+template<typename Derived>
+const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& reference,
+ const typename NumTraits<Scalar>::Real& epsilon) const
+{
+ return SparseView<Derived>(derived(), reference, epsilon);
+}
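A minimal sketch of the dense-to-sparse conversion defined just above, with an illustrative threshold:

  #include <Eigen/Dense>
  #include <Eigen/SparseCore>
  using namespace Eigen;

  int main()
  {
    MatrixXd D(2,2);
    D << 1.0, 1e-18,
         0.0, 2.0;

    // Entries much smaller than reference*epsilon are not stored.
    SparseMatrix<double> S = D.sparseView(1.0, 1e-12);
    return 0;  // S holds 2 non-zeros
  }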
+
+/** \returns an expression of \c *this with values smaller than
+  * \a reference * \a epsilon removed.
+ *
+ * This method is typically used in conjunction with the product of two sparse matrices
+ * to automatically prune the smallest values as follows:
+ * \code
+ * C = (A*B).pruned(); // suppress numerical zeros (exact)
+ * C = (A*B).pruned(ref);
+ * C = (A*B).pruned(ref,epsilon);
+ * \endcode
+ * where \c ref is a meaningful non zero reference value.
+ * */
template<typename Derived>
-const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& m_reference,
- const typename NumTraits<Scalar>::Real& m_epsilon) const
+const SparseView<Derived>
+SparseMatrixBase<Derived>::pruned(const Scalar& reference,
+ const RealScalar& epsilon) const
{
- return SparseView<Derived>(derived(), m_reference, m_epsilon);
+ return SparseView<Derived>(derived(), reference, epsilon);
}
} // end namespace Eigen
diff --git a/Eigen/src/SparseCore/TriangularSolver.h b/Eigen/src/SparseCore/TriangularSolver.h
index dd55522a7..012a1bb75 100644
--- a/Eigen/src/SparseCore/TriangularSolver.h
+++ b/Eigen/src/SparseCore/TriangularSolver.h
@@ -23,6 +23,7 @@ template<typename Lhs, typename Rhs, int Mode,
int StorageOrder = int(traits<Lhs>::Flags) & RowMajorBit>
struct sparse_solve_triangular_selector;
+#ifndef EIGEN_TEST_EVALUATORS
// forward substitution, row-major
template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,RowMajor>
@@ -163,13 +164,166 @@ struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>
}
};
+#else // EIGEN_TEST_EVALUATORS
+
+// forward substitution, row-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,RowMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ typedef typename Lhs::Index Index;
+ typedef typename evaluator<Lhs>::type LhsEval;
+ typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ LhsEval lhsEval(lhs);
+ for(Index col=0 ; col<other.cols() ; ++col)
+ {
+ for(Index i=0; i<lhs.rows(); ++i)
+ {
+ Scalar tmp = other.coeff(i,col);
+ Scalar lastVal(0);
+ Index lastIndex = 0;
+ for(LhsIterator it(lhsEval, i); it; ++it)
+ {
+ lastVal = it.value();
+ lastIndex = it.index();
+ if(lastIndex==i)
+ break;
+ tmp -= lastVal * other.coeff(lastIndex,col);
+ }
+ if (Mode & UnitDiag)
+ other.coeffRef(i,col) = tmp;
+ else
+ {
+ eigen_assert(lastIndex==i);
+ other.coeffRef(i,col) = tmp/lastVal;
+ }
+ }
+ }
+ }
+};
+
+// backward substitution, row-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ typedef typename Lhs::Index Index;
+ typedef typename evaluator<Lhs>::type LhsEval;
+ typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ LhsEval lhsEval(lhs);
+ for(Index col=0 ; col<other.cols() ; ++col)
+ {
+ for(Index i=lhs.rows()-1 ; i>=0 ; --i)
+ {
+ Scalar tmp = other.coeff(i,col);
+ Scalar l_ii = 0;
+ LhsIterator it(lhsEval, i);
+ while(it && it.index()<i)
+ ++it;
+ if(!(Mode & UnitDiag))
+ {
+ eigen_assert(it && it.index()==i);
+ l_ii = it.value();
+ ++it;
+ }
+ else if (it && it.index() == i)
+ ++it;
+ for(; it; ++it)
+ {
+ tmp -= it.value() * other.coeff(it.index(),col);
+ }
+
+ if (Mode & UnitDiag) other.coeffRef(i,col) = tmp;
+ else other.coeffRef(i,col) = tmp/l_ii;
+ }
+ }
+ }
+};
+
+// forward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,ColMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ typedef typename Lhs::Index Index;
+ typedef typename evaluator<Lhs>::type LhsEval;
+ typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ LhsEval lhsEval(lhs);
+ for(Index col=0 ; col<other.cols() ; ++col)
+ {
+ for(Index i=0; i<lhs.cols(); ++i)
+ {
+ Scalar& tmp = other.coeffRef(i,col);
+ if (tmp!=Scalar(0)) // optimization when other is actually sparse
+ {
+ LhsIterator it(lhsEval, i);
+ while(it && it.index()<i)
+ ++it;
+ if(!(Mode & UnitDiag))
+ {
+ eigen_assert(it && it.index()==i);
+ tmp /= it.value();
+ }
+ if (it && it.index()==i)
+ ++it;
+ for(; it; ++it)
+ other.coeffRef(it.index(), col) -= tmp * it.value();
+ }
+ }
+ }
+ }
+};
+
+// backward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ typedef typename Lhs::Index Index;
+ typedef typename evaluator<Lhs>::type LhsEval;
+ typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ LhsEval lhsEval(lhs);
+ for(Index col=0 ; col<other.cols() ; ++col)
+ {
+ for(Index i=lhs.cols()-1; i>=0; --i)
+ {
+ Scalar& tmp = other.coeffRef(i,col);
+ if (tmp!=Scalar(0)) // optimization when other is actually sparse
+ {
+ if(!(Mode & UnitDiag))
+ {
+ // TODO replace this by a binary search. make sure the binary search is safe for partially sorted elements
+ LhsIterator it(lhsEval, i);
+ while(it && it.index()!=i)
+ ++it;
+ eigen_assert(it && it.index()==i);
+ other.coeffRef(i,col) /= it.value();
+ }
+ LhsIterator it(lhsEval, i);
+ for(; it && it.index()<i; ++it)
+ other.coeffRef(it.index(), col) -= tmp * it.value();
+ }
+ }
+ }
+ }
+};
+
+#endif // EIGEN_TEST_EVALUATORS
} // end namespace internal
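For context, a minimal sketch of the sparse triangular solve that dispatches to the selectors above, assuming an invertible lower triangle (illustrative data only):

  #include <Eigen/Dense>
  #include <Eigen/SparseCore>
  using namespace Eigen;

  int main()
  {
    SparseMatrix<double> A(3,3);
    A.insert(0,0) = 2.0; A.insert(1,0) = 1.0;
    A.insert(1,1) = 3.0; A.insert(2,2) = 4.0;
    A.makeCompressed();

    VectorXd b = VectorXd::Ones(3);
    // Forward substitution with the lower triangle, performed in place on b.
    A.triangularView<Lower>().solveInPlace(b);
    return 0;
  }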
-template<typename ExpressionType,int Mode>
+template<typename ExpressionType,unsigned int Mode>
template<typename OtherDerived>
-void SparseTriangularView<ExpressionType,Mode>::solveInPlace(MatrixBase<OtherDerived>& other) const
+void TriangularViewImpl<ExpressionType,Mode,Sparse>::solveInPlace(MatrixBase<OtherDerived>& other) const
{
- eigen_assert(m_matrix.cols() == m_matrix.rows() && m_matrix.cols() == other.rows());
+ eigen_assert(derived().cols() == derived().rows() && derived().cols() == other.rows());
eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));
enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
@@ -178,21 +332,23 @@ void SparseTriangularView<ExpressionType,Mode>::solveInPlace(MatrixBase<OtherDer
typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
OtherCopy otherCopy(other.derived());
- internal::sparse_solve_triangular_selector<ExpressionType, typename internal::remove_reference<OtherCopy>::type, Mode>::run(m_matrix, otherCopy);
+ internal::sparse_solve_triangular_selector<ExpressionType, typename internal::remove_reference<OtherCopy>::type, Mode>::run(derived().nestedExpression(), otherCopy);
if (copy)
other = otherCopy;
}
-template<typename ExpressionType,int Mode>
+#ifndef EIGEN_TEST_EVALUATORS
+template<typename ExpressionType,unsigned int Mode>
template<typename OtherDerived>
typename internal::plain_matrix_type_column_major<OtherDerived>::type
-SparseTriangularView<ExpressionType,Mode>::solve(const MatrixBase<OtherDerived>& other) const
+TriangularViewImpl<ExpressionType,Mode,Sparse>::solve(const MatrixBase<OtherDerived>& other) const
{
typename internal::plain_matrix_type_column_major<OtherDerived>::type res(other);
solveInPlace(res);
return res;
}
+#endif
// pure sparse path
@@ -290,11 +446,11 @@ struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
} // end namespace internal
-template<typename ExpressionType,int Mode>
+template<typename ExpressionType,unsigned int Mode>
template<typename OtherDerived>
-void SparseTriangularView<ExpressionType,Mode>::solveInPlace(SparseMatrixBase<OtherDerived>& other) const
+void TriangularViewImpl<ExpressionType,Mode,Sparse>::solveInPlace(SparseMatrixBase<OtherDerived>& other) const
{
- eigen_assert(m_matrix.cols() == m_matrix.rows() && m_matrix.cols() == other.rows());
+ eigen_assert(derived().cols() == derived().rows() && derived().cols() == other.rows());
eigen_assert( (!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));
// enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
@@ -303,7 +459,7 @@ void SparseTriangularView<ExpressionType,Mode>::solveInPlace(SparseMatrixBase<Ot
// typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
// OtherCopy otherCopy(other.derived());
- internal::sparse_solve_triangular_sparse_selector<ExpressionType, OtherDerived, Mode>::run(m_matrix, other.derived());
+ internal::sparse_solve_triangular_sparse_selector<ExpressionType, OtherDerived, Mode>::run(derived().nestedExpression(), other.derived());
// if (copy)
// other = otherCopy;