Diffstat (limited to 'Eigen/src/SparseCore')
-rw-r--r--  Eigen/src/SparseCore/CMakeLists.txt                       6
-rw-r--r--  Eigen/src/SparseCore/ConservativeSparseSparseProduct.h    2
-rw-r--r--  Eigen/src/SparseCore/SparseAssign.h                      33
-rw-r--r--  Eigen/src/SparseCore/SparseBlock.h                        6
-rw-r--r--  Eigen/src/SparseCore/SparseCompressedBase.h              19
-rw-r--r--  Eigen/src/SparseCore/SparseCwiseBinaryOp.h               55
-rw-r--r--  Eigen/src/SparseCore/SparseDenseProduct.h                20
-rw-r--r--  Eigen/src/SparseCore/SparseMap.h                         22
-rw-r--r--  Eigen/src/SparseCore/SparseMatrix.h                       8
-rw-r--r--  Eigen/src/SparseCore/SparseMatrixBase.h                  28
-rw-r--r--  Eigen/src/SparseCore/SparseProduct.h                     28
-rw-r--r--  Eigen/src/SparseCore/SparseRedux.h                        5
-rw-r--r--  Eigen/src/SparseCore/SparseSelfAdjointView.h             87
-rw-r--r--  Eigen/src/SparseCore/SparseSparseProductWithPruning.h     2
-rw-r--r--  Eigen/src/SparseCore/SparseVector.h                       2
15 files changed, 188 insertions, 135 deletions
diff --git a/Eigen/src/SparseCore/CMakeLists.txt b/Eigen/src/SparseCore/CMakeLists.txt
deleted file mode 100644
index d860452a6..000000000
--- a/Eigen/src/SparseCore/CMakeLists.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-FILE(GLOB Eigen_SparseCore_SRCS "*.h")
-
-INSTALL(FILES
- ${Eigen_SparseCore_SRCS}
- DESTINATION ${INCLUDE_INSTALL_DIR}/Eigen/src/SparseCore COMPONENT Devel
- )
diff --git a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
index 0f6835846..492eb0a29 100644
--- a/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
+++ b/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
@@ -143,7 +143,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
// If the result is tall and thin (in the extreme case a column vector)
// then it is faster to sort the coefficients inplace instead of transposing twice.
// FIXME, the following heuristic is probably not very good.
- if(lhs.rows()>=rhs.cols())
+ if(lhs.rows()>rhs.cols())
{
ColMajorMatrix resCol(lhs.rows(),rhs.cols());
// perform sorted insertion
diff --git a/Eigen/src/SparseCore/SparseAssign.h b/Eigen/src/SparseCore/SparseAssign.h
index 4a8dd12e4..fa5386599 100644
--- a/Eigen/src/SparseCore/SparseAssign.h
+++ b/Eigen/src/SparseCore/SparseAssign.h
@@ -34,8 +34,8 @@ template<typename OtherDerived>
inline Derived& SparseMatrixBase<Derived>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
// by default sparse evaluation do not alias, so we can safely bypass the generic call_assignment routine
- internal::Assignment<Derived,OtherDerived,internal::assign_op<Scalar> >
- ::run(derived(), other.derived(), internal::assign_op<Scalar>());
+ internal::Assignment<Derived,OtherDerived,internal::assign_op<Scalar,typename OtherDerived::Scalar> >
+ ::run(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());
return derived();
}
@@ -124,24 +124,24 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
}
// Generic Sparse to Sparse assignment
-template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
-struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse, Scalar>
+template< typename DstXprType, typename SrcXprType, typename Functor>
+struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse>
{
- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
assign_sparse_to_sparse(dst.derived(), src.derived());
}
};
// Generic Sparse to Dense assignment
-template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
-struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense, Scalar>
+template< typename DstXprType, typename SrcXprType, typename Functor>
+struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense>
{
static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
{
eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
- if(internal::is_same<Functor,internal::assign_op<Scalar> >::value)
+ if(internal::is_same<Functor,internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> >::value)
dst.setZero();
internal::evaluator<SrcXprType> srcEval(src);
@@ -156,10 +156,10 @@ struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense, Scalar>
// Specialization for "dst = dec.solve(rhs)"
// NOTE we need to specialize it for Sparse2Sparse to avoid ambiguous specialization error
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
-struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar>, Sparse2Sparse, Scalar>
+struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Sparse2Sparse>
{
typedef Solve<DecType,RhsType> SrcXprType;
- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
{
src.dec()._solve_impl(src.rhs(), dst);
}
@@ -169,14 +169,15 @@ struct Diagonal2Sparse {};
template<> struct AssignmentKind<SparseShape,DiagonalShape> { typedef Diagonal2Sparse Kind; };
-template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
-struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse, Scalar>
+template< typename DstXprType, typename SrcXprType, typename Functor>
+struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse>
{
typedef typename DstXprType::StorageIndex StorageIndex;
+ typedef typename DstXprType::Scalar Scalar;
typedef Array<StorageIndex,Dynamic,1> ArrayXI;
typedef Array<Scalar,Dynamic,1> ArrayXS;
template<int Options>
- static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
+ static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
Index size = src.diagonal().size();
dst.makeCompressed();
@@ -187,15 +188,15 @@ struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse, Scalar>
}
template<typename DstDerived>
- static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
+ static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
dst.diagonal() = src.diagonal();
}
- static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar> &/*func*/)
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{ dst.diagonal() += src.diagonal(); }
- static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar> &/*func*/)
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{ dst.diagonal() -= src.diagonal(); }
};
} // end namespace internal
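
Note: the hunks above replace the single-scalar internal::assign_op<Scalar> with the two-scalar internal::assign_op<DstScalar,SrcScalar>, so mixed-scalar sparse assignments are resolved through ScalarBinaryOpTraits. A minimal sketch of what this enables (assuming Eigen 3.3's default real/complex compatibility rules; the setup is illustrative, not taken from this patch):

    #include <Eigen/SparseCore>
    using namespace Eigen;

    SparseMatrix<double>                A(3,3);
    SparseMatrix<std::complex<double> > C(3,3);
    // dst and src scalar types differ: this now instantiates
    // internal::assign_op<std::complex<double>, double>, accepted because
    // ScalarBinaryOpTraits declares the real/complex pair compatible.
    C = A;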
diff --git a/Eigen/src/SparseCore/SparseBlock.h b/Eigen/src/SparseCore/SparseBlock.h
index 82fae8c4b..13e8b0bf1 100644
--- a/Eigen/src/SparseCore/SparseBlock.h
+++ b/Eigen/src/SparseCore/SparseBlock.h
@@ -189,9 +189,9 @@ public:
StorageIndex p = StorageIndex(start);
for(Index k=0; k<m_outerSize.value(); ++k)
{
- Index nnz_k = tmp.innerVector(k).nonZeros();
+ StorageIndex nnz_k = internal::convert_index<StorageIndex>(tmp.innerVector(k).nonZeros());
if(!m_matrix.isCompressed())
- matrix.innerNonZeroPtr()[m_outerStart+k] = StorageIndex(nnz_k);
+ matrix.innerNonZeroPtr()[m_outerStart+k] = nnz_k;
matrix.outerIndexPtr()[m_outerStart+k] = p;
p += nnz_k;
}
@@ -504,6 +504,7 @@ template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
: public EvalIterator
{
+ enum { IsRowMajor = unary_evaluator::IsRowMajor };
const XprType& m_block;
Index m_end;
public:
@@ -528,6 +529,7 @@ public:
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
{
+ enum { IsRowMajor = unary_evaluator::IsRowMajor };
const unary_evaluator& m_eval;
Index m_outerPos;
Index m_innerIndex;
diff --git a/Eigen/src/SparseCore/SparseCompressedBase.h b/Eigen/src/SparseCore/SparseCompressedBase.h
index 15854a73b..55ad91f46 100644
--- a/Eigen/src/SparseCore/SparseCompressedBase.h
+++ b/Eigen/src/SparseCore/SparseCompressedBase.h
@@ -106,6 +106,25 @@ class SparseCompressedBase
/** \returns whether \c *this is in compressed form. */
inline bool isCompressed() const { return innerNonZeroPtr()==0; }
+ /** \returns a read-only view of the stored coefficients as a 1D array expression.
+ *
+ * \warning this method is for \b compressed \b storage \b only, and it will trigger an assertion otherwise.
+ *
+ * \sa valuePtr(), isCompressed() */
+ const Map<const Array<Scalar,Dynamic,1> > coeffs() const { eigen_assert(isCompressed()); return Array<Scalar,Dynamic,1>::Map(valuePtr(),nonZeros()); }
+
+ /** \returns a read-write view of the stored coefficients as a 1D array expression
+ *
+ * \warning this method is for \b compressed \b storage \b only, and it will trigger an assertion otherwise.
+ *
+ * Here is an example:
+ * \include SparseMatrix_coeffs.cpp
+ * and the output is:
+ * \include SparseMatrix_coeffs.out
+ *
+ * \sa valuePtr(), isCompressed() */
+ Map<Array<Scalar,Dynamic,1> > coeffs() { eigen_assert(isCompressed()); return Array<Scalar,Dynamic,1>::Map(valuePtr(),nonZeros()); }
+
protected:
/** Default constructor. Do nothing. */
SparseCompressedBase() {}
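
The new coeffs() views make it easy to apply an operation to every explicitly stored coefficient without touching the sparsity pattern. A short usage sketch (illustrative; the doc above points to SparseMatrix_coeffs.cpp for the official example):

    SparseMatrix<double> A(3,3);
    A.insert(0,0) = 1;
    A.insert(1,1) = 2;
    A.makeCompressed();   // coeffs() asserts compressed storage
    A.coeffs() += 10;     // updates stored entries only; implicit zeros stay zero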
diff --git a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
index c57d9ac59..aad7b7d79 100644
--- a/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
+++ b/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
@@ -28,6 +28,9 @@ namespace Eigen {
// generic sparse
// 4 - dense op dense product dense
// generic dense
+//
+// TODO to ease compiler job, we could specialize product/quotient with a scalar
+// and fallback to cwise-unary evaluator using bind1st_op and bind2nd_op.
template<typename BinaryOp, typename Lhs, typename Rhs>
class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Sparse>
@@ -165,7 +168,7 @@ public:
public:
EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
- : m_lhsEval(aEval.m_lhsImpl), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor), m_id(-1), m_innerSize(aEval.m_expr.rhs().innerSize())
+ : m_lhsEval(aEval.m_lhsImpl), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor), m_value(0), m_id(-1), m_innerSize(aEval.m_expr.rhs().innerSize())
{
this->operator++();
}
@@ -189,7 +192,7 @@ public:
return *this;
}
- EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
+ EIGEN_STRONG_INLINE Scalar value() const { eigen_internal_assert(m_id<m_innerSize); return m_value; }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
EIGEN_STRONG_INLINE Index row() const { return IsRowMajor ? m_rhsIter.outer() : m_id; }
@@ -253,7 +256,7 @@ public:
public:
EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
- : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsEval(aEval.m_rhsImpl), m_functor(aEval.m_functor), m_id(-1), m_innerSize(aEval.m_expr.lhs().innerSize())
+ : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsEval(aEval.m_rhsImpl), m_functor(aEval.m_functor), m_value(0), m_id(-1), m_innerSize(aEval.m_expr.lhs().innerSize())
{
this->operator++();
}
@@ -277,7 +280,7 @@ public:
return *this;
}
- EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
+ EIGEN_STRONG_INLINE Scalar value() const { eigen_internal_assert(m_id<m_innerSize); return m_value; }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
EIGEN_STRONG_INLINE Index row() const { return IsRowMajor ? m_lhsIter.outer() : m_id; }
@@ -323,12 +326,12 @@ protected:
};
// "sparse .* sparse"
-template<typename T, typename Lhs, typename Rhs>
-struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T>, Lhs, Rhs>, IteratorBased, IteratorBased>
- : evaluator_base<CwiseBinaryOp<scalar_product_op<T>, Lhs, Rhs> >
+template<typename T1, typename T2, typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IteratorBased, IteratorBased>
+ : evaluator_base<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >
{
protected:
- typedef scalar_product_op<T> BinaryOp;
+ typedef scalar_product_op<T1,T2> BinaryOp;
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
typedef typename evaluator<Rhs>::InnerIterator RhsIterator;
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
@@ -407,12 +410,12 @@ protected:
};
// "dense .* sparse"
-template<typename T, typename Lhs, typename Rhs>
-struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T>, Lhs, Rhs>, IndexBased, IteratorBased>
- : evaluator_base<CwiseBinaryOp<scalar_product_op<T>, Lhs, Rhs> >
+template<typename T1, typename T2, typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IndexBased, IteratorBased>
+ : evaluator_base<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >
{
protected:
- typedef scalar_product_op<T> BinaryOp;
+ typedef scalar_product_op<T1,T2> BinaryOp;
typedef evaluator<Lhs> LhsEvaluator;
typedef typename evaluator<Rhs>::InnerIterator RhsIterator;
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
@@ -480,12 +483,12 @@ protected:
};
// "sparse .* dense"
-template<typename T, typename Lhs, typename Rhs>
-struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T>, Lhs, Rhs>, IteratorBased, IndexBased>
- : evaluator_base<CwiseBinaryOp<scalar_product_op<T>, Lhs, Rhs> >
+template<typename T1, typename T2, typename Lhs, typename Rhs>
+struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IteratorBased, IndexBased>
+ : evaluator_base<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >
{
protected:
- typedef scalar_product_op<T> BinaryOp;
+ typedef scalar_product_op<T1,T2> BinaryOp;
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
typedef evaluator<Rhs> RhsEvaluator;
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
@@ -579,7 +582,7 @@ template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator+=(const DiagonalBase<OtherDerived>& other)
{
- call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar>());
+ call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
return derived();
}
@@ -587,7 +590,7 @@ template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator-=(const DiagonalBase<OtherDerived>& other)
{
- call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar>());
+ call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
return derived();
}
@@ -600,31 +603,31 @@ SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) c
}
template<typename DenseDerived, typename SparseDerived>
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar>, const DenseDerived, const SparseDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>
operator+(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> &b)
{
- return CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
+ return CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
}
template<typename SparseDerived, typename DenseDerived>
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
operator+(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b)
{
- return CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
+ return CwiseBinaryOp<internal::scalar_sum_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
}
template<typename DenseDerived, typename SparseDerived>
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar>, const DenseDerived, const SparseDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>
operator-(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> &b)
{
- return CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
+ return CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
}
template<typename SparseDerived, typename DenseDerived>
-EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
+EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
operator-(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b)
{
- return CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
+ return CwiseBinaryOp<internal::scalar_difference_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
}
} // end namespace Eigen
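
The mixed dense/sparse operators above now name both operand scalar types in the functor, in operand order. Usage is unchanged; a small sketch:

    MatrixXd             D = MatrixXd::Ones(3,3);
    SparseMatrix<double> S(3,3);
    S.insert(0,0) = 5.0;
    // dense + sparse: the sum iterates only the stored entries of S,
    // without materializing S as a dense matrix
    MatrixXd R = D + S;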
diff --git a/Eigen/src/SparseCore/SparseDenseProduct.h b/Eigen/src/SparseCore/SparseDenseProduct.h
index c9da8a2bb..0547db596 100644
--- a/Eigen/src/SparseCore/SparseDenseProduct.h
+++ b/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -72,14 +72,16 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, t
};
// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
-template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
-struct scalar_product_traits<T1, Ref<T2/*, _Options, _StrideType*/> >
-{
- enum {
- Defined = 1
- };
- typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
-};
+// -> let's disable it for now as it is conflicting with generic scalar*matrix and matrix*scalar operators
+// template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
+// struct ScalarBinaryOpTraits<T1, Ref<T2/*, _Options, _StrideType*/> >
+// {
+// enum {
+// Defined = 1
+// };
+// typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
+// };
+
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
{
@@ -95,7 +97,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, A
for(Index j=0; j<lhs.outerSize(); ++j)
{
// typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
- typename internal::scalar_product_traits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
+ typename ScalarBinaryOpTraits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res.coeffRef(it.index(),c) += it.value() * rhs_j;
}
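
ScalarBinaryOpTraits replaces the internal scalar_product_traits for deducing the result type of a mixed-scalar operation, as in the rhs_j declaration above. A minimal sketch of the deduction (assuming Eigen 3.3's public trait; C++11 for static_assert):

    #include <Eigen/Core>
    #include <type_traits>
    typedef Eigen::ScalarBinaryOpTraits<double, std::complex<double> >::ReturnType T;
    static_assert(std::is_same<T, std::complex<double> >::value,
                  "double op complex<double> promotes to complex<double>");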
diff --git a/Eigen/src/SparseCore/SparseMap.h b/Eigen/src/SparseCore/SparseMap.h
index eb241c3e2..f99be3379 100644
--- a/Eigen/src/SparseCore/SparseMap.h
+++ b/Eigen/src/SparseCore/SparseMap.h
@@ -166,12 +166,17 @@ class SparseMapBase<Derived,WriteAccessors>
using Base::innerIndexPtr;
using Base::outerIndexPtr;
using Base::innerNonZeroPtr;
- inline Scalar* valuePtr() { return Base::m_values; }
+ /** \copydoc SparseMatrix::valuePtr */
+ inline Scalar* valuePtr() { return Base::m_values; }
+ /** \copydoc SparseMatrix::innerIndexPtr */
inline StorageIndex* innerIndexPtr() { return Base::m_innerIndices; }
+ /** \copydoc SparseMatrix::outerIndexPtr */
inline StorageIndex* outerIndexPtr() { return Base::m_outerIndex; }
+ /** \copydoc SparseMatrix::innerNonZeroPtr */
inline StorageIndex* innerNonZeroPtr() { return Base::m_innerNonZeros; }
//----------------------------------------
+ /** \copydoc SparseMatrix::coeffRef */
inline Scalar& coeffRef(Index row, Index col)
{
const Index outer = IsRowMajor ? row : col;
@@ -181,14 +186,14 @@ class SparseMapBase<Derived,WriteAccessors>
Index end = Base::isCompressed() ? Base::m_outerIndex[outer+1] : start + Base::m_innerNonZeros[outer];
eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
- Index* r = std::lower_bound(&Base::m_innerIndices[start],&Base::m_innerIndices[end],inner);
+ StorageIndex* r = std::lower_bound(&Base::m_innerIndices[start],&Base::m_innerIndices[end],inner);
const Index id = r - &Base::m_innerIndices[0];
eigen_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
return const_cast<Scalar*>(Base::m_values)[id];
}
inline SparseMapBase(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr,
- Scalar* valuePtr, StorageIndex* innerNonZerosPtr = 0)
+ Scalar* valuePtr, StorageIndex* innerNonZerosPtr = 0)
: Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
{}
@@ -233,13 +238,15 @@ class Map<SparseMatrixType>
* stored as a sparse format as defined by the pointers \a outerIndexPtr, \a innerIndexPtr, and \a valuePtr.
* If the optional parameter \a innerNonZerosPtr is the null pointer, then a standard compressed format is assumed.
*
+ * This constructor is available only if \c SparseMatrixType is non-const.
+ *
* More details on the expected storage schemes are given in the \ref TutorialSparse "manual pages".
*/
inline Map(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr,
StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZerosPtr = 0)
: Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
{}
-
+#ifndef EIGEN_PARSED_BY_DOXYGEN
/** Empty destructor */
inline ~Map() {}
};
@@ -254,7 +261,12 @@ class Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType
enum { IsRowMajor = Base::IsRowMajor };
public:
-
+#endif
+ /** This is the const version of the above constructor.
+ *
+ * This constructor is available only if \c SparseMatrixType is const, e.g.:
+ * \code Map<const SparseMatrix<double> > \endcode
+ */
inline Map(Index rows, Index cols, Index nnz, const StorageIndex* outerIndexPtr,
const StorageIndex* innerIndexPtr, const Scalar* valuePtr, const StorageIndex* innerNonZerosPtr = 0)
: Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
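
The const and non-const Map constructors share the same signature; only the pointer constness differs. A minimal sketch of the read-only form (the arrays are hypothetical but follow the expected compressed column-major layout):

    int    outer[4] = {0, 1, 2, 3};   // cols+1 outer index entries
    int    inner[3] = {0, 1, 2};      // row index of each stored value
    double vals[3]  = {1.0, 2.0, 3.0};
    Map<const SparseMatrix<double> > M(3, 3, 3, outer, inner, vals);
    double s = M.sum();               // 6.0, without copying the arrays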
diff --git a/Eigen/src/SparseCore/SparseMatrix.h b/Eigen/src/SparseCore/SparseMatrix.h
index 760e151eb..64ca5fc44 100644
--- a/Eigen/src/SparseCore/SparseMatrix.h
+++ b/Eigen/src/SparseCore/SparseMatrix.h
@@ -35,7 +35,7 @@ namespace Eigen {
* \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
*
* This class can be extended with the help of the plugin mechanism described on the page
- * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
+ * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
*/
namespace internal {
@@ -440,7 +440,7 @@ class SparseMatrix
template<typename InputIterators,typename DupFunctor>
void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
- void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar>()); }
+ void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
template<typename DupFunctor>
void collapseDuplicates(DupFunctor dup_func = DupFunctor());
@@ -979,7 +979,7 @@ template<typename Scalar, int _Options, typename _Index>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
- internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index> >(begin, end, *this, internal::scalar_sum_op<Scalar>());
+ internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
}
/** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied:
@@ -1080,7 +1080,7 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Opt
IndexVector positions(dest.outerSize());
for (Index j=0; j<dest.outerSize(); ++j)
{
- Index tmp = dest.m_outerIndex[j];
+ StorageIndex tmp = dest.m_outerIndex[j];
dest.m_outerIndex[j] = count;
positions[j] = count;
count += tmp;
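
Alongside the scalar_sum_op update, the setFromTriplets overload taking a duplicate-resolution functor (declared earlier in this file) lets callers override the default summing behavior. An illustrative sketch (C++11 lambda):

    std::vector<Triplet<double> > t;
    t.push_back(Triplet<double>(0, 0, 1.0));
    t.push_back(Triplet<double>(0, 0, 2.0));   // duplicate coordinate
    SparseMatrix<double> A(3,3);
    A.setFromTriplets(t.begin(), t.end());     // default: sum -> A.coeff(0,0) == 3
    A.setFromTriplets(t.begin(), t.end(),
                      [](const double&, const double& b) { return b; });
                                               // keep last -> A.coeff(0,0) == 2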
diff --git a/Eigen/src/SparseCore/SparseMatrixBase.h b/Eigen/src/SparseCore/SparseMatrixBase.h
index 2a90f40bf..8816bcff4 100644
--- a/Eigen/src/SparseCore/SparseMatrixBase.h
+++ b/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -21,16 +21,10 @@ namespace Eigen {
* \tparam Derived is the derived type, e.g. a sparse matrix type, or an expression, etc.
*
* This class can be extended with the help of the plugin mechanism described on the page
- * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
+ * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
*/
template<typename Derived> class SparseMatrixBase
-#ifndef EIGEN_PARSED_BY_DOXYGEN
- : public internal::special_scalar_op_base<Derived,typename internal::traits<Derived>::Scalar,
- typename NumTraits<typename internal::traits<Derived>::Scalar>::Real,
- EigenBase<Derived> >
-#else
: public EigenBase<Derived>
-#endif // not EIGEN_PARSED_BY_DOXYGEN
{
public:
@@ -142,12 +136,20 @@ template<typename Derived> class SparseMatrixBase
inline Derived& const_cast_derived() const
{ return *static_cast<Derived*>(const_cast<SparseMatrixBase*>(this)); }
- typedef internal::special_scalar_op_base<Derived, Scalar, RealScalar, EigenBase<Derived> > Base;
- using Base::operator*;
- using Base::operator/;
+ typedef EigenBase<Derived> Base;
+
#endif // not EIGEN_PARSED_BY_DOXYGEN
#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase
+#ifdef EIGEN_PARSED_BY_DOXYGEN
+#define EIGEN_DOC_UNARY_ADDONS(METHOD,OP) /** <p>This method does not change the sparsity of \c *this: the OP is applied to explicitly stored coefficients only. \sa SparseCompressedBase::coeffs() </p> */
+#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /** <p> \warning This method returns a read-only expression for any sparse matrices. \sa \ref TutorialSparse_SubMatrices "Sparse block operations" </p> */
+#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND) /** <p> \warning This method returns a read-write expression for COND sparse matrices only. Otherwise, the returned expression is read-only. \sa \ref TutorialSparse_SubMatrices "Sparse block operations" </p> */
+#else
+#define EIGEN_DOC_UNARY_ADDONS(X,Y)
+#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
+#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND)
+#endif
# include "../plugins/CommonCwiseUnaryOps.h"
# include "../plugins/CommonCwiseBinaryOps.h"
# include "../plugins/MatrixCwiseUnaryOps.h"
@@ -156,8 +158,10 @@ template<typename Derived> class SparseMatrixBase
# ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN
# include EIGEN_SPARSEMATRIXBASE_PLUGIN
# endif
-# undef EIGEN_CURRENT_STORAGE_BASE_CLASS
#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
+#undef EIGEN_DOC_UNARY_ADDONS
+#undef EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
+#undef EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF
/** \returns the number of rows. \sa cols() */
inline Index rows() const { return derived().rows(); }
@@ -263,7 +267,7 @@ template<typename Derived> class SparseMatrixBase
Derived& operator/=(const Scalar& other);
template<typename OtherDerived> struct CwiseProductDenseReturnType {
- typedef CwiseBinaryOp<internal::scalar_product_op<typename internal::scalar_product_traits<
+ typedef CwiseBinaryOp<internal::scalar_product_op<typename ScalarBinaryOpTraits<
typename internal::traits<Derived>::Scalar,
typename internal::traits<OtherDerived>::Scalar
>::ReturnType>,
diff --git a/Eigen/src/SparseCore/SparseProduct.h b/Eigen/src/SparseCore/SparseProduct.h
index cbd0db71b..7a5ad0635 100644
--- a/Eigen/src/SparseCore/SparseProduct.h
+++ b/Eigen/src/SparseCore/SparseProduct.h
@@ -45,7 +45,7 @@ struct generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>
// dense += sparse * sparse
template<typename Dest,typename ActualLhs>
- static void addTo(Dest& dst, const ActualLhs& lhs, const Rhs& rhs, int* = typename enable_if<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>::type(0) )
+ static void addTo(Dest& dst, const ActualLhs& lhs, const Rhs& rhs, typename enable_if<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>::type* = 0)
{
typedef typename nested_eval<ActualLhs,Dynamic>::type LhsNested;
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
@@ -57,7 +57,7 @@ struct generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>
// dense -= sparse * sparse
template<typename Dest>
- static void subTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, int* = typename enable_if<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>::type(0) )
+ static void subTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, typename enable_if<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>::type* = 0)
{
addTo(dst, -lhs, rhs);
}
@@ -99,10 +99,10 @@ struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, SparseShape, Produc
// dense = sparse-product (can be sparse*sparse, sparse*perm, etc.)
template< typename DstXprType, typename Lhs, typename Rhs>
-struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::assign_op<typename DstXprType::Scalar>, Sparse2Dense>
+struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
{
typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &)
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
{
generic_product_impl<Lhs, Rhs>::evalTo(dst,src.lhs(),src.rhs());
}
@@ -110,10 +110,10 @@ struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::assig
// dense += sparse-product (can be sparse*sparse, sparse*perm, etc.)
template< typename DstXprType, typename Lhs, typename Rhs>
-struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::add_assign_op<typename DstXprType::Scalar>, Sparse2Dense>
+struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::add_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
{
typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
- static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar> &)
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
{
generic_product_impl<Lhs, Rhs>::addTo(dst,src.lhs(),src.rhs());
}
@@ -121,24 +121,24 @@ struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::add_a
// dense -= sparse-product (can be sparse*sparse, sparse*perm, etc.)
template< typename DstXprType, typename Lhs, typename Rhs>
-struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::sub_assign_op<typename DstXprType::Scalar>, Sparse2Dense>
+struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::sub_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
{
typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
- static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar> &)
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
{
generic_product_impl<Lhs, Rhs>::subTo(dst,src.lhs(),src.rhs());
}
};
template<typename Lhs, typename Rhs, int Options>
-struct evaluator<SparseView<Product<Lhs, Rhs, Options> > >
+struct unary_evaluator<SparseView<Product<Lhs, Rhs, Options> >, IteratorBased>
: public evaluator<typename Product<Lhs, Rhs, DefaultProduct>::PlainObject>
{
typedef SparseView<Product<Lhs, Rhs, Options> > XprType;
typedef typename XprType::PlainObject PlainObject;
typedef evaluator<PlainObject> Base;
-
- explicit evaluator(const XprType& xpr)
+
+ explicit unary_evaluator(const XprType& xpr)
: m_result(xpr.rows(), xpr.cols())
{
using std::abs;
@@ -147,13 +147,13 @@ struct evaluator<SparseView<Product<Lhs, Rhs, Options> > >
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(xpr.nestedExpression().lhs());
RhsNested rhsNested(xpr.nestedExpression().rhs());
-
+
internal::sparse_sparse_product_with_pruning_selector<typename remove_all<LhsNested>::type,
typename remove_all<RhsNested>::type, PlainObject>::run(lhsNested,rhsNested,m_result,
abs(xpr.reference())*xpr.epsilon());
}
-
-protected:
+
+protected:
PlainObject m_result;
};
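
The evaluator-to-unary_evaluator change affects expressions such as a pruned sparse product. Usage sketch:

    SparseMatrix<double> A(100,100), B(100,100), C;
    // (A*B).pruned() wraps the product in a SparseView; it is now dispatched
    // through the unary_evaluator specialization above, which evaluates the
    // product with pruning of near-zero results
    C = (A*B).pruned();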
diff --git a/Eigen/src/SparseCore/SparseRedux.h b/Eigen/src/SparseCore/SparseRedux.h
index 2a9718cfb..458774962 100644
--- a/Eigen/src/SparseCore/SparseRedux.h
+++ b/Eigen/src/SparseCore/SparseRedux.h
@@ -30,7 +30,10 @@ typename internal::traits<SparseMatrix<_Scalar,_Options,_Index> >::Scalar
SparseMatrix<_Scalar,_Options,_Index>::sum() const
{
eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
- return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();
+ if(this->isCompressed())
+ return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();
+ else
+ return Base::sum();
}
template<typename _Scalar, int _Options, typename _Index>
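
The isCompressed() guard matters because, in uncompressed mode, the value buffer may contain reserved-but-unused slots between inner vectors, so mapping the raw buffer would fold garbage into the sum. Illustrative sketch:

    SparseMatrix<double> A(4,4);
    A.reserve(VectorXi::Constant(4,2));  // per-column free space -> uncompressed
    A.insert(0,0) = 1.0;
    A.insert(2,1) = 2.0;
    double s = A.sum();   // 3.0: generic iterator-based path while uncompressed
    A.makeCompressed();
    s = A.sum();          // 3.0: fast contiguous Map-based path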
diff --git a/Eigen/src/SparseCore/SparseSelfAdjointView.h b/Eigen/src/SparseCore/SparseSelfAdjointView.h
index b92bb17e2..d31d9babf 100644
--- a/Eigen/src/SparseCore/SparseSelfAdjointView.h
+++ b/Eigen/src/SparseCore/SparseSelfAdjointView.h
@@ -218,18 +218,18 @@ struct SparseSelfAdjoint2Sparse {};
template<> struct AssignmentKind<SparseShape,SparseSelfAdjointShape> { typedef SparseSelfAdjoint2Sparse Kind; };
template<> struct AssignmentKind<SparseSelfAdjointShape,SparseShape> { typedef Sparse2Sparse Kind; };
-template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
-struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse, Scalar>
+template< typename DstXprType, typename SrcXprType, typename Functor>
+struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>
{
typedef typename DstXprType::StorageIndex StorageIndex;
template<typename DestScalar,int StorageOrder>
- static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
+ static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
}
template<typename DestScalar>
- static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
+ static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
// TODO directly evaluate into dst;
SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols());
@@ -250,11 +250,11 @@ template<int Mode, typename SparseLhsType, typename DenseRhsType, typename Dense
inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
EIGEN_ONLY_USED_FOR_DEBUG(alpha);
- // TODO use alpha
- eigen_assert(alpha==AlphaType(1) && "alpha != 1 is not implemented yet, sorry");
- typedef evaluator<SparseLhsType> LhsEval;
- typedef typename evaluator<SparseLhsType>::InnerIterator LhsIterator;
+ typedef typename internal::nested_eval<SparseLhsType,DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;
+ typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
+ typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
+ typedef typename LhsEval::InnerIterator LhsIterator;
typedef typename SparseLhsType::Scalar LhsScalar;
enum {
@@ -266,39 +266,53 @@ inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, cons
ProcessSecondHalf = !ProcessFirstHalf
};
- LhsEval lhsEval(lhs);
-
- for (Index j=0; j<lhs.outerSize(); ++j)
+ SparseLhsTypeNested lhs_nested(lhs);
+ LhsEval lhsEval(lhs_nested);
+
+ // work on one column at once
+ for (Index k=0; k<rhs.cols(); ++k)
{
- LhsIterator i(lhsEval,j);
- if (ProcessSecondHalf)
+ for (Index j=0; j<lhs.outerSize(); ++j)
{
- while (i && i.index()<j) ++i;
- if(i && i.index()==j)
+ LhsIterator i(lhsEval,j);
+ // handle diagonal coeff
+ if (ProcessSecondHalf)
{
- res.row(j) += i.value() * rhs.row(j);
- ++i;
+ while (i && i.index()<j) ++i;
+ if(i && i.index()==j)
+ {
+ res(j,k) += alpha * i.value() * rhs(j,k);
+ ++i;
+ }
}
+
+ // premultiplied rhs for scatters
+ typename ScalarBinaryOpTraits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha*rhs(j,k));
+ // accumulator for partial scalar product
+ typename DenseResType::Scalar res_j(0);
+ for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
+ {
+ LhsScalar lhs_ij = i.value();
+ if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
+ res_j += lhs_ij * rhs(i.index(),k);
+ res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
+ }
+ res(j,k) += alpha * res_j;
+
+ // handle diagonal coeff
+ if (ProcessFirstHalf && i && (i.index()==j))
+ res(j,k) += alpha * i.value() * rhs(j,k);
}
- for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
- {
- Index a = LhsIsRowMajor ? j : i.index();
- Index b = LhsIsRowMajor ? i.index() : j;
- LhsScalar v = i.value();
- res.row(a) += (v) * rhs.row(b);
- res.row(b) += numext::conj(v) * rhs.row(a);
- }
- if (ProcessFirstHalf && i && (i.index()==j))
- res.row(j) += i.value() * rhs.row(j);
}
}
template<typename LhsView, typename Rhs, int ProductType>
struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
+: generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >
{
template<typename Dest>
- static void evalTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs)
+ static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)
{
typedef typename LhsView::_MatrixTypeNested Lhs;
typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
@@ -306,16 +320,16 @@ struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, Pr
LhsNested lhsNested(lhsView.matrix());
RhsNested rhsNested(rhs);
- dst.setZero();
- internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, typename Dest::Scalar(1));
+ internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
}
};
template<typename Lhs, typename RhsView, int ProductType>
struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
+: generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >
{
template<typename Dest>
- static void evalTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView)
+ static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)
{
typedef typename RhsView::_MatrixTypeNested Rhs;
typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
@@ -323,10 +337,9 @@ struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, Pr
LhsNested lhsNested(lhs);
RhsNested rhsNested(rhsView.matrix());
- dst.setZero();
- // transpoe everything
+ // transpose everything
Transpose<Dest> dstT(dst);
- internal::sparse_selfadjoint_time_dense_product<RhsView::Mode>(rhsNested.transpose(), lhsNested.transpose(), dstT, typename Dest::Scalar(1));
+ internal::sparse_selfadjoint_time_dense_product<RhsView::Mode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
}
};
@@ -586,12 +599,12 @@ class SparseSymmetricPermutationProduct
namespace internal {
template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
-struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar>, Sparse2Sparse>
+struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse>
{
typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType;
typedef typename DstXprType::StorageIndex DstIndex;
template<int Options>
- static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
+ static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
{
// internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
@@ -600,7 +613,7 @@ struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>
}
template<typename DestType,unsigned int DestMode>
- static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
+ static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
{
internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
}
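
With the switch to scaleAndAddTo, selfadjoint-sparse times dense products now honor a scaling factor instead of asserting. Sketch:

    SparseMatrix<double> A(10,10);        // only one triangle needs to be stored
    // ... fill the lower triangle of A ...
    MatrixXd B = MatrixXd::Random(10,4);
    MatrixXd C = MatrixXd::Zero(10,4);
    // previously hit eigen_assert(alpha==1); now evaluated column by column
    // by the rewritten kernel above
    C.noalias() += 2.0 * (A.selfadjointView<Lower>() * B);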
diff --git a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
index 20078f72c..21c419002 100644
--- a/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
+++ b/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
@@ -51,7 +51,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();
res.reserve(estimated_nnz_prod);
- double ratioColRes = double(estimated_nnz_prod)/double(lhs.rows()*rhs.cols());
+ double ratioColRes = double(estimated_nnz_prod)/(double(lhs.rows())*double(rhs.cols()));
for (Index j=0; j<cols; ++j)
{
// FIXME:
diff --git a/Eigen/src/SparseCore/SparseVector.h b/Eigen/src/SparseCore/SparseVector.h
index 167a9886c..00ee6ec89 100644
--- a/Eigen/src/SparseCore/SparseVector.h
+++ b/Eigen/src/SparseCore/SparseVector.h
@@ -22,7 +22,7 @@ namespace Eigen {
* See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
*
* This class can be extended with the help of the plugin mechanism described on the page
- * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
+ * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
*/
namespace internal {