path: root/Eigen/src/SparseCore/SparseAssign.h
author    Gael Guennebaud <g.gael@free.fr>  2016-06-06 15:11:41 +0200
committer Gael Guennebaud <g.gael@free.fr>  2016-06-06 15:11:41 +0200
commit    66e99ab6a1444d8e3d47211e4540837e6b982a3a (patch)
tree      8435cfab07d5c788df3533569c3c29a0f8be4ba5 /Eigen/src/SparseCore/SparseAssign.h
parent    1f1e0b9e30a175c7a3197ffc87898404dda7c45e (diff)
Relax mixing-type constraints for binary coefficient-wise operators:
- Replace internal::scalar_product_traits<A,B> by Eigen::ScalarBinaryOpTraits<A,B,OP>
- Remove the "functor_is_product_like" helper (it was pretty ugly)
- Currently, OP is not used, but it is available to the user for fine-grained tuning
- Currently, only the following operators have been generalized: *, /, +, -, =, *=, /=, +=, -=
- TODO: generalize all other binary operators (comparisons, pow, etc.)
- TODO: handle "scalar op array" operators (currently only * is handled)
- TODO: move the handling of the "void" scalar type to ScalarBinaryOpTraits
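
As a hedged illustration of what the new trait enables (not part of this patch; MyScalar is a hypothetical user-defined type), mixing two scalar types in a coefficient-wise binary expression is permitted by specializing Eigen::ScalarBinaryOpTraits. The OP parameter is accepted but, as noted above, not yet used internally:

    #include <Eigen/Core>

    struct MyScalar { double v; };  // hypothetical user-defined scalar type

    namespace Eigen {
    // Declare that double and MyScalar may be mixed in coefficient-wise
    // binary expressions, and that the result is a MyScalar. The BinaryOp
    // parameter allows per-operator tuning but is currently unused.
    template<typename BinaryOp>
    struct ScalarBinaryOpTraits<double, MyScalar, BinaryOp> {
      typedef MyScalar ReturnType;
    };
    }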
Diffstat (limited to 'Eigen/src/SparseCore/SparseAssign.h')
-rw-r--r--  Eigen/src/SparseCore/SparseAssign.h | 20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/Eigen/src/SparseCore/SparseAssign.h b/Eigen/src/SparseCore/SparseAssign.h
index 4a8dd12e4..b284fa9e4 100644
--- a/Eigen/src/SparseCore/SparseAssign.h
+++ b/Eigen/src/SparseCore/SparseAssign.h
@@ -34,8 +34,8 @@ template<typename OtherDerived>
inline Derived& SparseMatrixBase<Derived>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
// by default sparse evaluations do not alias, so we can safely bypass the generic call_assignment routine
- internal::Assignment<Derived,OtherDerived,internal::assign_op<Scalar> >
- ::run(derived(), other.derived(), internal::assign_op<Scalar>());
+ internal::Assignment<Derived,OtherDerived,internal::assign_op<Scalar,typename OtherDerived::Scalar> >
+ ::run(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());
return derived();
}
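
Since the assignment functor now carries both the destination and source scalar types, assigning between sparse matrices of distinct but compatible scalars instantiates internal::assign_op<DstScalar,SrcScalar>. A minimal sketch, assuming the stock complex/real ScalarBinaryOpTraits specialization makes this pair compatible:

    #include <Eigen/SparseCore>
    #include <complex>

    int main() {
      Eigen::SparseMatrix<double> R(2, 2);
      R.insert(0, 0) = 1.0;
      R.insert(1, 1) = 2.0;

      // Real-to-complex sparse assignment; the functor instantiated by the
      // operator= above is internal::assign_op<std::complex<double>, double>.
      Eigen::SparseMatrix<std::complex<double> > C(2, 2);
      C = R;
      return 0;
    }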
@@ -127,7 +127,7 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
template< typename DstXprType, typename SrcXprType, typename Functor, typename Scalar>
struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse, Scalar>
{
- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
assign_sparse_to_sparse(dst.derived(), src.derived());
}
@@ -141,7 +141,7 @@ struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense, Scalar>
{
eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
- if(internal::is_same<Functor,internal::assign_op<Scalar> >::value)
+ if(internal::is_same<Functor,internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> >::value)
dst.setZero();
internal::evaluator<SrcXprType> srcEval(src);
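
The is_same test above distinguishes plain assignment, which must clear the dense destination before writing the nonzeros, from compound assignment, which accumulates into it. A small sketch of the two paths (illustrative, not part of the patch):

    #include <Eigen/Dense>
    #include <Eigen/SparseCore>

    int main() {
      Eigen::SparseMatrix<double> S(2, 2);
      S.insert(0, 1) = 3.0;

      Eigen::MatrixXd D = Eigen::MatrixXd::Ones(2, 2);
      D = S;   // assign_op: D is zeroed first, then the nonzeros are copied
      D += S;  // add_assign_op: no zeroing, nonzeros are added into D
      return 0;
    }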
@@ -156,10 +156,10 @@ struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense, Scalar>
// Specialization for "dst = dec.solve(rhs)"
// NOTE we need to specialize it for Sparse2Sparse to avoid ambiguous specialization error
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
-struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar>, Sparse2Sparse, Scalar>
+struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Sparse2Sparse, Scalar>
{
typedef Solve<DecType,RhsType> SrcXprType;
- static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar> &)
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
{
src.dec()._solve_impl(src.rhs(), dst);
}
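
The specialization above handles "dst = dec.solve(rhs)" when both sides are sparse by forwarding to the decomposition's _solve_impl. A minimal usage sketch with SparseLU (illustrative; any sparse decomposition supporting sparse right-hand sides would do):

    #include <Eigen/SparseCore>
    #include <Eigen/SparseLU>

    int main() {
      Eigen::SparseMatrix<double> A(2, 2), B(2, 2);
      A.insert(0, 0) = 4.0;  A.insert(1, 1) = 2.0;
      B.insert(0, 0) = 1.0;  B.insert(1, 1) = 1.0;
      A.makeCompressed();

      Eigen::SparseLU<Eigen::SparseMatrix<double> > lu(A);

      // Sparse destination = dec.solve(sparse rhs) routes through the
      // Assignment<..., Solve<...>, assign_op<Scalar,Scalar>, Sparse2Sparse>
      // specialization above, which calls src.dec()._solve_impl(src.rhs(), dst).
      Eigen::SparseMatrix<double> X = lu.solve(B);
      return 0;
    }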
@@ -176,7 +176,7 @@ struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse, Scalar>
typedef Array<StorageIndex,Dynamic,1> ArrayXI;
typedef Array<Scalar,Dynamic,1> ArrayXS;
template<int Options>
- static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
+ static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
Index size = src.diagonal().size();
dst.makeCompressed();
@@ -187,15 +187,15 @@ struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse, Scalar>
}
template<typename DstDerived>
- static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar> &/*func*/)
+ static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{
dst.diagonal() = src.diagonal();
}
- static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar> &/*func*/)
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{ dst.diagonal() += src.diagonal(); }
- static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar> &/*func*/)
+ static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
{ dst.diagonal() -= src.diagonal(); }
};
} // end namespace internal
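
The Diagonal2Sparse overloads above cover plain assignment of a diagonal expression to a sparse matrix as well as the += and -= updates. A short sketch (assuming the corresponding operators are exposed on SparseMatrix, as the overloads suggest):

    #include <Eigen/Dense>
    #include <Eigen/SparseCore>

    int main() {
      Eigen::VectorXd v(3);
      v << 1.0, 2.0, 3.0;

      Eigen::SparseMatrix<double> S(3, 3);
      S = v.asDiagonal();   // plain assignment: the first two run() overloads
      S += v.asDiagonal();  // compound update: the add_assign_op overload
      return 0;
    }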