From 7b80609d496ae50741d1866f2a9ef9149c4edc04 Mon Sep 17 00:00:00 2001
From: acxz <5379597-acxz@users.noreply.gitlab.com>
Date: Tue, 1 Dec 2020 03:08:47 +0000
Subject: add EIGEN_DEVICE_FUNC to methods

---
 Eigen/src/Core/ProductEvaluators.h | 148 ++++++++++++++++++-------------------
 1 file changed, 74 insertions(+), 74 deletions(-)

(limited to 'Eigen/src/Core/ProductEvaluators.h')

diff --git a/Eigen/src/Core/ProductEvaluators.h b/Eigen/src/Core/ProductEvaluators.h
index 6e220d736..02b58438c 100644
--- a/Eigen/src/Core/ProductEvaluators.h
+++ b/Eigen/src/Core/ProductEvaluators.h
@@ -14,7 +14,7 @@
 #define EIGEN_PRODUCTEVALUATORS_H
 
 namespace Eigen {
- 
+
 namespace internal {
 
 /** \internal
@@ -22,19 +22,19 @@ namespace internal {
  * Since products require special treatments to handle all possible cases,
  * we simply defer the evaluation logic to a product_evaluator class
  * which offers more partial specialization possibilities.
- * 
+ *
  * \sa class product_evaluator
  */
 template
-struct evaluator >
+struct evaluator >
  : public product_evaluator >
 {
   typedef Product XprType;
   typedef product_evaluator Base;
- 
+
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr) : Base(xpr) {}
 };
- 
+
 // Catch "scalar * ( A * B )" and transform it to "(A*scalar) * B"
 // TODO we should apply that rule only if that's really helpful
 template
@@ -62,12 +62,12 @@ struct evaluator,
 template
-struct evaluator, DiagIndex> >
+struct evaluator, DiagIndex> >
  : public evaluator, DiagIndex> >
 {
   typedef Diagonal, DiagIndex> XprType;
   typedef evaluator, DiagIndex> > Base;
- 
+
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit evaluator(const XprType& xpr)
     : Base(Diagonal, DiagIndex>(
         Product(xpr.nestedExpression().lhs(), xpr.nestedExpression().rhs()),
@@ -108,23 +108,23 @@ struct product_evaluator, ProductTag, LhsShape, RhsSh
     : m_result(xpr.rows(), xpr.cols())
   {
     ::new (static_cast(this)) Base(m_result);
- 
+
     // FIXME shall we handle nested_eval here?,
     // if so, then we must take care at removing the call to nested_eval in the specializations (e.g., in permutation_matrix_product, transposition_matrix_product, etc.)
// typedef typename internal::nested_eval::type LhsNested; // typedef typename internal::nested_eval::type RhsNested; // typedef typename internal::remove_all::type LhsNestedCleaned; // typedef typename internal::remove_all::type RhsNestedCleaned; -// +// // const LhsNested lhs(xpr.lhs()); // const RhsNested rhs(xpr.rhs()); -// +// // generic_product_impl::evalTo(m_result, lhs, rhs); generic_product_impl::evalTo(m_result, xpr.lhs(), xpr.rhs()); } - -protected: + +protected: PlainObject m_result; }; @@ -250,7 +250,7 @@ struct generic_product_impl { dst.coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum(); } - + template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { @@ -298,7 +298,7 @@ struct generic_product_impl { template struct is_row_major : internal::conditional<(int(T::Flags)&RowMajorBit), internal::true_type, internal::false_type>::type {}; typedef typename Product::Scalar Scalar; - + // TODO it would be nice to be able to exploit our *_assign_op functors for that purpose struct set { template EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() = src; } }; struct add { template EIGEN_DEVICE_FUNC void operator()(const Dst& dst, const Src& src) const { dst.const_cast_derived() += src; } }; @@ -310,31 +310,31 @@ struct generic_product_impl dst.const_cast_derived() += m_scale * src; } }; - + template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { internal::outer_product_selector_run(dst, lhs, rhs, set(), is_row_major()); } - + template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { internal::outer_product_selector_run(dst, lhs, rhs, add(), is_row_major()); } - + template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { internal::outer_product_selector_run(dst, lhs, rhs, sub(), is_row_major()); } - + template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { internal::outer_product_selector_run(dst, lhs, rhs, adds(alpha), is_row_major()); } - + }; @@ -343,7 +343,7 @@ template struct generic_product_impl_base { typedef typename Product::Scalar Scalar; - + template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { dst.setZero(); scaleAndAddTo(dst, lhs, rhs, Scalar(1)); } @@ -355,7 +355,7 @@ struct generic_product_impl_base template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { scaleAndAddTo(dst, lhs, rhs, Scalar(-1)); } - + template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { Derived::scaleAndAddTo(dst,lhs,rhs,alpha); } @@ -379,7 +379,7 @@ struct generic_product_impl if (lhs.rows() == 1 && rhs.cols() == 1) { dst.coeffRef(0,0) += alpha * lhs.row(0).conjugate().dot(rhs.col(0)); return; - } + } LhsNested actual_lhs(lhs); RhsNested actual_rhs(rhs); internal::gemv_dense_selector }; template -struct generic_product_impl +struct generic_product_impl { typedef typename Product::Scalar Scalar; - + template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { @@ -408,7 +408,7 @@ struct generic_product_impl // dst.noalias() += lhs.lazyProduct(rhs); call_assignment_no_alias(dst, lhs.lazyProduct(rhs), internal::add_assign_op()); } - + 
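The hunks above annotate the inner-product, outer-product and generic_product_impl_base helpers. The structural idea visible in the diff is that evalTo, addTo and subTo are all funneled into a single scaleAndAddTo kernel. The following standalone sketch mirrors that dispatch with simplified, made-up names (product_impl_base_sketch and naive_gemm_sketch are illustrations, and the triple loop merely stands in for Eigen's real GEMM path; this is not Eigen's actual internal code):

#include <Eigen/Dense>
#include <iostream>

// CRTP base: every entry point is expressed through scaleAndAddTo, the same
// pattern as generic_product_impl_base in the hunk above (simplified).
template <typename Derived>
struct product_impl_base_sketch {
  template <typename Dst, typename Lhs, typename Rhs>
  static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    dst.setZero();                                                     // dst  = lhs * rhs
    Derived::scaleAndAddTo(dst, lhs, rhs, typename Dst::Scalar(1));
  }
  template <typename Dst, typename Lhs, typename Rhs>
  static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    Derived::scaleAndAddTo(dst, lhs, rhs, typename Dst::Scalar(1));    // dst += lhs * rhs
  }
  template <typename Dst, typename Lhs, typename Rhs>
  static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) {
    Derived::scaleAndAddTo(dst, lhs, rhs, typename Dst::Scalar(-1));   // dst -= lhs * rhs
  }
};

// One concrete kernel: a naive triple loop standing in for the optimized path.
struct naive_gemm_sketch : product_impl_base_sketch<naive_gemm_sketch> {
  template <typename Dst, typename Lhs, typename Rhs, typename Scalar>
  static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) {
    for (Eigen::Index i = 0; i < dst.rows(); ++i)
      for (Eigen::Index j = 0; j < dst.cols(); ++j)
        for (Eigen::Index k = 0; k < lhs.cols(); ++k)
          dst(i, j) += alpha * lhs(i, k) * rhs(k, j);
  }
};

int main() {
  Eigen::Matrix3d a = Eigen::Matrix3d::Random();
  Eigen::Matrix3d b = Eigen::Matrix3d::Random();
  Eigen::Matrix3d c;
  naive_gemm_sketch::evalTo(c, a, b);
  std::cout << (c - a * b).norm() << "\n";   // ~1e-16: same product, different kernel
  return 0;
}

The payoff of the funnel is that a product implementation only has to provide scaleAndAddTo to serve assignment, +=, -= and scaled accumulation, which is also why the patch can annotate these thin forwarding methods wholesale.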
template static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs) { @@ -525,7 +525,7 @@ struct product_evaluator, ProductTag, DenseShape, typedef typename internal::nested_eval::type LhsNested; typedef typename internal::nested_eval::type RhsNested; - + typedef typename internal::remove_all::type LhsNestedCleaned; typedef typename internal::remove_all::type RhsNestedCleaned; @@ -544,7 +544,7 @@ struct product_evaluator, ProductTag, DenseShape, typedef typename find_best_packet::type RhsVecPacketType; enum { - + LhsCoeffReadCost = LhsEtorType::CoeffReadCost, RhsCoeffReadCost = RhsEtorType::CoeffReadCost, CoeffReadCost = InnerSize==0 ? NumTraits::ReadCost @@ -553,10 +553,10 @@ struct product_evaluator, ProductTag, DenseShape, + (InnerSize - 1) * NumTraits::AddCost, Unroll = CoeffReadCost <= EIGEN_UNROLLING_LIMIT, - + LhsFlags = LhsEtorType::Flags, RhsFlags = RhsEtorType::Flags, - + LhsRowMajor = LhsFlags & RowMajorBit, RhsRowMajor = RhsFlags & RowMajorBit, @@ -566,7 +566,7 @@ struct product_evaluator, ProductTag, DenseShape, // Here, we don't care about alignment larger than the usable packet size. LhsAlignment = EIGEN_PLAIN_ENUM_MIN(LhsEtorType::Alignment,LhsVecPacketSize*int(sizeof(typename LhsNestedCleaned::Scalar))), RhsAlignment = EIGEN_PLAIN_ENUM_MIN(RhsEtorType::Alignment,RhsVecPacketSize*int(sizeof(typename RhsNestedCleaned::Scalar))), - + SameType = is_same::value, CanVectorizeRhs = bool(RhsRowMajor) && (RhsFlags & PacketAccessBit) && (ColsAtCompileTime!=1), @@ -581,7 +581,7 @@ struct product_evaluator, ProductTag, DenseShape, // TODO enable vectorization for mixed types | (SameType && (CanVectorizeLhs || CanVectorizeRhs) ? PacketAccessBit : 0) | (XprType::IsVectorAtCompileTime ? LinearAccessBit : 0), - + LhsOuterStrideBytes = int(LhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename LhsNestedCleaned::Scalar)), RhsOuterStrideBytes = int(RhsNestedCleaned::OuterStrideAtCompileTime) * int(sizeof(typename RhsNestedCleaned::Scalar)), @@ -600,7 +600,7 @@ struct product_evaluator, ProductTag, DenseShape, && (LhsFlags & RhsFlags & ActualPacketAccessBit) && (InnerSize % packet_traits::size == 0) }; - + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const { return (m_lhs.row(row).transpose().cwiseProduct( m_rhs.col(col) )).sum(); @@ -642,7 +642,7 @@ struct product_evaluator, ProductTag, DenseShape, protected: typename internal::add_const_on_value_type::type m_lhs; typename internal::add_const_on_value_type::type m_rhs; - + LhsEtorType m_lhsImpl; RhsEtorType m_rhsImpl; @@ -673,7 +673,7 @@ struct product_evaluator, LazyCoeffBasedProduc template struct etor_product_packet_impl { - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) { etor_product_packet_impl::run(row, col, lhs, rhs, innerDim, res); res = pmadd(pset1(lhs.coeff(row, Index(UnrollingIndex-1))), rhs.template packet(Index(UnrollingIndex-1), col), res); @@ -683,7 +683,7 @@ struct etor_product_packet_impl struct etor_product_packet_impl { - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res) { etor_product_packet_impl::run(row, col, lhs, 
rhs, innerDim, res); res = pmadd(lhs.template packet(row, Index(UnrollingIndex-1)), pset1(rhs.coeff(Index(UnrollingIndex-1), col)), res); @@ -693,7 +693,7 @@ struct etor_product_packet_impl struct etor_product_packet_impl { - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) { res = pmul(pset1(lhs.coeff(row, Index(0))),rhs.template packet(Index(0), col)); } @@ -702,7 +702,7 @@ struct etor_product_packet_impl template struct etor_product_packet_impl { - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res) { res = pmul(lhs.template packet(row, Index(0)), pset1(rhs.coeff(Index(0), col))); } @@ -711,7 +711,7 @@ struct etor_product_packet_impl template struct etor_product_packet_impl { - static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Index /*innerDim*/, Packet &res) + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Index /*innerDim*/, Packet &res) { res = pset1(typename unpacket_traits::type(0)); } @@ -720,7 +720,7 @@ struct etor_product_packet_impl template struct etor_product_packet_impl { - static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Index /*innerDim*/, Packet &res) + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& /*lhs*/, const Rhs& /*rhs*/, Index /*innerDim*/, Packet &res) { res = pset1(typename unpacket_traits::type(0)); } @@ -729,7 +729,7 @@ struct etor_product_packet_impl template struct etor_product_packet_impl { - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) { res = pset1(typename unpacket_traits::type(0)); for(Index i = 0; i < innerDim; ++i) @@ -740,7 +740,7 @@ struct etor_product_packet_impl template struct etor_product_packet_impl { - static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res) { res = pset1(typename unpacket_traits::type(0)); for(Index i = 0; i < innerDim; ++i) @@ -762,7 +762,7 @@ struct generic_product_impl : generic_product_impl_base > { typedef typename Product::Scalar Scalar; - + template static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { @@ -776,7 +776,7 @@ struct generic_product_impl : generic_product_impl_base > { typedef typename Product::Scalar Scalar; - + template static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { @@ -797,7 +797,7 @@ struct generic_product_impl : generic_product_impl_base > { typedef typename Product::Scalar Scalar; - + template static EIGEN_DEVICE_FUNC void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) @@ -811,7 +811,7 @@ struct generic_product_impl : 
generic_product_impl_base > { typedef typename Product::Scalar Scalar; - + template static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha) { @@ -823,7 +823,7 @@ struct generic_product_impl /*************************************************************************** * Diagonal products ***************************************************************************/ - + template struct diagonal_product_evaluator_base : evaluator_base @@ -832,10 +832,10 @@ struct diagonal_product_evaluator_base public: enum { CoeffReadCost = NumTraits::MulCost + evaluator::CoeffReadCost + evaluator::CoeffReadCost, - + MatrixFlags = evaluator::Flags, DiagFlags = evaluator::Flags, - + _StorageOrder = (Derived::MaxRowsAtCompileTime==1 && Derived::MaxColsAtCompileTime!=1) ? RowMajor : (Derived::MaxColsAtCompileTime==1 && Derived::MaxRowsAtCompileTime!=1) ? ColMajor : MatrixFlags & RowMajorBit ? RowMajor : ColMajor, @@ -858,14 +858,14 @@ public: || (DiagonalType::SizeAtCompileTime==Dynamic && MatrixType::RowsAtCompileTime==1 && ProductOrder==OnTheLeft) || (DiagonalType::SizeAtCompileTime==Dynamic && MatrixType::ColsAtCompileTime==1 && ProductOrder==OnTheRight) }; - + EIGEN_DEVICE_FUNC diagonal_product_evaluator_base(const MatrixType &mat, const DiagonalType &diag) : m_diagImpl(diag), m_matImpl(mat) { EIGEN_INTERNAL_CHECK_COST_VALUE(NumTraits::MulCost); EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost); } - + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index idx) const { if(AsScalarProduct) @@ -873,7 +873,7 @@ public: else return m_diagImpl.coeff(idx) * m_matImpl.coeff(idx); } - + protected: template EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::true_type) const @@ -881,7 +881,7 @@ protected: return internal::pmul(m_matImpl.template packet(row, col), internal::pset1(m_diagImpl.coeff(id))); } - + template EIGEN_STRONG_INLINE PacketType packet_impl(Index row, Index col, Index id, internal::false_type) const { @@ -892,7 +892,7 @@ protected: return internal::pmul(m_matImpl.template packet(row, col), m_diagImpl.template packet(id)); } - + evaluator m_diagImpl; evaluator m_matImpl; }; @@ -907,24 +907,24 @@ struct product_evaluator, ProductTag, DiagonalSha using Base::m_matImpl; using Base::coeff; typedef typename Base::Scalar Scalar; - + typedef Product XprType; typedef typename XprType::PlainObject PlainObject; typedef typename Lhs::DiagonalVectorType DiagonalType; - + enum { StorageOrder = Base::_StorageOrder }; EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr) : Base(xpr.rhs(), xpr.lhs().diagonal()) { } - + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const { return m_diagImpl.coeff(row) * m_matImpl.coeff(row, col); } - + #ifndef EIGEN_GPUCC template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const @@ -934,7 +934,7 @@ struct product_evaluator, ProductTag, DiagonalSha return this->template packet_impl(row,col, row, typename internal::conditional::type()); } - + template EIGEN_STRONG_INLINE PacketType packet(Index idx) const { @@ -953,22 +953,22 @@ struct product_evaluator, ProductTag, DenseShape, using Base::m_matImpl; using Base::coeff; typedef typename Base::Scalar Scalar; - + typedef Product XprType; typedef typename XprType::PlainObject PlainObject; - + enum { StorageOrder = Base::_StorageOrder }; EIGEN_DEVICE_FUNC explicit product_evaluator(const XprType& xpr) : Base(xpr.lhs(), xpr.rhs().diagonal()) { } - + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar coeff(Index 
row, Index col) const { return m_matImpl.coeff(row, col) * m_diagImpl.coeff(col); } - + #ifndef EIGEN_GPUCC template EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const @@ -976,7 +976,7 @@ struct product_evaluator, ProductTag, DenseShape, return this->template packet_impl(row,col, col, typename internal::conditional::type()); } - + template EIGEN_STRONG_INLINE PacketType packet(Index idx) const { @@ -1004,7 +1004,7 @@ struct permutation_matrix_product typedef typename remove_all::type MatrixTypeCleaned; template - static inline void run(Dest& dst, const PermutationType& perm, const ExpressionType& xpr) + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Dest& dst, const PermutationType& perm, const ExpressionType& xpr) { MatrixType mat(xpr); const Index n = Side==OnTheLeft ? mat.rows() : mat.cols(); @@ -1058,7 +1058,7 @@ template struct generic_product_impl { template - static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { permutation_matrix_product::run(dst, lhs, rhs); } @@ -1068,7 +1068,7 @@ template struct generic_product_impl { template - static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { permutation_matrix_product::run(dst, rhs, lhs); } @@ -1078,7 +1078,7 @@ template struct generic_product_impl, Rhs, PermutationShape, MatrixShape, ProductTag> { template - static void evalTo(Dest& dst, const Inverse& lhs, const Rhs& rhs) + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Inverse& lhs, const Rhs& rhs) { permutation_matrix_product::run(dst, lhs.nestedExpression(), rhs); } @@ -1088,7 +1088,7 @@ template struct generic_product_impl, MatrixShape, PermutationShape, ProductTag> { template - static void evalTo(Dest& dst, const Lhs& lhs, const Inverse& rhs) + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Inverse& rhs) { permutation_matrix_product::run(dst, rhs.nestedExpression(), lhs); } @@ -1110,9 +1110,9 @@ struct transposition_matrix_product { typedef typename nested_eval::type MatrixType; typedef typename remove_all::type MatrixTypeCleaned; - + template - static inline void run(Dest& dst, const TranspositionType& tr, const ExpressionType& xpr) + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(Dest& dst, const TranspositionType& tr, const ExpressionType& xpr) { MatrixType mat(xpr); typedef typename TranspositionType::StorageIndex StorageIndex; @@ -1135,7 +1135,7 @@ template struct generic_product_impl { template - static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { transposition_matrix_product::run(dst, lhs, rhs); } @@ -1145,7 +1145,7 @@ template struct generic_product_impl { template - static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs) { transposition_matrix_product::run(dst, rhs, lhs); } @@ -1156,7 +1156,7 @@ template struct generic_product_impl, Rhs, TranspositionsShape, MatrixShape, ProductTag> { template - static void evalTo(Dest& dst, const Transpose& lhs, const Rhs& rhs) + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Transpose& lhs, const Rhs& rhs) { transposition_matrix_product::run(dst, lhs.nestedExpression(), rhs); } 
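The diagonal-product evaluators in the hunks above never materialize the diagonal matrix: per the diff, the coefficient is computed as m_diagImpl.coeff(row) * m_matImpl.coeff(row, col) when the diagonal is on the left, and as m_matImpl.coeff(row, col) * m_diagImpl.coeff(col) when it is on the right. A small self-contained check of that identity through Eigen's public API (it only verifies the arithmetic; it does not reproduce the evaluator classes):

#include <Eigen/Dense>
#include <cassert>
#include <cmath>

int main() {
  Eigen::Matrix3d m = Eigen::Matrix3d::Random();
  Eigen::Vector3d d(1.0, 2.0, 3.0);

  // Both products go through the diagonal-product evaluator paths touched above.
  Eigen::Matrix3d left  = d.asDiagonal() * m;   // diagonal on the left
  Eigen::Matrix3d right = m * d.asDiagonal();   // diagonal on the right

  for (int i = 0; i < 3; ++i)
    for (int j = 0; j < 3; ++j) {
      assert(std::abs(left(i, j)  - d(i) * m(i, j)) < 1e-12);   // (D*M)(i,j) == d(i)*M(i,j)
      assert(std::abs(right(i, j) - m(i, j) * d(j)) < 1e-12);   // (M*D)(i,j) == M(i,j)*d(j)
    }
  return 0;
}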
@@ -1166,7 +1166,7 @@ template
 struct generic_product_impl, MatrixShape, TranspositionsShape, ProductTag>
 {
   template
-  static void evalTo(Dest& dst, const Lhs& lhs, const Transpose& rhs)
+  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalTo(Dest& dst, const Lhs& lhs, const Transpose& rhs)
   {
     transposition_matrix_product::run(dst, rhs.nestedExpression(), lhs);
   }
-- 
cgit v1.2.3
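For context on what the added qualifier buys: in Eigen, EIGEN_DEVICE_FUNC expands to __host__ __device__ when the translation unit is compiled by a GPU compiler (CUDA or HIP) and to nothing otherwise, so marking these evaluator and generic_product_impl methods makes the dense product paths callable from device code. The snippet below is illustrative only; the kernel is not part of the patch, the macro shape is paraphrased, and the exact guards live in Eigen/src/Core/util/Macros.h.

// Rough shape of the macro (paraphrased, not the verbatim definition):
//   #if defined(EIGEN_GPUCC)            // set when nvcc/hipcc compiles the file
//   #  define EIGEN_DEVICE_FUNC __host__ __device__
//   #else
//   #  define EIGEN_DEVICE_FUNC
//   #endif

#include <Eigen/Dense>

#if defined(__CUDACC__)   // only meaningful when built as CUDA
__global__ void small_product_kernel(const double* a, const double* b, double* c) {
  // Fixed-size, allocation-free expressions; the product below instantiates the
  // evaluators from this file for the device, which is what the patch enables.
  Eigen::Map<const Eigen::Matrix3d> A(a);
  Eigen::Map<const Eigen::Matrix3d> B(b);
  Eigen::Map<Eigen::Matrix3d> C(c);
  C = A * B;
}
#endif

In practice only fixed-size, heap-free expressions such as the Matrix3d maps above are suited to kernel code; dynamic-size products still rely on host-side machinery (memory allocation, cache-blocked kernels) and are not expected to work in device code.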