// This file is part of Eigen, a lightweight C++ template library
// for linear algebra. Eigen itself is part of the KDE project.
//
// Copyright (C) 2006-2008 Benoit Jacob <jacob@math.jussieu.fr>
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#ifndef EIGEN_PRODUCT_H #define EIGEN_PRODUCT_H template struct ei_product_impl { inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) { ei_product_impl::run(row, col, lhs, rhs, res); res += lhs.coeff(row, Index) * rhs.coeff(Index, col); } }; template struct ei_product_impl<0, Size, Lhs, Rhs> { inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) { res = lhs.coeff(row, 0) * rhs.coeff(0, col); } }; template struct ei_product_impl { inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar& res) { res = lhs.coeff(row, 0) * rhs.coeff(0, col); for(int i = 1; i < lhs.cols(); i++) res += lhs.coeff(row, i) * rhs.coeff(i, col); } }; // prevent buggy user code from causing an infinite recursion template struct ei_product_impl { inline static void run(int, int, const Lhs&, const Rhs&, typename Lhs::Scalar&) {} }; //---------- template struct ei_packet_product_impl; template struct ei_packet_product_impl { inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) { ei_packet_product_impl::run(row, col, lhs, rhs, res); res = ei_pmadd(ei_pset1(lhs.coeff(row, Index)), rhs.template packetCoeff(Index, col), res); } }; template struct ei_packet_product_impl { inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) { ei_packet_product_impl::run(row, col, lhs, rhs, res); res = ei_pmadd(lhs.template packetCoeff(row, Index), ei_pset1(rhs.coeff(Index, col)), res); } }; template struct ei_packet_product_impl { inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) { res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packetCoeff(0, col)); } }; template struct ei_packet_product_impl { inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) { res = ei_pmul(lhs.template packetCoeff(row, 0), ei_pset1(rhs.coeff(0, col))); 
} }; template struct ei_packet_product_impl { inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res) { res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packetCoeff(0, col)); for(int i = 1; i < lhs.cols(); i++) res = ei_pmadd(ei_pset1(lhs.coeff(row, i)), rhs.template packetCoeff(i, col), res); } }; template struct ei_packet_product_impl { inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res) { res = ei_pmul(lhs.template packetCoeff(row, 0), ei_pset1(rhs.coeff(0, col))); for(int i = 1; i < lhs.cols(); i++) res = ei_pmadd(lhs.template packetCoeff(row, i), ei_pset1(rhs.coeff(i, col)), res); } }; /** \class Product * * \brief Expression of the product of two matrices * * \param Lhs the type of the left-hand side * \param Rhs the type of the right-hand side * \param EvalMode internal use only * * This class represents an expression of the product of two matrices. * It is the return type of the operator* between matrices, and most of the time * this is the only way it is used. */ template struct ei_product_eval_mode { enum{ value = Lhs::MaxRowsAtCompileTime >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD && Rhs::MaxColsAtCompileTime >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD && Lhs::MaxColsAtCompileTime >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD && (Rhs::Flags&Diagonal)!=Diagonal ? 
CacheFriendlyProduct : NormalProduct }; }; template class ei_product_eval_to_column_major { typedef typename ei_traits::Scalar _Scalar; enum {_MaxRows = ei_traits::MaxRowsAtCompileTime, _MaxCols = ei_traits::MaxColsAtCompileTime, _Flags = ei_traits::Flags }; public: typedef Matrix<_Scalar, ei_traits::RowsAtCompileTime, ei_traits::ColsAtCompileTime, ei_traits::MaxRowsAtCompileTime, ei_traits::MaxColsAtCompileTime, ei_corrected_matrix_flags< _Scalar, ei_size_at_compile_time<_MaxRows,_MaxCols>::ret, _Flags >::ret & ~RowMajorBit > type; }; // as ei_nested, but evaluate to a column-major matrix if an evaluation is required template struct ei_product_nested_rhs { typedef typename ei_meta_if< ei_must_nest_by_value::ret, T, typename ei_meta_if< ((ei_traits::Flags & EvalBeforeNestingBit) || (n+1) * (NumTraits::Scalar>::ReadCost) < (n-1) * T::CoeffReadCost), typename ei_product_eval_to_column_major::type, const T& >::ret >::ret type; }; template struct ei_traits > { typedef typename Lhs::Scalar Scalar; typedef typename ei_nested::type LhsNested; typedef typename ei_meta_if::type, typename ei_nested::type>::ret RhsNested; typedef typename ei_unconst::type>::type _LhsNested; typedef typename ei_unconst::type>::type _RhsNested; enum { LhsCoeffReadCost = _LhsNested::CoeffReadCost, RhsCoeffReadCost = _RhsNested::CoeffReadCost, LhsFlags = _LhsNested::Flags, RhsFlags = _RhsNested::Flags, RowsAtCompileTime = Lhs::RowsAtCompileTime, ColsAtCompileTime = Rhs::ColsAtCompileTime, MaxRowsAtCompileTime = Lhs::MaxRowsAtCompileTime, MaxColsAtCompileTime = Rhs::MaxColsAtCompileTime, // the vectorization flags are only used by the normal product, // the other one is always vectorized ! 
_RhsVectorizable = (RhsFlags & RowMajorBit) && (RhsFlags & VectorizableBit) && (ColsAtCompileTime % ei_packet_traits::size == 0), _LhsVectorizable = (!(LhsFlags & RowMajorBit)) && (LhsFlags & VectorizableBit) && (RowsAtCompileTime % ei_packet_traits::size == 0), _Vectorizable = (_LhsVectorizable || _RhsVectorizable) ? 1 : 0, _RowMajor = (RhsFlags & RowMajorBit) && (EvalMode==(int)CacheFriendlyProduct ? (int)LhsFlags & RowMajorBit : (!_LhsVectorizable)), _LostBits = ~((_RowMajor ? 0 : RowMajorBit) | ((RowsAtCompileTime == Dynamic || ColsAtCompileTime == Dynamic) ? 0 : LargeBit)), Flags = ((unsigned int)(LhsFlags | RhsFlags) & HereditaryBits & _LostBits) | EvalBeforeAssigningBit | EvalBeforeNestingBit | (_Vectorizable ? VectorizableBit : 0), CoeffReadCost = Lhs::ColsAtCompileTime == Dynamic ? Dynamic : Lhs::ColsAtCompileTime * (NumTraits::MulCost + LhsCoeffReadCost + RhsCoeffReadCost) + (Lhs::ColsAtCompileTime - 1) * NumTraits::AddCost }; }; template class Product : ei_no_assignment_operator, public MatrixBase > { public: EIGEN_GENERIC_PUBLIC_INTERFACE(Product) typedef typename ei_traits::LhsNested LhsNested; typedef typename ei_traits::RhsNested RhsNested; typedef typename ei_traits::_LhsNested _LhsNested; typedef typename ei_traits::_RhsNested _RhsNested; enum { PacketSize = ei_packet_traits::size }; inline Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) { ei_assert(lhs.cols() == rhs.rows()); } /** \internal */ template void _cacheFriendlyEval(DestDerived& res) const; /** \internal */ template void _cacheFriendlyEvalAndAdd(DestDerived& res) const; private: inline int _rows() const { return m_lhs.rows(); } inline int _cols() const { return m_rhs.cols(); } const Scalar _coeff(int row, int col) const { if ((Rhs::Flags&Diagonal)==Diagonal) { return m_lhs.coeff(row, col) * m_rhs.coeff(col, col); } else { Scalar res; const bool unroll = CoeffReadCost <= EIGEN_UNROLLING_LIMIT; ei_product_impl ::run(row, col, m_lhs, m_rhs, res); return res; } } template 
const PacketScalar _packetCoeff(int row, int col) const { if ((Rhs::Flags&Diagonal)==Diagonal) { assert((_LhsNested::Flags&RowMajorBit)==0); return ei_pmul(m_lhs.template packetCoeff(row, col), ei_pset1(m_rhs.coeff(col, col))); } else { const bool unroll = CoeffReadCost <= EIGEN_UNROLLING_LIMIT; PacketScalar res; ei_packet_product_impl ::run(row, col, m_lhs, m_rhs, res); return res; } } template friend struct ei_cache_friendly_selector; protected: const LhsNested m_lhs; const RhsNested m_rhs; }; /** \returns the matrix product of \c *this and \a other. * * \sa lazy(), operator*=(const MatrixBase&) */ template template inline const typename MatrixBase::template ProductReturnType::Type MatrixBase::operator*(const MatrixBase &other) const { assert( (Derived::Flags&ArrayBit) == (OtherDerived::Flags&ArrayBit) ); return typename ProductReturnType::Type(derived(), other.derived()); } /** replaces \c *this by \c *this * \a other. * * \returns a reference to \c *this */ template template inline Derived & MatrixBase::operator*=(const MatrixBase &other) { return *this = *this * other; } /** \internal */ template template inline Derived& MatrixBase::operator+=(const Flagged, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other) { other._expression()._cacheFriendlyEvalAndAdd(const_cast_derived()); return derived(); } template template inline Derived& MatrixBase::lazyAssign(const Product& product) { product._cacheFriendlyEval(derived()); return derived(); } template struct ei_product_copy_rhs { typedef typename ei_meta_if< (ei_traits::Flags & RowMajorBit) || (!(ei_traits::Flags & DirectAccessBit)), typename ei_product_eval_to_column_major::type, const T& >::ret type; }; template struct ei_product_copy_lhs { typedef typename ei_meta_if< (!(int(ei_traits::Flags) & DirectAccessBit)), typename ei_eval::type, const T& >::ret type; }; template template inline void Product::_cacheFriendlyEval(DestDerived& res) const { if ( _rows()>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD && 
_cols()>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD && m_lhs.cols()>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD ) { res.setZero(); typedef typename ei_product_copy_lhs<_LhsNested>::type LhsCopy; typedef typename ei_unref::type _LhsCopy; typedef typename ei_product_copy_rhs<_RhsNested>::type RhsCopy; typedef typename ei_unref::type _RhsCopy; LhsCopy lhs(m_lhs); RhsCopy rhs(m_rhs); ei_cache_friendly_product( _rows(), _cols(), lhs.cols(), _LhsCopy::Flags&RowMajorBit, &(lhs.const_cast_derived().coeffRef(0,0)), lhs.stride(), _RhsCopy::Flags&RowMajorBit, &(rhs.const_cast_derived().coeffRef(0,0)), rhs.stride(), Flags&RowMajorBit, &(res.coeffRef(0,0)), res.stride() ); } else { res = Product<_LhsNested,_RhsNested,NormalProduct>(m_lhs, m_rhs).lazy(); } } template template inline void Product::_cacheFriendlyEvalAndAdd(DestDerived& res) const { if ( _rows()>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD && _cols()>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD && m_lhs.cols()>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD ) { typedef typename ei_product_copy_lhs<_LhsNested>::type LhsCopy; typedef typename ei_unref::type _LhsCopy; typedef typename ei_product_copy_rhs<_RhsNested>::type RhsCopy; typedef typename ei_unref::type _RhsCopy; LhsCopy lhs(m_lhs); RhsCopy rhs(m_rhs); ei_cache_friendly_product( _rows(), _cols(), lhs.cols(), _LhsCopy::Flags&RowMajorBit, &(lhs.const_cast_derived().coeffRef(0,0)), lhs.stride(), _RhsCopy::Flags&RowMajorBit, &(rhs.const_cast_derived().coeffRef(0,0)), rhs.stride(), Flags&RowMajorBit, &(res.coeffRef(0,0)), res.stride() ); } else { res += Product<_LhsNested,_RhsNested,NormalProduct>(m_lhs, m_rhs).lazy(); } } #endif // EIGEN_PRODUCT_H