// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2008 Benoit Jacob
// Copyright (C) 2008-2011 Gael Guennebaud
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_PRODUCT_H
#define EIGEN_GENERAL_PRODUCT_H

namespace Eigen {

enum {
  Large = 2,
  Small = 3
};

namespace internal {

template<int Rows, int Cols, int Depth> struct product_type_selector;

template<int Size, int MaxSize> struct product_size_category
{
  enum { is_large = MaxSize == Dynamic ||
                    Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD,
         value = is_large  ? Large
               : Size == 1 ? 1
                           : Small
  };
};

template<typename Lhs, typename Rhs> struct product_type
{
  typedef typename remove_all<Lhs>::type _Lhs;
  typedef typename remove_all<Rhs>::type _Rhs;
  enum {
    MaxRows  = traits<_Lhs>::MaxRowsAtCompileTime,
    Rows     = traits<_Lhs>::RowsAtCompileTime,
    MaxCols  = traits<_Rhs>::MaxColsAtCompileTime,
    Cols     = traits<_Rhs>::ColsAtCompileTime,
    MaxDepth = EIGEN_SIZE_MIN_PREFER_FIXED(traits<_Lhs>::MaxColsAtCompileTime,
                                           traits<_Rhs>::MaxRowsAtCompileTime),
    Depth    = EIGEN_SIZE_MIN_PREFER_FIXED(traits<_Lhs>::ColsAtCompileTime,
                                           traits<_Rhs>::RowsAtCompileTime)
  };

  // the splitting into different lines of code here, introducing the _select enums and the typedef below,
  // is to work around an internal compiler error with gcc 4.1 and 4.2.
private:
  enum {
    rows_select  = product_size_category<Rows,MaxRows>::value,
    cols_select  = product_size_category<Cols,MaxCols>::value,
    depth_select = product_size_category<Depth,MaxDepth>::value
  };
  typedef product_type_selector<rows_select, cols_select, depth_select> selector;

public:
  enum {
    value = selector::ret,
    ret = selector::ret
  };
#ifdef EIGEN_DEBUG_PRODUCT
  static void debug()
  {
      EIGEN_DEBUG_VAR(Rows);
      EIGEN_DEBUG_VAR(Cols);
      EIGEN_DEBUG_VAR(Depth);
      EIGEN_DEBUG_VAR(rows_select);
      EIGEN_DEBUG_VAR(cols_select);
      EIGEN_DEBUG_VAR(depth_select);
      EIGEN_DEBUG_VAR(value);
  }
#endif
};

// template<typename Lhs, typename Rhs> struct product_tag
// {
// private:
//
//   typedef typename remove_all<Lhs>::type _Lhs;
//   typedef typename remove_all<Rhs>::type _Rhs;
//   enum {
//     Rows  = _Lhs::RowsAtCompileTime,
//     Cols  = _Rhs::ColsAtCompileTime,
//     Depth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::ColsAtCompileTime, _Rhs::RowsAtCompileTime)
//   };
//
//   enum {
//     rows_select  = Rows==1  ? int(Rows)  : int(Large),
//     cols_select  = Cols==1  ? int(Cols)  : int(Large),
//     depth_select = Depth==1 ? int(Depth) : int(Large)
//   };
//   typedef product_type_selector<rows_select, cols_select, depth_select> selector;
//
// public:
//   enum {
//     ret = selector::ret
//   };
//
// };

/* The following allows selecting the kind of product at compile time
 * based on the three dimensions of the product.
 * This is a compile-time mapping from {1,Small,Large}^3 -> {product types}. */
// FIXME I'm not sure the current mapping is the ideal one.
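// Illustrative examples (editor's note, not part of the original sources): assuming the default
// EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD of 20, the mapping below resolves as follows:
//   - Matrix4f * Vector4f : rows and depth are Small, cols is 1
//                           -> product_type_selector<Small,1,Small> -> CoeffBasedProductMode
//   - MatrixXf * VectorXf : dynamic sizes are classified as Large
//                           -> product_type_selector<Large,1,Large> -> GemvProduct
//   - MatrixXf * MatrixXf : -> product_type_selector<Large,Large,Large> -> GemmProduct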
template<int M, int N>  struct product_type_selector<M,N,1>              { enum { ret = OuterProduct }; };
template<int Depth>     struct product_type_selector<1,    1,    Depth>  { enum { ret = InnerProduct }; };
template<>              struct product_type_selector<1,    1,    1>      { enum { ret = InnerProduct }; };
template<>              struct product_type_selector<Small,1,    Small>  { enum { ret = CoeffBasedProductMode }; };
template<>              struct product_type_selector<1,    Small,Small>  { enum { ret = CoeffBasedProductMode }; };
template<>              struct product_type_selector<Small,Small,Small>  { enum { ret = CoeffBasedProductMode }; };
template<>              struct product_type_selector<Small,Small,1>      { enum { ret = LazyCoeffBasedProductMode }; };
template<>              struct product_type_selector<Small,Large,1>      { enum { ret = LazyCoeffBasedProductMode }; };
template<>              struct product_type_selector<Large,Small,1>      { enum { ret = LazyCoeffBasedProductMode }; };
template<>              struct product_type_selector<1,    Large,Small>  { enum { ret = CoeffBasedProductMode }; };
template<>              struct product_type_selector<1,    Large,Large>  { enum { ret = GemvProduct }; };
template<>              struct product_type_selector<1,    Small,Large>  { enum { ret = CoeffBasedProductMode }; };
template<>              struct product_type_selector<Large,1,    Small>  { enum { ret = CoeffBasedProductMode }; };
template<>              struct product_type_selector<Large,1,    Large>  { enum { ret = GemvProduct }; };
template<>              struct product_type_selector<Small,1,    Large>  { enum { ret = CoeffBasedProductMode }; };
template<>              struct product_type_selector<Small,Small,Large>  { enum { ret = GemmProduct }; };
template<>              struct product_type_selector<Large,Small,Large>  { enum { ret = GemmProduct }; };
template<>              struct product_type_selector<Small,Large,Large>  { enum { ret = GemmProduct }; };
template<>              struct product_type_selector<Large,Large,Large>  { enum { ret = GemmProduct }; };
template<>              struct product_type_selector<Large,Small,Small>  { enum { ret = GemmProduct }; };
template<>              struct product_type_selector<Small,Large,Small>  { enum { ret = GemmProduct }; };
template<>              struct product_type_selector<Large,Large,Small>  { enum { ret = GemmProduct }; };

} // end namespace internal

/***********************************************************************
* Implementation of Inner Vector Vector Product
***********************************************************************/

// FIXME : maybe the "inner product" could return a Scalar
// instead of a 1x1 matrix ??
// Pro: more natural for the user
// Cons: this could be a problem if in a meta unrolled algorithm a matrix-matrix
// product ends up as a row-vector times col-vector product... To tackle this use
// case, we could have a specialization for Block<MatrixType,1,1> with: operator=(Scalar x);

/***********************************************************************
* Implementation of Outer Vector Vector Product
***********************************************************************/

/***********************************************************************
* Implementation of General Matrix Vector Product
***********************************************************************/

/*  According to the shape/flags of the matrix we have to distinguish 3 different cases:
 *   1 - the matrix is col-major, BLAS compatible and M is large => call fast BLAS-like colmajor routine
 *   2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine
 *   3 - all other cases are handled using a simple loop along the outer-storage direction.
 *  Therefore we need a lower level meta selector.
 *  Furthermore, if the matrix is the rhs, then the product has to be transposed.
 */
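// Editor's illustration of the dispatch above (not in the original sources):
//   - MatrixXf A (col-major by default) times VectorXf x hits case 1 and runs the packed
//     col-major kernel;
//   - Matrix<float,Dynamic,Dynamic,RowMajor> A times x hits case 2, where the rhs vector may
//     first be copied into a contiguous temporary;
//   - x.transpose() * A produces a row-vector: the vector-on-the-left case is reduced to one of
//     the above by transposing the whole product (see gemv_dense_sense_selector<OnTheLeft,...>).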
namespace internal {

template<int Side, int StorageOrder, bool BlasCompatible>
struct gemv_dense_sense_selector;

} // end namespace internal

namespace internal {

template<typename Scalar,int Size,int MaxSize,bool Cond> struct gemv_static_vector_if;

template<typename Scalar,int Size,int MaxSize>
struct gemv_static_vector_if<Scalar,Size,MaxSize,false>
{
  EIGEN_STRONG_INLINE Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; }
};

template<typename Scalar,int Size>
struct gemv_static_vector_if<Scalar,Size,Dynamic,true>
{
  EIGEN_STRONG_INLINE Scalar* data() { return 0; }
};

template<typename Scalar,int Size,int MaxSize>
struct gemv_static_vector_if<Scalar,Size,MaxSize,true>
{
  #if EIGEN_ALIGN_STATICALLY
  internal::plain_array<Scalar,EIGEN_SIZE_MIN_PREFER_FIXED(Size,MaxSize),0> m_data;
  EIGEN_STRONG_INLINE Scalar* data() { return m_data.array; }
  #else
  // Some architectures cannot align on the stack,
  // => let's manually enforce alignment by allocating more data and returning the address of the first aligned element.
  enum {
    ForceAlignment  = internal::packet_traits<Scalar>::Vectorizable,
    PacketSize      = internal::packet_traits<Scalar>::size
  };
  internal::plain_array<Scalar,EIGEN_SIZE_MIN_PREFER_FIXED(Size,MaxSize)+(ForceAlignment?PacketSize:0),0> m_data;
  EIGEN_STRONG_INLINE Scalar* data() { return ForceAlignment
                            ? reinterpret_cast<Scalar*>((reinterpret_cast<size_t>(m_data.array) & ~(size_t(EIGEN_ALIGN_BYTES-1))) + EIGEN_ALIGN_BYTES)
                            : m_data.array; }
  #endif
};
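// Editor's note (illustration, not original Eigen code): the #else branch above aligns a stack
// buffer by over-allocation. Sketched on a plain byte buffer, the idea is:
//
//   char buf[N + ALIGN];                         // allocate ALIGN extra bytes
//   char* p = reinterpret_cast<char*>(
//       (reinterpret_cast<size_t>(buf) & ~size_t(ALIGN - 1)) + ALIGN);  // round up past the next boundary
//
// The adjusted pointer is ALIGN-byte aligned and at most ALIGN bytes past buf, so p..p+N stays
// inside the over-allocated buffer, at the cost of up to ALIGN wasted bytes.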
// The vector is on the left => transposition
template<int StorageOrder, bool BlasCompatible>
struct gemv_dense_sense_selector<OnTheLeft,StorageOrder,BlasCompatible>
{
  template<typename Lhs, typename Rhs, typename Dest>
  static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
  {
    Transpose<Dest> destT(dest);
    enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor };
    gemv_dense_sense_selector<OnTheRight,OtherStorageOrder,BlasCompatible>
      ::run(rhs.transpose(), lhs.transpose(), destT, alpha);
  }
};

template<> struct gemv_dense_sense_selector<OnTheRight,ColMajor,true>
{
  template<typename Lhs, typename Rhs, typename Dest>
  static inline void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
  {
    typedef typename Dest::Index Index;
    typedef typename Lhs::Scalar   LhsScalar;
    typedef typename Rhs::Scalar   RhsScalar;
    typedef typename Dest::Scalar  ResScalar;
    typedef typename Dest::RealScalar  RealScalar;

    typedef internal::blas_traits<Lhs> LhsBlasTraits;
    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
    typedef internal::blas_traits<Rhs> RhsBlasTraits;
    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;

    typedef Map<Matrix<ResScalar,Dynamic,1>, Aligned> MappedDest;

    ActualLhsType actualLhs = LhsBlasTraits::extract(lhs);
    ActualRhsType actualRhs = RhsBlasTraits::extract(rhs);

    ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(lhs)
                                  * RhsBlasTraits::extractScalarFactor(rhs);

    enum {
      // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
      // on the other hand, it is good for the cache to pack the vector anyway...
      EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1,
      ComplexByReal = (NumTraits<LhsScalar>::IsComplex) && (!NumTraits<RhsScalar>::IsComplex),
      MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal
    };

    gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,MightCannotUseDest> static_dest;

    bool alphaIsCompatible = (!ComplexByReal) || (numext::imag(actualAlpha)==RealScalar(0));
    bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible;

    RhsScalar compatibleAlpha = get_factor<ResScalar,RhsScalar>::run(actualAlpha);

    ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
                                                  evalToDest ? dest.data() : static_dest.data());

    if(!evalToDest)
    {
      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      int size = dest.size();
      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      #endif
      if(!alphaIsCompatible)
      {
        MappedDest(actualDestPtr, dest.size()).setZero();
        compatibleAlpha = RhsScalar(1);
      }
      else
        MappedDest(actualDestPtr, dest.size()) = dest;
    }

    general_matrix_vector_product
      <Index,LhsScalar,ColMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsBlasTraits::NeedToConjugate>::run(
        actualLhs.rows(), actualLhs.cols(),
        actualLhs.data(), actualLhs.outerStride(),
        actualRhs.data(), actualRhs.innerStride(),
        actualDestPtr, 1,
        compatibleAlpha);

    if (!evalToDest)
    {
      if(!alphaIsCompatible)
        dest += actualAlpha * MappedDest(actualDestPtr, dest.size());
      else
        dest = MappedDest(actualDestPtr, dest.size());
    }
  }
};

template<> struct gemv_dense_sense_selector<OnTheRight,RowMajor,true>
{
  template<typename Lhs, typename Rhs, typename Dest>
  static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
  {
    typedef typename Dest::Index Index;
    typedef typename Lhs::Scalar   LhsScalar;
    typedef typename Rhs::Scalar   RhsScalar;
    typedef typename Dest::Scalar  ResScalar;

    typedef internal::blas_traits<Lhs> LhsBlasTraits;
    typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
    typedef internal::blas_traits<Rhs> RhsBlasTraits;
    typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
    typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

    typename add_const<ActualLhsType>::type actualLhs = LhsBlasTraits::extract(lhs);
    typename add_const<ActualRhsType>::type actualRhs = RhsBlasTraits::extract(rhs);

    ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(lhs)
                                  * RhsBlasTraits::extractScalarFactor(rhs);

    enum {
      // FIXME find a way to allow an inner stride on the result if packet_traits<Scalar>::size==1
      // on the other hand, it is good for the cache to pack the vector anyway...
      DirectlyUseRhs = ActualRhsTypeCleaned::InnerStrideAtCompileTime==1
    };

    gemv_static_vector_if<RhsScalar,ActualRhsTypeCleaned::SizeAtCompileTime,ActualRhsTypeCleaned::MaxSizeAtCompileTime,!DirectlyUseRhs> static_rhs;

    ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(),
        DirectlyUseRhs ? const_cast<RhsScalar*>(actualRhs.data()) : static_rhs.data());

    if(!DirectlyUseRhs)
    {
      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      int size = actualRhs.size();
      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      #endif
      Map<typename ActualRhsTypeCleaned::PlainObject>(actualRhsPtr, actualRhs.size()) = actualRhs;
    }

    general_matrix_vector_product
      <Index,LhsScalar,RowMajor,LhsBlasTraits::NeedToConjugate,RhsScalar,RhsBlasTraits::NeedToConjugate>::run(
        actualLhs.rows(), actualLhs.cols(),
        actualLhs.data(), actualLhs.outerStride(),
        actualRhsPtr, 1,
        dest.data(), dest.innerStride(),
        actualAlpha);
  }
};

template<> struct gemv_dense_sense_selector<OnTheRight,ColMajor,false>
{
  template<typename Lhs, typename Rhs, typename Dest>
  static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
  {
    typedef typename Dest::Index Index;
    // TODO make sure dest is sequentially stored in memory, otherwise use a temp
    const Index size = rhs.rows();
    for(Index k=0; k<size; ++k)
      dest += (alpha*rhs.coeff(k)) * lhs.col(k);
  }
};

template<> struct gemv_dense_sense_selector<OnTheRight,RowMajor,false>
{
  template<typename Lhs, typename Rhs, typename Dest>
  static void run(const Lhs &lhs, const Rhs &rhs, Dest& dest, const typename Dest::Scalar& alpha)
  {
    typedef typename Dest::Index Index;
    // TODO make sure rhs is sequentially stored in memory, otherwise use a temp
    const Index rows = dest.rows();
    for(Index i=0; i<rows; ++i)
      dest.coeffRef(i) += alpha * (lhs.row(i).cwiseProduct(rhs.transpose())).sum();
  }
};

} // end namespace internal

/***********************************************************************
* Implementation of matrix base methods
***********************************************************************/
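// Editor's illustration (not part of the original sources): typical use of the products defined below.
//
//   MatrixXf A(1000,1000), B(1000,1000), C(1000,1000);
//   VectorXf x(1000), y(1000);
//   y.noalias() = A * x;            // GemvProduct path: runs the BLAS-like kernel selected above
//   C.noalias() = A * B;            // GemmProduct path
//   Matrix4f D, E;
//   Matrix4f F = D.lazyProduct(E);  // coefficient-based evaluation, no temporary for the product
//
// noalias() only skips the temporary guarding against aliasing; the choice of kernel is made
// at compile time by internal::product_type.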
/** \returns the matrix product of \c *this and \a other.
  *
  * \note If instead of the matrix product you want the coefficient-wise product, see Cwise::operator*().
  *
  * \sa lazyProduct(), operator*=(const MatrixBase&), Cwise::operator*()
  */
#ifndef __CUDACC__
template<typename Derived>
template<typename OtherDerived>
inline const Product<Derived, OtherDerived>
MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
{
  // A note regarding the function declaration: In MSVC, this function will sometimes
  // not be inlined since DenseStorage is an unwindable object for dynamic
  // matrices and product types are holding a member to store the result.
  // Thus it does not help tagging this function with EIGEN_STRONG_INLINE.
  enum {
    ProductIsValid =  Derived::ColsAtCompileTime==Dynamic
                   || OtherDerived::RowsAtCompileTime==Dynamic
                   || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),
    AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
    SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)
  };
  // note to the lost user:
  //    * for a dot product use: v1.dot(v2)
  //    * for a coeff-wise product use: v1.cwiseProduct(v2)
  EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
    INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
  EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
    INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
  EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
#ifdef EIGEN_DEBUG_PRODUCT
  internal::product_type<Derived,OtherDerived>::debug();
#endif

  return Product<Derived, OtherDerived>(derived(), other.derived());
}
#endif // __CUDACC__

/** \returns an expression of the matrix product of \c *this and \a other without implicit evaluation.
  *
  * The returned product will behave like any other expression: the coefficients of the product will be
  * computed one at a time, as requested. This might be useful in some extremely rare cases when only
  * a small and incoherent fraction of the result's coefficients has to be computed.
  *
  * \warning This version of the matrix product can be much slower. So use it only if you know
  * what you are doing and you have measured a true speed improvement.
  *
  * \sa operator*(const MatrixBase&)
  */
template<typename Derived>
template<typename OtherDerived>
const Product<Derived,OtherDerived,LazyProduct>
MatrixBase<Derived>::lazyProduct(const MatrixBase<OtherDerived> &other) const
{
  enum {
    ProductIsValid =  Derived::ColsAtCompileTime==Dynamic
                   || OtherDerived::RowsAtCompileTime==Dynamic
                   || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime),
    AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime,
    SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived)
  };
  // note to the lost user:
  //    * for a dot product use: v1.dot(v2)
  //    * for a coeff-wise product use: v1.cwiseProduct(v2)
  EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
    INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
  EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
    INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
  EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)

  return Product<Derived,OtherDerived,LazyProduct>(derived(), other.derived());
}

} // end namespace Eigen

#endif // EIGEN_GENERAL_PRODUCT_H