// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_BLOCK_PANEL_H
#define EIGEN_GENERAL_BLOCK_PANEL_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>
class gebp_traits;


/** \internal \returns b if a<=0, and returns a otherwise. */
inline std::ptrdiff_t manage_caching_sizes_helper(std::ptrdiff_t a, std::ptrdiff_t b)
{
  return a<=0 ? b : a;
}

#if EIGEN_ARCH_i386_OR_x86_64
const std::ptrdiff_t defaultL1CacheSize = 32*1024;
const std::ptrdiff_t defaultL2CacheSize = 256*1024;
const std::ptrdiff_t defaultL3CacheSize = 2*1024*1024;
#else
const std::ptrdiff_t defaultL1CacheSize = 16*1024;
const std::ptrdiff_t defaultL2CacheSize = 512*1024;
const std::ptrdiff_t defaultL3CacheSize = 512*1024;
#endif

/** \internal */
inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1, std::ptrdiff_t* l2, std::ptrdiff_t* l3)
{
  static bool m_cache_sizes_initialized = false;
  static std::ptrdiff_t m_l1CacheSize = 0;
  static std::ptrdiff_t m_l2CacheSize = 0;
  static std::ptrdiff_t m_l3CacheSize = 0;

  if(EIGEN_UNLIKELY(!m_cache_sizes_initialized))
  {
    int l1CacheSize, l2CacheSize, l3CacheSize;
    queryCacheSizes(l1CacheSize, l2CacheSize, l3CacheSize);
    m_l1CacheSize = manage_caching_sizes_helper(l1CacheSize, defaultL1CacheSize);
    m_l2CacheSize = manage_caching_sizes_helper(l2CacheSize, defaultL2CacheSize);
    m_l3CacheSize = manage_caching_sizes_helper(l3CacheSize, defaultL3CacheSize);
    m_cache_sizes_initialized = true;
  }

  if(EIGEN_UNLIKELY(action==SetAction))
  {
    // set the cpu cache size and cache all block sizes from a global cache size in byte
    eigen_internal_assert(l1!=0 && l2!=0);
    m_l1CacheSize = *l1;
    m_l2CacheSize = *l2;
    m_l3CacheSize = *l3;
  }
  else if(EIGEN_LIKELY(action==GetAction))
  {
    eigen_internal_assert(l1!=0 && l2!=0);
    *l1 = m_l1CacheSize;
    *l2 = m_l2CacheSize;
    *l3 = m_l3CacheSize;
  }
  else
  {
    eigen_internal_assert(false);
  }
}

#define CEIL(a, b) ((a)+(b)-1)/(b)

/* Helper for computeProductBlockingSizes.
 *
 * Given a m x k times k x n matrix product of scalar types \c LhsScalar and \c RhsScalar,
 * this function computes the blocking size parameters along the respective dimensions
 * for matrix products and related algorithms. The blocking sizes depend on various
 * parameters:
 * - the L1 and L2 cache sizes,
 * - the register level blocking sizes defined by gebp_traits,
 * - the number of scalars that fit into a packet (when vectorization is enabled).
 *
 * \sa setCpuCacheSizes */

template<typename LhsScalar, typename RhsScalar, int KcFactor, typename Index>
void evaluateProductBlockingSizesHeuristic(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  // Explanations:
  // Let's recall that the product algorithms form kc x nc horizontal panels B' on the rhs and
  // mc x kc blocks A' on the lhs. A' has to fit into L2 cache. Moreover, B' is processed
  // per kc x nr vertical small panels where nr is the blocking size along the n dimension
  // at the register level. For vectorization purposes, these small vertical panels are unpacked,
  // e.g., each coefficient is replicated to fit a packet. This small vertical panel has to
  // stay in L1 cache.
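  //
  // A rough worked instance of that constraint (numbers are illustrative only): with
  // double precision, nr==4 and the 32 KB default L1 size used on x86 above, an unpacked
  // kc x nr rhs panel occupies about kc * 4 * sizeof(double) bytes, so
  //   kc * 4 * 8 <= 32*1024   ==>   kc <= 1024,
  // while the mc x kc block A' must satisfy mc * kc * sizeof(LhsScalar) <= L2.
  // The code below uses a slightly finer model (it also reserves room for the mr x nr
  // accumulator tile and caps kc at 320), then rounds the resulting sizes down to
  // multiples of the register-level blocking sizes mr and nr (and k to a multiple of 8)
  // so the kernels only ever see full register blocks.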
  typedef gebp_traits<LhsScalar,RhsScalar> Traits;
  typedef typename Traits::ResScalar ResScalar;
  enum {
    kdiv = KcFactor * (Traits::mr * sizeof(LhsScalar) + Traits::nr * sizeof(RhsScalar)),
    ksub = Traits::mr * Traits::nr * sizeof(ResScalar),
    k_mask = (0xffffffff/8)*8,

    mr = Traits::mr,
    mr_mask = (0xffffffff/mr)*mr,

    nr = Traits::nr,
    nr_mask = (0xffffffff/nr)*nr
  };

  std::ptrdiff_t l1, l2, l3;
  manage_caching_sizes(GetAction, &l1, &l2, &l3);

  // Increasing k gives us more time to prefetch the content of the "C"
  // registers. However once the latency is hidden there is no point in
  // increasing the value of k, so we'll cap it at 320 (value determined
  // experimentally).
  const Index k_cache = (std::min<Index>)((l1-ksub)/kdiv, 320);
  if (k_cache < k) {
    k = k_cache & k_mask;
    eigen_assert(k > 0);
  }

  const Index n_cache = (l2-l1) / (nr * sizeof(RhsScalar) * k);
  Index n_per_thread = CEIL(n, num_threads);
  if (n_cache <= n_per_thread) {
    // Don't exceed the capacity of the l2 cache.
    if (n_cache < nr) {
      n = nr;
    } else {
      n = n_cache & nr_mask;
      eigen_assert(n > 0);
    }
  } else {
    n = (std::min<Index>)(n, (n_per_thread + nr - 1) & nr_mask);
  }

  if (l3 > l2) {
    // l3 is shared between all cores, so we'll give each thread its own chunk of l3.
    const Index m_cache = (l3-l2) / (sizeof(LhsScalar) * k * num_threads);
    const Index m_per_thread = CEIL(m, num_threads);
    if(m_cache < m_per_thread && m_cache >= static_cast<Index>(mr)) {
      m = m_cache & mr_mask;
      eigen_assert(m > 0);
    } else {
      m = (std::min<Index>)(m, (m_per_thread + mr - 1) & mr_mask);
    }
  }
}

template<typename Index>
bool useSpecificBlockingSizes(Index& k, Index& m, Index& n)
{
#ifdef EIGEN_TEST_SPECIFIC_BLOCKING_SIZES
  if (EIGEN_TEST_SPECIFIC_BLOCKING_SIZES) {
    k = std::min<Index>(k, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K);
    m = std::min<Index>(m, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M);
    n = std::min<Index>(n, EIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N);
    return true;
  }
#else
  EIGEN_UNUSED_VARIABLE(k)
  EIGEN_UNUSED_VARIABLE(m)
  EIGEN_UNUSED_VARIABLE(n)
#endif
  return false;
}
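
// Illustrative usage of the test hook above (the concrete values are hypothetical):
// building with
//   -DEIGEN_TEST_SPECIFIC_BLOCKING_SIZES=1
//   -DEIGEN_TEST_SPECIFIC_BLOCKING_SIZE_K=64
//   -DEIGEN_TEST_SPECIFIC_BLOCKING_SIZE_M=96
//   -DEIGEN_TEST_SPECIFIC_BLOCKING_SIZE_N=48
// makes useSpecificBlockingSizes() clamp k, m and n to these fixed values instead of
// letting the cache-size heuristic choose them; this is the "fixed prescribed values
// (for testing purposes)" option mentioned below.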

/** \brief Computes the blocking parameters for a m x k times k x n matrix product
 *
 * \param[in,out] k Input: the third dimension of the product. Output: the blocking size along the same dimension.
 * \param[in,out] m Input: the number of rows of the left hand side. Output: the blocking size along the same dimension.
 * \param[in,out] n Input: the number of columns of the right hand side. Output: the blocking size along the same dimension.
 *
 * Given a m x k times k x n matrix product of scalar types \c LhsScalar and \c RhsScalar,
 * this function computes the blocking size parameters along the respective dimensions
 * for matrix products and related algorithms.
 *
 * The blocking size parameters may be evaluated:
 *   - either by a heuristic based on cache sizes;
 *   - or using fixed prescribed values (for testing purposes).
 *
 * \sa setCpuCacheSizes */

template<typename LhsScalar, typename RhsScalar, int KcFactor, typename Index>
void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads = 1)
{
  if (!k || !m || !n) {
    return;
  }

  if (!useSpecificBlockingSizes(k, m, n)) {
    evaluateProductBlockingSizesHeuristic<LhsScalar, RhsScalar, KcFactor, Index>(k, m, n, num_threads);
  }

#if !EIGEN_ARCH_i386_OR_x86_64
  // The following code rounds k,m,n down to the nearest multiple of register-level blocking sizes.
  // We should always do that, and in upstream Eigen we always do that.
  // Unfortunately, we can't do that in Google3 on x86[-64] because this makes tiny differences in results and
  // we have some unfortunate tests that require very specific relative errors which fail because of that,
  // at least //learning/laser/algorithms/wals:wals_batch_solver_test.
  // Note that this wouldn't make any difference if we had been using only correctly rounded values,
  // but we've not! See how in evaluateProductBlockingSizesHeuristic, we do the rounding down by
  // bit-masking, e.g. mr_mask = (0xffffffff/mr)*mr, implicitly assuming that mr is always a power of
  // two, which is not the case with the 3px4 kernel.
  typedef gebp_traits<LhsScalar,RhsScalar> Traits;
  enum {
    kr = 8,
    mr = Traits::mr,
    nr = Traits::nr
  };
  if (k > kr) k -= k % kr;
  if (m > mr) m -= m % mr;
  if (n > nr) n -= n % nr;
#endif
}

template<typename LhsScalar, typename RhsScalar, typename Index>
inline void computeProductBlockingSizes(Index& k, Index& m, Index& n, Index num_threads)
{
  computeProductBlockingSizes<LhsScalar,RhsScalar,1,Index>(k, m, n, num_threads);
}
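
// A minimal usage sketch of the two entry points above (values and the Index type are
// made up for illustration; the real call sites live in the GEMM implementation, not in
// this file):
//
//   std::ptrdiff_t k = 2048, m = 2048, n = 2048;   // problem dimensions
//   std::ptrdiff_t threads = 4;
//   internal::computeProductBlockingSizes<float,float>(k, m, n, threads);
//   // On return, k, m and n hold the kc, mc and nc blocking sizes consumed by the
//   // packing routines and by gebp_kernel further down in this file.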

#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
  #define CJMADD(CJ,A,B,C,T)  C = CJ.pmadd(A,B,C);
#else

  // FIXME (a bit overkill maybe ?)

  template<typename CJ, typename A, typename B, typename C, typename T> struct gebp_madd_selector {
    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/)
    {
      c = cj.pmadd(a,b,c);
    }
  };

  template<typename CJ, typename T> struct gebp_madd_selector<CJ,T,T,T,T> {
    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, T& a, T& b, T& c, T& t)
    {
      t = b; t = cj.pmul(a,t); c = padd(c,t);
    }
  };

  template<typename CJ, typename A, typename B, typename C, typename T>
  EIGEN_STRONG_INLINE void gebp_madd(const CJ& cj, A& a, B& b, C& c, T& t)
  {
    gebp_madd_selector<CJ,A,B,C,T>::run(cj,a,b,c,t);
  }

  #define CJMADD(CJ,A,B,C,T)  gebp_madd(CJ,A,B,C,T);
//   #define CJMADD(CJ,A,B,C,T)  T = B; T = CJ.pmul(A,T); C = padd(C,T);
#endif

/* Vectorization logic
 *  real*real: unpack rhs to constant packets, ...
 *
 *  cd*cd : unpack rhs to (b_r,b_r), (b_i,b_i), mul to get (a_r b_r,a_i b_r) (a_r b_i,a_i b_i),
 *          storing each res packet into two packets (2x2),
 *          at the end combine them: swap the second and addsub them
 *  cf*cf : same but with 2x4 blocks
 *  cplx*real : unpack rhs to constant packets, ...
 *  real*cplx : load lhs as (a0,a0,a1,a1), and mul as usual
 */
template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
class gebp_traits
{
public:
  typedef _LhsScalar LhsScalar;
  typedef _RhsScalar RhsScalar;
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,

    // register block size along the N direction must be 1 or 4
    nr = 4,

    // register block size along the M direction (currently, this one cannot be modified)
    default_mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
    // we assume 16 registers
    mr = Vectorizable ? 3*LhsPacketSize : default_mr,
#else
    mr = default_mr,
#endif

    LhsProgress = LhsPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }

//   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
//   {
//     pbroadcast2(b, b0, b1);
//   }

  template<typename RhsPacketType>
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const
  {
    dest = pset1<RhsPacketType>(*b);
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = ploadquad<RhsPacket>(b);
  }

  template<typename LhsPacketType>
  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacketType& dest) const
  {
    dest = pload<LhsPacketType>(a);
  }

  template<typename LhsPacketType>
  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const
  {
    dest = ploadu<LhsPacketType>(a);
  }

  template<typename LhsPacketType, typename RhsPacketType, typename AccPacketType>
  EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, AccPacketType& tmp) const
  {
    // It would be a lot cleaner to call pmadd all the time. Unfortunately if we
    // let gcc allocate the register in which to store the result of the pmul
    // (in the case where there is no FMA) gcc fails to figure out how to avoid
    // spilling register.
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c = pmadd(a,b,c);
#else
    tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);
#endif
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = pmadd(c,alpha,r);
  }

  template<typename ResPacketHalf>
  EIGEN_STRONG_INLINE void acc(const ResPacketHalf& c, const ResPacketHalf& alpha, ResPacketHalf& r) const
  {
    r = pmadd(c,alpha,r);
  }

protected:
//   conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
//   conj_helper<LhsPacket,RhsPacket,ConjLhs,ConjRhs> pcj;
};
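
// Worked instance of the register-level blocking sizes above (assuming an AVX+FMA target
// with float scalars, i.e. LhsPacketSize == 8 and NumberOfRegisters == 16; other targets
// give different numbers):
//   nr         = 4
//   default_mr = (min(16,16)/2/4) * 8 = 16
//   mr         = 3 * 8 = 24             (FMA branch above)
// so one pass of the GEBP micro kernel accumulates a 24 x 4 tile of the result entirely
// in registers.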

template<typename RealScalar, bool _ConjLhs>
class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false>
{
public:
  typedef std::complex<RealScalar> LhsScalar;
  typedef RealScalar RhsScalar;
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = false,
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    nr = 4,
#if defined(EIGEN_HAS_SINGLE_INSTRUCTION_MADD) && !defined(EIGEN_VECTORIZE_ALTIVEC) && !defined(EIGEN_VECTORIZE_VSX)
    // we assume 16 registers
    mr = 3*LhsPacketSize,
#else
    mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*LhsPacketSize,
#endif

    LhsProgress = LhsPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;

  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pset1<RhsPacket>(*b);
  }

  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = pload<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = ploadu<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    pbroadcast4(b, b0, b1, b2, b3);
  }

//   EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1)
//   {
//     pbroadcast2(b, b0, b1);
//   }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
  {
    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
  {
#ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
    EIGEN_UNUSED_VARIABLE(tmp);
    c.v = pmadd(a.v,b,c.v);
#else
    tmp = b; tmp = pmul(a.v,tmp); c.v = padd(c.v,tmp);
#endif
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
  {
    c += a * b;
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = cj.pmadd(c,alpha,r);
  }

protected:
  conj_helper<ResPacket,ResPacket,ConjLhs,false> cj;
};

template<typename Packet>
struct DoublePacket
{
  Packet first;
  Packet second;
};

template<typename Packet>
DoublePacket<Packet> padd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b)
{
  DoublePacket<Packet> res;
  res.first  = padd(a.first, b.first);
  res.second = padd(a.second,b.second);
  return res;
}

template<typename Packet>
const DoublePacket<Packet>& predux4(const DoublePacket<Packet> &a)
{
  return a;
}

template<typename Packet> struct unpacket_traits<DoublePacket<Packet> > { typedef DoublePacket<Packet> half; };
// template<typename Packet>
// DoublePacket<Packet> pmadd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b)
// {
//   DoublePacket<Packet> res;
//   res.first  = padd(a.first, b.first);
//   res.second = padd(a.second,b.second);
//   return res;
// }
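
// For the complex*complex path below, each rhs coefficient b = (b_r, b_i) is broadcast into
// a DoublePacket as two real packets (this example assumes 4-wide real packets):
//   dest.first  = (b_r, b_r, b_r, b_r)
//   dest.second = (b_i, b_i, b_i, b_i)
// This is the "(b_r,b_r), (b_i,b_i)" unpacking described in the vectorization-logic comment
// above; the two halves are only recombined into a complex packet in acc().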

template<typename RealScalar, bool _ConjLhs, bool _ConjRhs>
class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs >
{
public:
  typedef std::complex<RealScalar>  Scalar;
  typedef std::complex<RealScalar>  LhsScalar;
  typedef std::complex<RealScalar>  RhsScalar;
  typedef std::complex<RealScalar>  ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<RealScalar>::Vectorizable
                && packet_traits<Scalar>::Vectorizable,
    RealPacketSize  = Vectorizable ? packet_traits<RealScalar>::size : 1,
    ResPacketSize   = Vectorizable ? packet_traits<ResScalar>::size : 1,
    LhsPacketSize   = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize   = Vectorizable ? packet_traits<RhsScalar>::size : 1,

    // FIXME: should depend on NumberOfRegisters
    nr = 4,
    mr = ResPacketSize,

    LhsProgress = ResPacketSize,
    RhsProgress = 1
  };

  typedef typename packet_traits<RealScalar>::type RealPacket;
  typedef typename packet_traits<Scalar>::type     ScalarPacket;
  typedef DoublePacket<RealPacket> DoublePacketType;

  typedef typename conditional<Vectorizable,RealPacket,  Scalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,ScalarPacket,Scalar>::type ResPacket;
  typedef typename conditional<Vectorizable,DoublePacketType,Scalar>::type AccPacket;

  EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); }

  EIGEN_STRONG_INLINE void initAcc(DoublePacketType& p)
  {
    p.first  = pset1<RealPacket>(RealScalar(0));
    p.second = pset1<RealPacket>(RealScalar(0));
  }

  // Scalar path
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const
  {
    dest = pset1<ResPacket>(*b);
  }

  // Vectorized path
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacketType& dest) const
  {
    dest.first  = pset1<RealPacket>(real(*b));
    dest.second = pset1<RealPacket>(imag(*b));
  }

  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, ResPacket& dest) const
  {
    loadRhs(b,dest);
  }
  EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, DoublePacketType& dest) const
  {
    eigen_internal_assert(unpacket_traits<ScalarPacket>::size<=4);
    loadRhs(b,dest);
  }

  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3)
  {
    // FIXME not sure that's the best way to implement it!
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
    loadRhs(b+2, b2);
    loadRhs(b+3, b3);
  }

  // Vectorized path
  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, DoublePacketType& b0, DoublePacketType& b1)
  {
    // FIXME not sure that's the best way to implement it!
    loadRhs(b+0, b0);
    loadRhs(b+1, b1);
  }

  // Scalar path
  EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsScalar& b0, RhsScalar& b1)
  {
    // FIXME not sure that's the best way to implement it!
loadRhs(b+0, b0); loadRhs(b+1, b1); } // nothing special here EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const { dest = pload((const typename unpacket_traits::type*)(a)); } EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const { dest = ploadu((const typename unpacket_traits::type*)(a)); } EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacketType& c, RhsPacket& /*tmp*/) const { c.first = padd(pmul(a,b.first), c.first); c.second = padd(pmul(a,b.second),c.second); } EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/) const { c = cj.pmadd(a,b,c); } EIGEN_STRONG_INLINE void acc(const Scalar& c, const Scalar& alpha, Scalar& r) const { r += alpha * c; } EIGEN_STRONG_INLINE void acc(const DoublePacketType& c, const ResPacket& alpha, ResPacket& r) const { // assemble c ResPacket tmp; if((!ConjLhs)&&(!ConjRhs)) { tmp = pcplxflip(pconj(ResPacket(c.second))); tmp = padd(ResPacket(c.first),tmp); } else if((!ConjLhs)&&(ConjRhs)) { tmp = pconj(pcplxflip(ResPacket(c.second))); tmp = padd(ResPacket(c.first),tmp); } else if((ConjLhs)&&(!ConjRhs)) { tmp = pcplxflip(ResPacket(c.second)); tmp = padd(pconj(ResPacket(c.first)),tmp); } else if((ConjLhs)&&(ConjRhs)) { tmp = pcplxflip(ResPacket(c.second)); tmp = psub(pconj(ResPacket(c.first)),tmp); } r = pmadd(tmp,alpha,r); } protected: conj_helper cj; }; template class gebp_traits, false, _ConjRhs > { public: typedef std::complex Scalar; typedef RealScalar LhsScalar; typedef Scalar RhsScalar; typedef Scalar ResScalar; enum { ConjLhs = false, ConjRhs = _ConjRhs, Vectorizable = packet_traits::Vectorizable && packet_traits::Vectorizable, LhsPacketSize = Vectorizable ? packet_traits::size : 1, RhsPacketSize = Vectorizable ? packet_traits::size : 1, ResPacketSize = Vectorizable ? packet_traits::size : 1, NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS, // FIXME: should depend on NumberOfRegisters nr = 4, mr = (EIGEN_PLAIN_ENUM_MIN(16,NumberOfRegisters)/2/nr)*ResPacketSize, LhsProgress = ResPacketSize, RhsProgress = 1 }; typedef typename packet_traits::type _LhsPacket; typedef typename packet_traits::type _RhsPacket; typedef typename packet_traits::type _ResPacket; typedef typename conditional::type LhsPacket; typedef typename conditional::type RhsPacket; typedef typename conditional::type ResPacket; typedef ResPacket AccPacket; EIGEN_STRONG_INLINE void initAcc(AccPacket& p) { p = pset1(ResScalar(0)); } EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const { dest = pset1(*b); } void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3) { pbroadcast4(b, b0, b1, b2, b3); } // EIGEN_STRONG_INLINE void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1) // { // // FIXME not sure that's the best way to implement it! 
// b0 = pload1(b+0); // b1 = pload1(b+1); // } EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const { dest = ploaddup(a); } EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const { eigen_internal_assert(unpacket_traits::size<=4); loadRhs(b,dest); } EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const { dest = ploaddup(a); } EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const { madd_impl(a, b, c, tmp, typename conditional::type()); } EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const { #ifdef EIGEN_HAS_SINGLE_INSTRUCTION_MADD EIGEN_UNUSED_VARIABLE(tmp); c.v = pmadd(a,b.v,c.v); #else tmp = b; tmp.v = pmul(a,tmp.v); c = padd(c,tmp); #endif } EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const { c += a * b; } EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const { r = cj.pmadd(alpha,c,r); } protected: conj_helper cj; }; // helper for the rotating kernel below template struct PossiblyRotatingKernelHelper { // default implementation, not rotating typedef typename GebpKernel::Traits Traits; typedef typename Traits::RhsScalar RhsScalar; typedef typename Traits::RhsPacket RhsPacket; typedef typename Traits::AccPacket AccPacket; const Traits& traits; EIGEN_ALWAYS_INLINE PossiblyRotatingKernelHelper(const Traits& t) : traits(t) {} template EIGEN_ALWAYS_INLINE void loadOrRotateRhs(RhsPacket& to, const RhsScalar* from) const { traits.loadRhs(from + (Index+4*K)*Traits::RhsProgress, to); } EIGEN_ALWAYS_INLINE void unrotateResult(AccPacket&, AccPacket&, AccPacket&, AccPacket&) { } }; // rotating implementation template struct PossiblyRotatingKernelHelper { typedef typename GebpKernel::Traits Traits; typedef typename Traits::RhsScalar RhsScalar; typedef typename Traits::RhsPacket RhsPacket; typedef typename Traits::AccPacket AccPacket; const Traits& traits; EIGEN_ALWAYS_INLINE PossiblyRotatingKernelHelper(const Traits& t) : traits(t) {} template EIGEN_ALWAYS_INLINE void loadOrRotateRhs(RhsPacket& to, const RhsScalar* from) const { if (Index == 0) { to = pload(from + 4*K*Traits::RhsProgress); } else { EIGEN_ASM_COMMENT("Do not reorder code, we're very tight on registers"); to = protate<1>(to); } } EIGEN_ALWAYS_INLINE void unrotateResult(AccPacket& res0, AccPacket& res1, AccPacket& res2, AccPacket& res3) { PacketBlock resblock; resblock.packet[0] = res0; resblock.packet[1] = res1; resblock.packet[2] = res2; resblock.packet[3] = res3; ptranspose(resblock); resblock.packet[3] = protate<1>(resblock.packet[3]); resblock.packet[2] = protate<2>(resblock.packet[2]); resblock.packet[1] = protate<3>(resblock.packet[1]); ptranspose(resblock); res0 = resblock.packet[0]; res1 = resblock.packet[1]; res2 = resblock.packet[2]; res3 = resblock.packet[3]; } }; /* optimized GEneral packed Block * packed Panel product kernel * * Mixing type logic: C += A * B * | A | B | comments * |real |cplx | no vectorization yet, would require to pack A with duplication * |cplx |real | easy vectorization */ template struct gebp_kernel { typedef gebp_traits Traits; typedef typename Traits::ResScalar ResScalar; typedef typename Traits::LhsPacket LhsPacket; typedef typename Traits::RhsPacket RhsPacket; typedef typename Traits::ResPacket ResPacket; typedef typename Traits::AccPacket AccPacket; typedef gebp_traits 
SwappedTraits; typedef typename SwappedTraits::ResScalar SResScalar; typedef typename SwappedTraits::LhsPacket SLhsPacket; typedef typename SwappedTraits::RhsPacket SRhsPacket; typedef typename SwappedTraits::ResPacket SResPacket; typedef typename SwappedTraits::AccPacket SAccPacket; typedef typename DataMapper::LinearMapper LinearMapper; enum { Vectorizable = Traits::Vectorizable, LhsProgress = Traits::LhsProgress, RhsProgress = Traits::RhsProgress, ResPacketSize = Traits::ResPacketSize }; EIGEN_DONT_INLINE void operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB, Index rows, Index depth, Index cols, ResScalar alpha, Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); static const bool UseRotatingKernel = EIGEN_ARCH_ARM && internal::is_same::value && internal::is_same::value && internal::is_same::value && Traits::LhsPacketSize == 4 && Traits::RhsPacketSize == 4 && Traits::ResPacketSize == 4; }; template EIGEN_DONT_INLINE void gebp_kernel ::operator()(const DataMapper& res, const LhsScalar* blockA, const RhsScalar* blockB, Index rows, Index depth, Index cols, ResScalar alpha, Index strideA, Index strideB, Index offsetA, Index offsetB) { Traits traits; SwappedTraits straits; if(strideA==-1) strideA = depth; if(strideB==-1) strideB = depth; conj_helper cj; Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0; const Index peeled_mc3 = mr>=3*Traits::LhsProgress ? (rows/(3*LhsProgress))*(3*LhsProgress) : 0; const Index peeled_mc2 = mr>=2*Traits::LhsProgress ? peeled_mc3+((rows-peeled_mc3)/(2*LhsProgress))*(2*LhsProgress) : 0; const Index peeled_mc1 = mr>=1*Traits::LhsProgress ? (rows/(1*LhsProgress))*(1*LhsProgress) : 0; enum { pk = 8 }; // NOTE Such a large peeling factor is important for large matrices (~ +5% when >1000 on Haswell) const Index peeled_kc = depth & ~(pk-1); const Index prefetch_res_offset = 0; // const Index depth2 = depth & ~1; //---------- Process 3 * LhsProgress rows at once ---------- // This corresponds to 3*LhsProgress x nr register blocks. 
// Usually, make sense only with FMA if(mr>=3*Traits::LhsProgress) { PossiblyRotatingKernelHelper possiblyRotatingKernelHelper(traits); // loops on each largest micro horizontal panel of lhs (3*Traits::LhsProgress x depth) for(Index i=0; i(B_0, blB); \ traits.madd(A0, B_0, C0, T0); \ traits.madd(A1, B_0, C4, T0); \ traits.madd(A2, B_0, C8, B_0); \ possiblyRotatingKernelHelper.template loadOrRotateRhs(B_0, blB); \ traits.madd(A0, B_0, C1, T0); \ traits.madd(A1, B_0, C5, T0); \ traits.madd(A2, B_0, C9, B_0); \ possiblyRotatingKernelHelper.template loadOrRotateRhs(B_0, blB); \ traits.madd(A0, B_0, C2, T0); \ traits.madd(A1, B_0, C6, T0); \ traits.madd(A2, B_0, C10, B_0); \ possiblyRotatingKernelHelper.template loadOrRotateRhs(B_0, blB); \ traits.madd(A0, B_0, C3 , T0); \ traits.madd(A1, B_0, C7, T0); \ traits.madd(A2, B_0, C11, B_0); \ EIGEN_ASM_COMMENT("end step of gebp micro kernel 3pX4"); \ } while(false) internal::prefetch(blB); EIGEN_GEBP_ONESTEP(0); EIGEN_GEBP_ONESTEP(1); EIGEN_GEBP_ONESTEP(2); EIGEN_GEBP_ONESTEP(3); EIGEN_GEBP_ONESTEP(4); EIGEN_GEBP_ONESTEP(5); EIGEN_GEBP_ONESTEP(6); EIGEN_GEBP_ONESTEP(7); blB += pk*4*RhsProgress; blA += pk*3*Traits::LhsProgress; EIGEN_ASM_COMMENT("end gebp micro kernel 3pX4"); } // process remaining peeled loop for(Index k=peeled_kc; k(alpha); R0 = r0.loadPacket(0 * Traits::ResPacketSize); R1 = r0.loadPacket(1 * Traits::ResPacketSize); R2 = r0.loadPacket(2 * Traits::ResPacketSize); traits.acc(C0, alphav, R0); traits.acc(C4, alphav, R1); traits.acc(C8, alphav, R2); r0.storePacket(0 * Traits::ResPacketSize, R0); r0.storePacket(1 * Traits::ResPacketSize, R1); r0.storePacket(2 * Traits::ResPacketSize, R2); R0 = r1.loadPacket(0 * Traits::ResPacketSize); R1 = r1.loadPacket(1 * Traits::ResPacketSize); R2 = r1.loadPacket(2 * Traits::ResPacketSize); traits.acc(C1, alphav, R0); traits.acc(C5, alphav, R1); traits.acc(C9, alphav, R2); r1.storePacket(0 * Traits::ResPacketSize, R0); r1.storePacket(1 * Traits::ResPacketSize, R1); r1.storePacket(2 * Traits::ResPacketSize, R2); R0 = r2.loadPacket(0 * Traits::ResPacketSize); R1 = r2.loadPacket(1 * Traits::ResPacketSize); R2 = r2.loadPacket(2 * Traits::ResPacketSize); traits.acc(C2, alphav, R0); traits.acc(C6, alphav, R1); traits.acc(C10, alphav, R2); r2.storePacket(0 * Traits::ResPacketSize, R0); r2.storePacket(1 * Traits::ResPacketSize, R1); r2.storePacket(2 * Traits::ResPacketSize, R2); R0 = r3.loadPacket(0 * Traits::ResPacketSize); R1 = r3.loadPacket(1 * Traits::ResPacketSize); R2 = r3.loadPacket(2 * Traits::ResPacketSize); traits.acc(C3, alphav, R0); traits.acc(C7, alphav, R1); traits.acc(C11, alphav, R2); r3.storePacket(0 * Traits::ResPacketSize, R0); r3.storePacket(1 * Traits::ResPacketSize, R1); r3.storePacket(2 * Traits::ResPacketSize, R2); } // Deal with remaining columns of the rhs for(Index j2=packet_cols4; j2(alpha); R0 = r0.loadPacket(0 * Traits::ResPacketSize); R1 = r0.loadPacket(1 * Traits::ResPacketSize); R2 = r0.loadPacket(2 * Traits::ResPacketSize); traits.acc(C0, alphav, R0); traits.acc(C4, alphav, R1); traits.acc(C8, alphav, R2); r0.storePacket(0 * Traits::ResPacketSize, R0); r0.storePacket(1 * Traits::ResPacketSize, R1); r0.storePacket(2 * Traits::ResPacketSize, R2); } } } //---------- Process 2 * LhsProgress rows at once ---------- if(mr>=2*Traits::LhsProgress) { // loops on each largest micro horizontal panel of lhs (2*LhsProgress x depth) for(Index i=peeled_mc3; i(alpha); R0 = r0.loadPacket(0 * Traits::ResPacketSize); R1 = r0.loadPacket(1 * Traits::ResPacketSize); R2 = r1.loadPacket(0 * 
Traits::ResPacketSize); R3 = r1.loadPacket(1 * Traits::ResPacketSize); traits.acc(C0, alphav, R0); traits.acc(C4, alphav, R1); traits.acc(C1, alphav, R2); traits.acc(C5, alphav, R3); r0.storePacket(0 * Traits::ResPacketSize, R0); r0.storePacket(1 * Traits::ResPacketSize, R1); r1.storePacket(0 * Traits::ResPacketSize, R2); r1.storePacket(1 * Traits::ResPacketSize, R3); R0 = r2.loadPacket(0 * Traits::ResPacketSize); R1 = r2.loadPacket(1 * Traits::ResPacketSize); R2 = r3.loadPacket(0 * Traits::ResPacketSize); R3 = r3.loadPacket(1 * Traits::ResPacketSize); traits.acc(C2, alphav, R0); traits.acc(C6, alphav, R1); traits.acc(C3, alphav, R2); traits.acc(C7, alphav, R3); r2.storePacket(0 * Traits::ResPacketSize, R0); r2.storePacket(1 * Traits::ResPacketSize, R1); r3.storePacket(0 * Traits::ResPacketSize, R2); r3.storePacket(1 * Traits::ResPacketSize, R3); } // Deal with remaining columns of the rhs for(Index j2=packet_cols4; j2(alpha); R0 = r0.loadPacket(0 * Traits::ResPacketSize); R1 = r0.loadPacket(1 * Traits::ResPacketSize); traits.acc(C0, alphav, R0); traits.acc(C4, alphav, R1); r0.storePacket(0 * Traits::ResPacketSize, R0); r0.storePacket(1 * Traits::ResPacketSize, R1); } } } //---------- Process 1 * LhsProgress rows at once ---------- if(mr>=1*Traits::LhsProgress) { // loops on each largest micro horizontal panel of lhs (1*LhsProgress x depth) for(Index i=peeled_mc2; i(alpha); R0 = r0.loadPacket(0 * Traits::ResPacketSize); R1 = r1.loadPacket(0 * Traits::ResPacketSize); traits.acc(C0, alphav, R0); traits.acc(C1, alphav, R1); r0.storePacket(0 * Traits::ResPacketSize, R0); r1.storePacket(0 * Traits::ResPacketSize, R1); R0 = r2.loadPacket(0 * Traits::ResPacketSize); R1 = r3.loadPacket(0 * Traits::ResPacketSize); traits.acc(C2, alphav, R0); traits.acc(C3, alphav, R1); r2.storePacket(0 * Traits::ResPacketSize, R0); r3.storePacket(0 * Traits::ResPacketSize, R1); } // Deal with remaining columns of the rhs for(Index j2=packet_cols4; j2(alpha); R0 = r0.loadPacket(0 * Traits::ResPacketSize); traits.acc(C0, alphav, R0); r0.storePacket(0 * Traits::ResPacketSize, R0); } } } //---------- Process remaining rows, 1 by 1 ---------- for(Index i=peeled_mc1; i::half,SResPacket>::type SResPacketHalf; typedef typename conditional::half,SLhsPacket>::type SLhsPacketHalf; typedef typename conditional::half,SRhsPacket>::type SRhsPacketHalf; typedef typename conditional::half,SAccPacket>::type SAccPacketHalf; SResPacketHalf R = res.template gatherPacket(i, j2); SResPacketHalf alphav = pset1(alpha); if(depth-endk>0) { // We have to handle the last row of the rhs which corresponds to a half-packet SLhsPacketHalf a0; SRhsPacketHalf b0; straits.loadLhsUnaligned(blB, a0); straits.loadRhs(blA, b0); SAccPacketHalf c0 = predux4(C0); straits.madd(a0,b0,c0,b0); straits.acc(c0, alphav, R); } else { straits.acc(predux4(C0), alphav, R); } res.scatterPacket(i, j2, R); } else { SResPacket R = res.template gatherPacket(i, j2); SResPacket alphav = pset1(alpha); straits.acc(C0, alphav, R); res.scatterPacket(i, j2, R); } } else // scalar path { // get a 1 x 4 res block as registers ResScalar C0(0), C1(0), C2(0), C3(0); for(Index k=0; k struct gemm_pack_lhs { typedef typename DataMapper::LinearMapper LinearMapper; EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); }; template EIGEN_DONT_INLINE void gemm_pack_lhs ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) { typedef typename packet_traits::type 
Packet; enum { PacketSize = packet_traits::size }; EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS"); EIGEN_UNUSED_VARIABLE(stride); EIGEN_UNUSED_VARIABLE(offset); eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); eigen_assert( ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) || (Pack1<=4) ); conj_if::IsComplex && Conjugate> cj; const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0; const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0; const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0; const Index peeled_mc0 = Pack2>=1*PacketSize ? peeled_mc1 : Pack2>1 ? (rows/Pack2)*Pack2 : 0; Index i=0; // Pack 3 packets if(Pack1>=3*PacketSize) { if(PanelMode) { for(; i=2*PacketSize) { if(PanelMode) { for(; i=1*PacketSize) { if(PanelMode) { for(; i1) { for(; i struct gemm_pack_lhs { typedef typename DataMapper::LinearMapper LinearMapper; EIGEN_DONT_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); }; template EIGEN_DONT_INLINE void gemm_pack_lhs ::operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) { typedef typename packet_traits::type Packet; enum { PacketSize = packet_traits::size }; EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS"); EIGEN_UNUSED_VARIABLE(stride); EIGEN_UNUSED_VARIABLE(offset); eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); conj_if::IsComplex && Conjugate> cj; // const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0; // const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0; // const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0; int pack = Pack1; Index i = 0; while(pack>0) { Index remaining_rows = rows-i; Index peeled_mc = i+(remaining_rows/pack)*pack; for(; i=PacketSize) { for(; k kernel; for (int p = 0; p < PacketSize; ++p) kernel.packet[p] = lhs.loadPacket(i+p+m, k); ptranspose(kernel); for (int p = 0; p < PacketSize; ++p) pstore(blockA+m+(pack)*p, cj.pconj(kernel.packet[p])); } blockA += PacketSize*pack; } } for(; k struct gemm_pack_rhs { typedef typename packet_traits::type Packet; typedef typename DataMapper::LinearMapper LinearMapper; enum { PacketSize = packet_traits::size }; EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); }; template EIGEN_DONT_INLINE void gemm_pack_rhs ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) { EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR"); EIGEN_UNUSED_VARIABLE(stride); EIGEN_UNUSED_VARIABLE(offset); eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); conj_if::IsComplex && Conjugate> cj; Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0; Index packet_cols4 = nr>=4 ? 
(cols/4) * 4 : 0; const Index peeled_k = (depth/PacketSize)*PacketSize; // if(nr>=8) // { // for(Index j2=0; j2 kernel; // for (int p = 0; p < PacketSize; ++p) { // kernel.packet[p] = ploadu(&rhs[(j2+p)*rhsStride+k]); // } // ptranspose(kernel); // for (int p = 0; p < PacketSize; ++p) { // pstoreu(blockB+count, cj.pconj(kernel.packet[p])); // count+=PacketSize; // } // } // } // for(; k=4) { for(Index j2=packet_cols8; j2 kernel; kernel.packet[0] = dm0.loadPacket(k); kernel.packet[1] = dm1.loadPacket(k); kernel.packet[2] = dm2.loadPacket(k); kernel.packet[3] = dm3.loadPacket(k); ptranspose(kernel); pstoreu(blockB+0*PacketSize, cj.pconj(kernel.packet[0])); pstoreu(blockB+1*PacketSize, cj.pconj(kernel.packet[1])); pstoreu(blockB+2*PacketSize, cj.pconj(kernel.packet[2])); pstoreu(blockB+3*PacketSize, cj.pconj(kernel.packet[3])); blockB+=4*PacketSize; } } for(; k struct gemm_pack_rhs { typedef typename packet_traits::type Packet; typedef typename packet_traits::half HalfPacket; typedef typename DataMapper::LinearMapper LinearMapper; enum { PacketSize = packet_traits::size, HalfPacketSize = packet_traits::HasHalfPacket ? unpacket_traits::half>::size : 0 }; EIGEN_DONT_INLINE void operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); }; template EIGEN_DONT_INLINE void gemm_pack_rhs ::operator()(Scalar* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) { EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR"); EIGEN_UNUSED_VARIABLE(stride); EIGEN_UNUSED_VARIABLE(offset); eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride)); conj_if::IsComplex && Conjugate> cj; Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0; Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0; // if(nr>=8) // { // for(Index j2=0; j2(&rhs[k*rhsStride + j2]); // pstoreu(blockB+count, cj.pconj(A)); // } else if (PacketSize==4) { // Packet A = ploadu(&rhs[k*rhsStride + j2]); // Packet B = ploadu(&rhs[k*rhsStride + j2 + PacketSize]); // pstoreu(blockB+count, cj.pconj(A)); // pstoreu(blockB+count+PacketSize, cj.pconj(B)); // } else { // const Scalar* b0 = &rhs[k*rhsStride + j2]; // blockB[count+0] = cj(b0[0]); // blockB[count+1] = cj(b0[1]); // blockB[count+2] = cj(b0[2]); // blockB[count+3] = cj(b0[3]); // blockB[count+4] = cj(b0[4]); // blockB[count+5] = cj(b0[5]); // blockB[count+6] = cj(b0[6]); // blockB[count+7] = cj(b0[7]); // } // count += 8; // } // // skip what we have after // if(PanelMode) count += 8 * (stride-offset-depth); // } // } if(nr>=4) { for(Index j2=packet_cols8; j2(blockB, cj.pconj(A)); blockB += HalfPacketSize; } else { const LinearMapper dm0 = rhs.getLinearMapper(k, j2); blockB[0] = cj(dm0(0)); blockB[1] = cj(dm0(1)); blockB[2] = cj(dm0(2)); blockB[3] = cj(dm0(3)); blockB += 4; } } // skip what we have after if(PanelMode) blockB += 4 * (stride-offset-depth); } } // copy the remaining columns one at a time (nr==1) for(Index j2=packet_cols4; j2