author    Gael Guennebaud <g.gael@free.fr>  2014-04-16 17:05:11 +0200
committer Gael Guennebaud <g.gael@free.fr>  2014-04-16 17:05:11 +0200
commit    d5a795f67366db20a132cc70e4f0217f42372357 (patch)
tree      74df7a911811e64a4fa0baff940abe9c97abd5b6 /Eigen/src/Core
parent    feaf7c7e6d01a4804cee5949a01ece1f8a46866f (diff)
New gebp kernel handling up to 3 packets x 4 register-level blocks. Huge speedup on Haswell.
This changeset also introduces new vector functions: ploadquad and predux4.
Diffstat (limited to 'Eigen/src/Core')
-rwxr-xr-x  Eigen/src/Core/GenericPacketMath.h                    35
-rw-r--r--  Eigen/src/Core/arch/AVX/Complex.h                      4
-rw-r--r--  Eigen/src/Core/arch/AVX/PacketMath.h                  29
-rwxr-xr-x  Eigen/src/Core/arch/AltiVec/PacketMath.h               4
-rw-r--r--  Eigen/src/Core/arch/NEON/Complex.h                     2
-rw-r--r--  Eigen/src/Core/arch/NEON/PacketMath.h                  4
-rw-r--r--  Eigen/src/Core/arch/SSE/Complex.h                      4
-rwxr-xr-x  Eigen/src/Core/arch/SSE/PacketMath.h                   6
-rw-r--r--  Eigen/src/Core/products/GeneralBlockPanelKernel.h   1412
-rw-r--r--  Eigen/src/Core/products/GeneralMatrixMatrix.h        123
-rw-r--r--  Eigen/src/Core/products/Parallelizer.h                25
-rw-r--r--  Eigen/src/Core/products/SelfadjointMatrixMatrix.h     33
12 files changed, 1056 insertions, 625 deletions
diff --git a/Eigen/src/Core/GenericPacketMath.h b/Eigen/src/Core/GenericPacketMath.h
index 3e5db1a88..147298009 100755
--- a/Eigen/src/Core/GenericPacketMath.h
+++ b/Eigen/src/Core/GenericPacketMath.h
@@ -161,14 +161,6 @@ pload(const typename unpacket_traits<Packet>::type* from) { return *from; }
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
ploadu(const typename unpacket_traits<Packet>::type* from) { return *from; }
-/** \internal \returns a packet with elements of \a *from duplicated.
- * For instance, for a packet of 8 elements, 4 scalar will be read from \a *from and
- * duplicated to form: {from[0],from[0],from[1],from[1],,from[2],from[2],,from[3],from[3]}
- * Currently, this function is only used for scalar * complex products.
- */
-template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
-ploaddup(const typename unpacket_traits<Packet>::type* from) { return *from; }
-
/** \internal \returns a packet with constant coefficients \a a, e.g.: (a,a,a,a) */
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pset1(const typename unpacket_traits<Packet>::type& a) { return a; }
@@ -177,6 +169,24 @@ pset1(const typename unpacket_traits<Packet>::type& a) { return a; }
template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
pload1(const typename unpacket_traits<Packet>::type *a) { return pset1<Packet>(*a); }
+/** \internal \returns a packet with elements of \a *from duplicated.
+ * For instance, for a packet of 8 elements, 4 scalars will be read from \a *from and
+ * duplicated to form: {from[0],from[0],from[1],from[1],from[2],from[2],from[3],from[3]}
+ * Currently, this function is only used for scalar * complex products.
+ */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+ploaddup(const typename unpacket_traits<Packet>::type* from) { return *from; }
+
+/** \internal \returns a packet with elements of \a *from quadrupled.
+ * For instance, for a packet of 8 elements, 2 scalars will be read from \a *from and
+ * replicated to form: {from[0],from[0],from[0],from[0],from[1],from[1],from[1],from[1]}
+ * Currently, this function is only used in matrix products.
+ * For packet-size smaller or equal to 4, this function is equivalent to pload1
+ */
+template<typename Packet> EIGEN_DEVICE_FUNC inline Packet
+ploadquad(const typename unpacket_traits<Packet>::type* from)
+{ return pload1<Packet>(from); }
+
/** \internal equivalent to
* \code
* a0 = pload1(a+0);
@@ -249,6 +259,15 @@ preduxp(const Packet* vecs) { return vecs[0]; }
template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux(const Packet& a)
{ return a; }
+/** \internal \returns the sum of the elements of \a a by block of 4 elements.
+ * For a packet {a0, a1, a2, a3, a4, a5, a6, a7}, it returns a half packet {a0+a4, a1+a5, a2+a6, a3+a7}
+ * For packet-size smaller or equal to 4, this boils down to a noop.
+ */
+template<typename Packet> EIGEN_DEVICE_FUNC inline
+typename conditional<(unpacket_traits<Packet>::size%8)==0,typename unpacket_traits<Packet>::half,Packet>::type
+predux4(const Packet& a)
+{ return a; }
+
/** \internal \returns the product of the elements of \a a*/
template<typename Packet> EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_mul(const Packet& a)
{ return a; }
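To make the semantics of the two new primitives concrete, here is a minimal plain-array sketch for an 8-element float packet (illustrative only; the *_ref names are invented for this example and are not Eigen functions):

// Reference semantics of ploadquad and predux4, assuming an 8-element float packet (e.g. AVX).
#include <cstdio>

// ploadquad: read 2 scalars and replicate each one 4 times.
void ploadquad_ref(const float* from, float out[8]) {
  for (int i = 0; i < 8; ++i) out[i] = from[i / 4];        // {f0,f0,f0,f0,f1,f1,f1,f1}
}

// predux4: sum the elements by blocks of 4, yielding a half packet.
void predux4_ref(const float in[8], float out[4]) {
  for (int i = 0; i < 4; ++i) out[i] = in[i] + in[i + 4];  // {a0+a4,a1+a5,a2+a6,a3+a7}
}

int main() {
  const float src[2] = {1.f, 2.f};
  float q[8], h[4];
  ploadquad_ref(src, q);   // q = {1,1,1,1,2,2,2,2}
  predux4_ref(q, h);       // h = {3,3,3,3}
  std::printf("%g %g %g %g\n", h[0], h[1], h[2], h[3]);
  return 0;
}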
diff --git a/Eigen/src/Core/arch/AVX/Complex.h b/Eigen/src/Core/arch/AVX/Complex.h
index cb16180c5..8f95a7be7 100644
--- a/Eigen/src/Core/arch/AVX/Complex.h
+++ b/Eigen/src/Core/arch/AVX/Complex.h
@@ -45,7 +45,7 @@ template<> struct packet_traits<std::complex<float> > : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet4cf> { typedef std::complex<float> type; enum {size=4}; };
+template<> struct unpacket_traits<Packet4cf> { typedef std::complex<float> type; enum {size=4}; typedef Packet2cf half; };
template<> EIGEN_STRONG_INLINE Packet4cf padd<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_add_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet4cf psub<Packet4cf>(const Packet4cf& a, const Packet4cf& b) { return Packet4cf(_mm256_sub_ps(a.v,b.v)); }
@@ -271,7 +271,7 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet2cd> { typedef std::complex<double> type; enum {size=2}; };
+template<> struct unpacket_traits<Packet2cd> { typedef std::complex<double> type; enum {size=2}; typedef Packet1cd half; };
template<> EIGEN_STRONG_INLINE Packet2cd padd<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_add_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cd psub<Packet2cd>(const Packet2cd& a, const Packet2cd& b) { return Packet2cd(_mm256_sub_pd(a.v,b.v)); }
diff --git a/Eigen/src/Core/arch/AVX/PacketMath.h b/Eigen/src/Core/arch/AVX/PacketMath.h
index 38f52ecc8..47e10f6da 100644
--- a/Eigen/src/Core/arch/AVX/PacketMath.h
+++ b/Eigen/src/Core/arch/AVX/PacketMath.h
@@ -83,9 +83,9 @@ template<> struct packet_traits<int> : default_packet_traits
};
*/
-template<> struct unpacket_traits<Packet8f> { typedef float type; enum {size=8}; };
-template<> struct unpacket_traits<Packet4d> { typedef double type; enum {size=4}; };
-template<> struct unpacket_traits<Packet8i> { typedef int type; enum {size=8}; };
+template<> struct unpacket_traits<Packet8f> { typedef float type; typedef Packet4f half; enum {size=8}; };
+template<> struct unpacket_traits<Packet4d> { typedef double type; typedef Packet2d half; enum {size=4}; };
+template<> struct unpacket_traits<Packet8i> { typedef int type; typedef Packet4i half; enum {size=8}; };
template<> EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(const float& from) { return _mm256_set1_ps(from); }
template<> EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(const double& from) { return _mm256_set1_pd(from); }
@@ -141,7 +141,16 @@ template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f&
return _mm256_fmadd_ps(a,b,c);
#endif
}
-template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) { return _mm256_fmadd_pd(a,b,c); }
+template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
+#if defined(__clang__) || defined(__GNUC__)
+ // see above
+ Packet4d res = c;
+ asm("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
+ return res;
+#else
+ return _mm256_fmadd_pd(a,b,c);
+#endif
+}
#endif
template<> EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_min_ps(a,b); }
@@ -189,6 +198,13 @@ template<> EIGEN_STRONG_INLINE Packet4d ploaddup<Packet4d>(const double* from)
return _mm256_blend_pd(tmp1,_mm256_permute2f128_pd(tmp2,tmp2,1),12);
}
+// Loads 2 floats from memory and returns the packet {a0, a0, a0, a0, a1, a1, a1, a1}
+template<> EIGEN_STRONG_INLINE Packet8f ploadquad<Packet8f>(const float* from)
+{
+ Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));
+ return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from+1), 1);
+}
+
template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet8f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet8i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
@@ -345,6 +361,11 @@ template<> EIGEN_STRONG_INLINE double predux<Packet4d>(const Packet4d& a)
return pfirst(_mm256_hadd_pd(tmp0,tmp0));
}
+template<> EIGEN_STRONG_INLINE Packet4f predux4<Packet8f>(const Packet8f& a)
+{
+ return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1));
+}
+
template<> EIGEN_STRONG_INLINE float predux_mul<Packet8f>(const Packet8f& a)
{
Packet8f tmp;
diff --git a/Eigen/src/Core/arch/AltiVec/PacketMath.h b/Eigen/src/Core/arch/AltiVec/PacketMath.h
index 5d7a16f5c..16948264f 100755
--- a/Eigen/src/Core/arch/AltiVec/PacketMath.h
+++ b/Eigen/src/Core/arch/AltiVec/PacketMath.h
@@ -99,8 +99,8 @@ template<> struct packet_traits<int> : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4}; };
-template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4}; };
+template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4}; typedef Packet4f half; };
+template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4}; typedef Packet4i half; };
/*
inline std::ostream & operator <<(std::ostream & s, const Packet4f & v)
{
diff --git a/Eigen/src/Core/arch/NEON/Complex.h b/Eigen/src/Core/arch/NEON/Complex.h
index e49c1a873..7ca76714f 100644
--- a/Eigen/src/Core/arch/NEON/Complex.h
+++ b/Eigen/src/Core/arch/NEON/Complex.h
@@ -47,7 +47,7 @@ template<> struct packet_traits<std::complex<float> > : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2}; };
+template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2}; typedef Packet2cf half; };
template<> EIGEN_STRONG_INLINE Packet2cf pset1<Packet2cf>(const std::complex<float>& from)
{
diff --git a/Eigen/src/Core/arch/NEON/PacketMath.h b/Eigen/src/Core/arch/NEON/PacketMath.h
index fae7b55fc..83150507a 100644
--- a/Eigen/src/Core/arch/NEON/PacketMath.h
+++ b/Eigen/src/Core/arch/NEON/PacketMath.h
@@ -101,8 +101,8 @@ EIGEN_STRONG_INLINE void vst1q_f32(float* to, float32x4_t from) { ::vst1q
EIGEN_STRONG_INLINE void vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); }
#endif
-template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4}; };
-template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4}; };
+template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4}; typedef Packet4f half; };
+template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4}; typedef Packet4i half; };
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from) { return vdupq_n_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int& from) { return vdupq_n_s32(from); }
diff --git a/Eigen/src/Core/arch/SSE/Complex.h b/Eigen/src/Core/arch/SSE/Complex.h
index e54ebbf90..715e5a13c 100644
--- a/Eigen/src/Core/arch/SSE/Complex.h
+++ b/Eigen/src/Core/arch/SSE/Complex.h
@@ -49,7 +49,7 @@ template<> struct packet_traits<std::complex<float> > : default_packet_traits
};
#endif
-template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2}; };
+template<> struct unpacket_traits<Packet2cf> { typedef std::complex<float> type; enum {size=2}; typedef Packet2cf half; };
template<> EIGEN_STRONG_INLINE Packet2cf padd<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_add_ps(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet2cf psub<Packet2cf>(const Packet2cf& a, const Packet2cf& b) { return Packet2cf(_mm_sub_ps(a.v,b.v)); }
@@ -296,7 +296,7 @@ template<> struct packet_traits<std::complex<double> > : default_packet_traits
};
#endif
-template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1}; };
+template<> struct unpacket_traits<Packet1cd> { typedef std::complex<double> type; enum {size=1}; typedef Packet1cd half; };
template<> EIGEN_STRONG_INLINE Packet1cd padd<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_add_pd(a.v,b.v)); }
template<> EIGEN_STRONG_INLINE Packet1cd psub<Packet1cd>(const Packet1cd& a, const Packet1cd& b) { return Packet1cd(_mm_sub_pd(a.v,b.v)); }
diff --git a/Eigen/src/Core/arch/SSE/PacketMath.h b/Eigen/src/Core/arch/SSE/PacketMath.h
index bc17726b4..89dfa6975 100755
--- a/Eigen/src/Core/arch/SSE/PacketMath.h
+++ b/Eigen/src/Core/arch/SSE/PacketMath.h
@@ -107,9 +107,9 @@ template<> struct packet_traits<int> : default_packet_traits
};
};
-template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4}; };
-template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2}; };
-template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4}; };
+template<> struct unpacket_traits<Packet4f> { typedef float type; enum {size=4}; typedef Packet4f half; };
+template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2}; typedef Packet2d half; };
+template<> struct unpacket_traits<Packet4i> { typedef int type; enum {size=4}; typedef Packet4i half; };
#if defined(_MSC_VER) && (_MSC_VER==1500)
// Workaround MSVC 9 internal compiler error.
diff --git a/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
index d9e659c9a..df30fdd3e 100644
--- a/Eigen/src/Core/products/GeneralBlockPanelKernel.h
+++ b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
@@ -10,6 +10,12 @@
#ifndef EIGEN_GENERAL_BLOCK_PANEL_H
#define EIGEN_GENERAL_BLOCK_PANEL_H
+#ifdef USE_IACA
+#include "iacaMarks.h"
+#else
+#define IACA_START
+#define IACA_END
+#endif
namespace Eigen {
@@ -92,12 +98,22 @@ void computeProductBlockingSizes(SizeType& k, SizeType& m, SizeType& n)
};
manage_caching_sizes(GetAction, &l1, &l2);
- k = std::min<SizeType>(k, l1/kdiv);
- SizeType _m = k>0 ? l2/(4 * sizeof(LhsScalar) * k) : 0;
- if(_m<m) m = _m & mr_mask;
+
+// k = std::min<SizeType>(k, l1/kdiv);
+// SizeType _m = k>0 ? l2/(4 * sizeof(LhsScalar) * k) : 0;
+// if(_m<m) m = _m & mr_mask;
- m = 1024;
- k = 256;
+ // In unit tests we do not want to use extra large matrices,
+ // so we reduce the block size to check the blocking strategy is not flawed
+#ifndef EIGEN_DEBUG_SMALL_PRODUCT_BLOCKS
+ k = std::min<SizeType>(k,240);
+ n = std::min<SizeType>(n,3840/sizeof(RhsScalar));
+ m = std::min<SizeType>(m,3840/sizeof(RhsScalar));
+#else
+ k = std::min<SizeType>(k,24);
+ n = std::min<SizeType>(n,384/sizeof(RhsScalar));
+ m = std::min<SizeType>(m,384/sizeof(RhsScalar));
+#endif
}
template<typename LhsScalar, typename RhsScalar, typename SizeType>
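As a quick worked example of the new caps, assuming double-precision scalars (sizeof(RhsScalar)==8) and the non-debug branch above; a sketch only, not an Eigen call:

// Illustrative computation of the capped blocking sizes for a double-precision product.
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  std::ptrdiff_t k = 1000, m = 1000, n = 1000;             // requested block sizes
  k = std::min<std::ptrdiff_t>(k, 240);                    // depth cap
  n = std::min<std::ptrdiff_t>(n, 3840 / sizeof(double));  // 3840/8 = 480
  m = std::min<std::ptrdiff_t>(m, 3840 / sizeof(double));  // 3840/8 = 480
  std::printf("k=%td m=%td n=%td\n", k, m, n);             // k=240 m=480 n=480
  return 0;
}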
@@ -164,11 +180,15 @@ public:
NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
- // register block size along the N direction (must be either 4 or 8)
- nr = NumberOfRegisters/2,
+ // register block size along the N direction (must be either 2 or 4)
+ nr = 4,//NumberOfRegisters/4,
// register block size along the M direction (currently, this one cannot be modified)
- mr = LhsPacketSize,
+#ifdef __FMA__
+ mr = 3*LhsPacketSize,
+#else
+ mr = 2*LhsPacketSize,
+#endif
LhsProgress = LhsPacketSize,
RhsProgress = 1
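For context on the choice of nr=4 and mr=3*LhsPacketSize: counting the accumulators and temporaries used later in the 3-packet kernel (C0..C11, A0..A2, B_0) lands exactly on the AVX register-file size. A small compile-time sketch of that arithmetic (illustrative, not Eigen code):

// Rough register budget for the 3-packet x 4 kernel; counts taken from the
// accumulators and temporaries that appear further down in the diff.
constexpr int acc_regs = 3 * 4;  // C0..C11: one accumulator per (lhs packet, rhs column)
constexpr int lhs_regs = 3;      // A0, A1, A2
constexpr int rhs_regs = 1;      // B_0, reloaded for each rhs column
static_assert(acc_regs + lhs_regs + rhs_regs == 16,
              "fits the 16 AVX ymm registers available on Haswell");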
@@ -198,23 +218,32 @@ public:
{
pbroadcast2(b, b0, b1);
}
-
- EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
+
+ template<typename RhsPacketType>
+ EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacketType& dest) const
{
- dest = pset1<RhsPacket>(*b);
+ dest = pset1<RhsPacketType>(*b);
+ }
+
+ EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
+ {
+ dest = ploadquad<RhsPacket>(b);
}
- EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
+ template<typename LhsPacketType>
+ EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacketType& dest) const
{
- dest = pload<LhsPacket>(a);
+ dest = pload<LhsPacketType>(a);
}
- EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacket& dest) const
+ template<typename LhsPacketType>
+ EIGEN_STRONG_INLINE void loadLhsUnaligned(const LhsScalar* a, LhsPacketType& dest) const
{
- dest = ploadu<LhsPacket>(a);
+ dest = ploadu<LhsPacketType>(a);
}
- EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, AccPacket& tmp) const
+ template<typename LhsPacketType, typename RhsPacketType, typename AccPacketType>
+ EIGEN_STRONG_INLINE void madd(const LhsPacketType& a, const RhsPacketType& b, AccPacketType& c, AccPacketType& tmp) const
{
// It would be a lot cleaner to call pmadd all the time. Unfortunately if we
// let gcc allocate the register in which to store the result of the pmul
@@ -232,6 +261,12 @@ public:
{
r = pmadd(c,alpha,r);
}
+
+ template<typename ResPacketHalf>
+ EIGEN_STRONG_INLINE void acc(const ResPacketHalf& c, const ResPacketHalf& alpha, ResPacketHalf& r) const
+ {
+ r = pmadd(c,alpha,r);
+ }
protected:
// conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
@@ -281,6 +316,11 @@ public:
{
dest = pset1<RhsPacket>(*b);
}
+
+ EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, RhsPacket& dest) const
+ {
+ dest = pset1<RhsPacket>(*b);
+ }
EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
{
@@ -346,7 +386,23 @@ DoublePacket<Packet> padd(const DoublePacket<Packet> &a, const DoublePacket<Pack
res.second = padd(a.second,b.second);
return res;
}
-
+
+template<typename Packet>
+const DoublePacket<Packet>& predux4(const DoublePacket<Packet> &a)
+{
+ return a;
+}
+
+template<typename Packet> struct unpacket_traits<DoublePacket<Packet> > { typedef DoublePacket<Packet> half; };
+// template<typename Packet>
+// DoublePacket<Packet> pmadd(const DoublePacket<Packet> &a, const DoublePacket<Packet> &b)
+// {
+// DoublePacket<Packet> res;
+// res.first = padd(a.first, b.first);
+// res.second = padd(a.second,b.second);
+// return res;
+// }
+
template<typename RealScalar, bool _ConjLhs, bool _ConjRhs>
class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs >
{
@@ -404,6 +460,16 @@ public:
dest.second = pset1<RealPacket>(imag(*b));
}
+ EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, ResPacket& dest) const
+ {
+ loadRhs(b,dest);
+ }
+ EIGEN_STRONG_INLINE void loadRhsQuad(const RhsScalar* b, DoublePacketType& dest) const
+ {
+ eigen_internal_assert(unpacket_traits<ScalarPacket>::size<=4);
+ loadRhs(b,dest);
+ }
+
// linking error if instantiated without being optimized out:
void broadcastRhs(const RhsScalar* b, RhsPacket& b0, RhsPacket& b1, RhsPacket& b2, RhsPacket& b3);
@@ -619,30 +685,36 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,mr,nr,ConjugateLhs,ConjugateRhs>
if(strideA==-1) strideA = depth;
if(strideB==-1) strideB = depth;
conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
- Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
- // Here we assume that mr==LhsProgress
- const Index peeled_mc = (rows/mr)*mr;
+ const Index peeled_mc3 = mr>=3*Traits::LhsProgress ? (rows/(3*LhsProgress))*(3*LhsProgress) : 0;
+ const Index peeled_mc2 = mr>=2*Traits::LhsProgress ? peeled_mc3+((rows-peeled_mc3)/(2*LhsProgress))*(2*LhsProgress) : 0;
+ const Index peeled_mc1 = mr>=1*Traits::LhsProgress ? (rows/(1*LhsProgress))*(1*LhsProgress) : 0;
enum { pk = 8 }; // NOTE Such a large peeling factor is important for large matrices (~ +5% when >1000 on Haswell)
const Index peeled_kc = depth & ~(pk-1);
- const Index depth2 = depth & ~1;
-
- // loops on each micro vertical panel of rhs (depth x nr)
- // First pass using depth x 8 panels
- if(nr>=8)
+// const Index depth2 = depth & ~1;
+
+// std::cout << mr << " " << peeled_mc3 << " " << peeled_mc2 << " " << peeled_mc1 << "\n";
+
+
+ //---------- Process 3 * LhsProgress rows at once ----------
+ // This corresponds to 3*LhsProgress x nr register blocks.
+  // Usually, this makes sense only with FMA
+ if(mr>=3*Traits::LhsProgress)
{
- for(Index j2=0; j2<packet_cols8; j2+=nr)
+ // loops on each largest micro horizontal panel of lhs (3*Traits::LhsProgress x depth)
+ for(Index i=0; i<peeled_mc3; i+=3*Traits::LhsProgress)
{
- // loops on each largest micro horizontal panel of lhs (mr x depth)
- // => we select a mr x nr micro block of res which is entirely
- // stored into mr/packet_size x nr registers.
- for(Index i=0; i<peeled_mc; i+=mr)
+ // loops on each largest micro vertical panel of rhs (depth * nr)
+ for(Index j2=0; j2<packet_cols4; j2+=nr)
{
- const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];
+ // We select a 3*Traits::LhsProgress x nr micro block of res which is entirely
+ // stored into 3 x nr registers.
+
+ const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*Traits::LhsProgress)];
prefetch(&blA[0]);
// gets res block as register
- AccPacket C0, C1, C2, C3, C4, C5, C6, C7;
+ AccPacket C0, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11;
traits.initAcc(C0);
traits.initAcc(C1);
traits.initAcc(C2);
@@ -651,282 +723,390 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,mr,nr,ConjugateLhs,ConjugateRhs>
traits.initAcc(C5);
traits.initAcc(C6);
traits.initAcc(C7);
+ traits.initAcc(C8);
+ traits.initAcc(C9);
+ traits.initAcc(C10);
+ traits.initAcc(C11);
ResScalar* r0 = &res[(j2+0)*resStride + i];
+ ResScalar* r1 = &res[(j2+1)*resStride + i];
+ ResScalar* r2 = &res[(j2+2)*resStride + i];
+ ResScalar* r3 = &res[(j2+3)*resStride + i];
+
+ internal::prefetch(r0);
+ internal::prefetch(r1);
+ internal::prefetch(r2);
+ internal::prefetch(r3);
// performs "inner" products
const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
- LhsPacket A0;
- // uncomment for register prefetching
- // LhsPacket A1;
- // traits.loadLhs(blA, A0);
+ prefetch(&blB[0]);
+ LhsPacket A0, A1;
+
for(Index k=0; k<peeled_kc; k+=pk)
{
- EIGEN_ASM_COMMENT("begin gegp micro kernel 1p x 8");
- RhsPacket B_0, B1, B2, B3;
-
- // NOTE The following version is faster on some architures
- // but sometimes leads to segfaults because it might read one packet outside the bounds
- // To test it, you also need to uncomment the initialization of A0 above and the copy of A1 to A0 below.
-#if 0
-#define EIGEN_GEBGP_ONESTEP8(K,L,M) \
- traits.loadLhs(&blA[(K+1)*LhsProgress], L); \
- traits.broadcastRhs(&blB[0+8*K*RhsProgress], B_0, B1, B2, B3); \
- traits.madd(M, B_0,C0, B_0); \
- traits.madd(M, B1, C1, B1); \
- traits.madd(M, B2, C2, B2); \
- traits.madd(M, B3, C3, B3); \
- traits.broadcastRhs(&blB[4+8*K*RhsProgress], B_0, B1, B2, B3); \
- traits.madd(M, B_0,C4, B_0); \
- traits.madd(M, B1, C5, B1); \
- traits.madd(M, B2, C6, B2); \
- traits.madd(M, B3, C7, B3)
-#endif
-
-#define EIGEN_GEBGP_ONESTEP8(K,L,M) \
- traits.loadLhs(&blA[K*LhsProgress], A0); \
- traits.broadcastRhs(&blB[0+8*K*RhsProgress], B_0, B1, B2, B3); \
- traits.madd(A0, B_0,C0, B_0); \
- traits.madd(A0, B1, C1, B1); \
- traits.madd(A0, B2, C2, B2); \
- traits.madd(A0, B3, C3, B3); \
- traits.broadcastRhs(&blB[4+8*K*RhsProgress], B_0, B1, B2, B3); \
- traits.madd(A0, B_0,C4, B_0); \
- traits.madd(A0, B1, C5, B1); \
- traits.madd(A0, B2, C6, B2); \
- traits.madd(A0, B3, C7, B3)
+ IACA_START
+ EIGEN_ASM_COMMENT("begin gegp micro kernel 3p x 4");
+ RhsPacket B_0;
+ LhsPacket A2;
- EIGEN_GEBGP_ONESTEP8(0,A1,A0);
- EIGEN_GEBGP_ONESTEP8(1,A0,A1);
- EIGEN_GEBGP_ONESTEP8(2,A1,A0);
- EIGEN_GEBGP_ONESTEP8(3,A0,A1);
- EIGEN_GEBGP_ONESTEP8(4,A1,A0);
- EIGEN_GEBGP_ONESTEP8(5,A0,A1);
- EIGEN_GEBGP_ONESTEP8(6,A1,A0);
- EIGEN_GEBGP_ONESTEP8(7,A0,A1);
-
- blB += pk*8*RhsProgress;
- blA += pk*mr;
+#define EIGEN_GEBGP_ONESTEP(K) \
+ internal::prefetch(blA+(3*K+16)*LhsProgress); \
+ traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0); \
+ traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1); \
+ traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2); \
+ traits.loadRhs(&blB[(0+4*K)*RhsProgress], B_0); \
+ traits.madd(A0, B_0, C0, B_0); \
+ traits.madd(A1, B_0, C4, B_0); \
+ traits.madd(A2, B_0, C8, B_0); \
+ traits.loadRhs(&blB[1+4*K*RhsProgress], B_0); \
+ traits.madd(A0, B_0, C1, B_0); \
+ traits.madd(A1, B_0, C5, B_0); \
+ traits.madd(A2, B_0, C9, B_0); \
+ traits.loadRhs(&blB[2+4*K*RhsProgress], B_0); \
+ traits.madd(A0, B_0, C2, B_0); \
+ traits.madd(A1, B_0, C6, B_0); \
+ traits.madd(A2, B_0, C10, B_0); \
+ traits.loadRhs(&blB[3+4*K*RhsProgress], B_0); \
+ traits.madd(A0, B_0, C3 , B_0); \
+ traits.madd(A1, B_0, C7, B_0); \
+ traits.madd(A2, B_0, C11, B_0)
+
+ internal::prefetch(blB+(48+0));
+ EIGEN_GEBGP_ONESTEP(0);
+ EIGEN_GEBGP_ONESTEP(1);
+ EIGEN_GEBGP_ONESTEP(2);
+ EIGEN_GEBGP_ONESTEP(3);
+ internal::prefetch(blB+(48+16));
+ EIGEN_GEBGP_ONESTEP(4);
+ EIGEN_GEBGP_ONESTEP(5);
+ EIGEN_GEBGP_ONESTEP(6);
+ EIGEN_GEBGP_ONESTEP(7);
+
+ blB += pk*4*RhsProgress;
+ blA += pk*3*Traits::LhsProgress;
+ IACA_END
}
// process remaining peeled loop
for(Index k=peeled_kc; k<depth; k++)
{
- RhsPacket B_0, B1, B2, B3;
- EIGEN_GEBGP_ONESTEP8(0,A1,A0);
- // uncomment for register prefetching
- // A0 = A1;
+ RhsPacket B_0;
+ LhsPacket A2;
+ EIGEN_GEBGP_ONESTEP(0);
+ blB += 4*RhsProgress;
+ blA += 3*Traits::LhsProgress;
+ }
+ #undef EIGEN_GEBGP_ONESTEP
+
+ ResPacket R0, R1, R2;
+ ResPacket alphav = pset1<ResPacket>(alpha);
+
+ R0 = ploadu<ResPacket>(r0+0*Traits::ResPacketSize);
+ R1 = ploadu<ResPacket>(r0+1*Traits::ResPacketSize);
+ R2 = ploadu<ResPacket>(r0+2*Traits::ResPacketSize);
+ traits.acc(C0, alphav, R0);
+ traits.acc(C4, alphav, R1);
+ traits.acc(C8, alphav, R2);
+ pstoreu(r0+0*Traits::ResPacketSize, R0);
+ pstoreu(r0+1*Traits::ResPacketSize, R1);
+ pstoreu(r0+2*Traits::ResPacketSize, R2);
+
+ R0 = ploadu<ResPacket>(r1+0*Traits::ResPacketSize);
+ R1 = ploadu<ResPacket>(r1+1*Traits::ResPacketSize);
+ R2 = ploadu<ResPacket>(r1+2*Traits::ResPacketSize);
+ traits.acc(C1, alphav, R0);
+ traits.acc(C5, alphav, R1);
+ traits.acc(C9, alphav, R2);
+ pstoreu(r1+0*Traits::ResPacketSize, R0);
+ pstoreu(r1+1*Traits::ResPacketSize, R1);
+ pstoreu(r1+2*Traits::ResPacketSize, R2);
+
+ R0 = ploadu<ResPacket>(r2+0*Traits::ResPacketSize);
+ R1 = ploadu<ResPacket>(r2+1*Traits::ResPacketSize);
+ R2 = ploadu<ResPacket>(r2+2*Traits::ResPacketSize);
+ traits.acc(C2, alphav, R0);
+ traits.acc(C6, alphav, R1);
+ traits.acc(C10, alphav, R2);
+ pstoreu(r2+0*Traits::ResPacketSize, R0);
+ pstoreu(r2+1*Traits::ResPacketSize, R1);
+ pstoreu(r2+2*Traits::ResPacketSize, R2);
+
+ R0 = ploadu<ResPacket>(r3+0*Traits::ResPacketSize);
+ R1 = ploadu<ResPacket>(r3+1*Traits::ResPacketSize);
+ R2 = ploadu<ResPacket>(r3+2*Traits::ResPacketSize);
+ traits.acc(C3, alphav, R0);
+ traits.acc(C7, alphav, R1);
+ traits.acc(C11, alphav, R2);
+ pstoreu(r3+0*Traits::ResPacketSize, R0);
+ pstoreu(r3+1*Traits::ResPacketSize, R1);
+ pstoreu(r3+2*Traits::ResPacketSize, R2);
+ }
+
+ // Deal with remaining columns of the rhs
+ for(Index j2=packet_cols4; j2<cols; j2++)
+ {
+ // One column at a time
+ const LhsScalar* blA = &blockA[i*strideA+offsetA*(3*Traits::LhsProgress)];
+ prefetch(&blA[0]);
+
+ // gets res block as register
+ AccPacket C0, C4, C8;
+ traits.initAcc(C0);
+ traits.initAcc(C4);
+ traits.initAcc(C8);
+
+ ResScalar* r0 = &res[(j2+0)*resStride + i];
- blB += 8*RhsProgress;
- blA += mr;
+ // performs "inner" products
+ const RhsScalar* blB = &blockB[j2*strideB+offsetB];
+ LhsPacket A0, A1, A2;
+
+ for(Index k=0; k<peeled_kc; k+=pk)
+ {
+ EIGEN_ASM_COMMENT("begin gegp micro kernel 3p x 1");
+ RhsPacket B_0;
+#define EIGEN_GEBGP_ONESTEP(K) \
+ traits.loadLhs(&blA[(0+3*K)*LhsProgress], A0); \
+ traits.loadLhs(&blA[(1+3*K)*LhsProgress], A1); \
+ traits.loadLhs(&blA[(2+3*K)*LhsProgress], A2); \
+ traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
+ traits.madd(A0, B_0, C0, B_0); \
+ traits.madd(A1, B_0, C4, B_0); \
+ traits.madd(A2, B_0, C8, B_0)
+
+ EIGEN_GEBGP_ONESTEP(0);
+ EIGEN_GEBGP_ONESTEP(1);
+ EIGEN_GEBGP_ONESTEP(2);
+ EIGEN_GEBGP_ONESTEP(3);
+ EIGEN_GEBGP_ONESTEP(4);
+ EIGEN_GEBGP_ONESTEP(5);
+ EIGEN_GEBGP_ONESTEP(6);
+ EIGEN_GEBGP_ONESTEP(7);
+
+ blB += pk*RhsProgress;
+ blA += pk*3*Traits::LhsProgress;
}
- #undef EIGEN_GEBGP_ONESTEP8
- ResPacket R0, R1, R2, R3, R4, R5, R6;
+ // process remaining peeled loop
+ for(Index k=peeled_kc; k<depth; k++)
+ {
+ RhsPacket B_0;
+ EIGEN_GEBGP_ONESTEP(0);
+ blB += RhsProgress;
+ blA += 3*Traits::LhsProgress;
+ }
+#undef EIGEN_GEBGP_ONESTEP
+ ResPacket R0, R1, R2;
ResPacket alphav = pset1<ResPacket>(alpha);
- R0 = ploadu<ResPacket>(r0+0*resStride);
- R1 = ploadu<ResPacket>(r0+1*resStride);
- R2 = ploadu<ResPacket>(r0+2*resStride);
- R3 = ploadu<ResPacket>(r0+3*resStride);
- R4 = ploadu<ResPacket>(r0+4*resStride);
- R5 = ploadu<ResPacket>(r0+5*resStride);
- R6 = ploadu<ResPacket>(r0+6*resStride);
+ R0 = ploadu<ResPacket>(r0+0*Traits::ResPacketSize);
+ R1 = ploadu<ResPacket>(r0+1*Traits::ResPacketSize);
+ R2 = ploadu<ResPacket>(r0+2*Traits::ResPacketSize);
traits.acc(C0, alphav, R0);
- pstoreu(r0+0*resStride, R0);
- R0 = ploadu<ResPacket>(r0+7*resStride);
-
- traits.acc(C1, alphav, R1);
- traits.acc(C2, alphav, R2);
- traits.acc(C3, alphav, R3);
- traits.acc(C4, alphav, R4);
- traits.acc(C5, alphav, R5);
- traits.acc(C6, alphav, R6);
- traits.acc(C7, alphav, R0);
-
- pstoreu(r0+1*resStride, R1);
- pstoreu(r0+2*resStride, R2);
- pstoreu(r0+3*resStride, R3);
- pstoreu(r0+4*resStride, R4);
- pstoreu(r0+5*resStride, R5);
- pstoreu(r0+6*resStride, R6);
- pstoreu(r0+7*resStride, R0);
+ traits.acc(C4, alphav, R1);
+ traits.acc(C8 , alphav, R2);
+ pstoreu(r0+0*Traits::ResPacketSize, R0);
+ pstoreu(r0+1*Traits::ResPacketSize, R1);
+ pstoreu(r0+2*Traits::ResPacketSize, R2);
}
-
- // Deal with remaining rows of the lhs
- // TODO we should vectorize if <= 8, and not strictly ==
- if(SwappedTraits::LhsProgress == 8)
+ }
+ }
+
+ //---------- Process 2 * LhsProgress rows at once ----------
+ if(mr>=2*Traits::LhsProgress)
+ {
+ // loops on each largest micro horizontal panel of lhs (2*LhsProgress x depth)
+ for(Index i=peeled_mc3; i<peeled_mc2; i+=2*LhsProgress)
+ {
+ // loops on each largest micro vertical panel of rhs (depth * nr)
+ for(Index j2=0; j2<packet_cols4; j2+=nr)
{
- // Apply the same logic but with reversed operands
- // To improve pipelining, we process 2 rows at once and accumulate even and odd products along the k dimension
- // into two different packets.
- typedef gebp_traits<RhsScalar,LhsScalar,ConjugateRhs,ConjugateLhs> SwappedTraits;
- typedef typename SwappedTraits::ResScalar SResScalar;
- typedef typename SwappedTraits::LhsPacket SLhsPacket;
- typedef typename SwappedTraits::RhsPacket SRhsPacket;
- typedef typename SwappedTraits::ResPacket SResPacket;
- typedef typename SwappedTraits::AccPacket SAccPacket;
- SwappedTraits straits;
+ // We select a 2*Traits::LhsProgress x nr micro block of res which is entirely
+ // stored into 2 x nr registers.
+
+ const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)];
+ prefetch(&blA[0]);
+
+ // gets res block as register
+ AccPacket C0, C1, C2, C3, C4, C5, C6, C7;
+ traits.initAcc(C0);
+ traits.initAcc(C1);
+ traits.initAcc(C2);
+ traits.initAcc(C3);
+ traits.initAcc(C4);
+ traits.initAcc(C5);
+ traits.initAcc(C6);
+ traits.initAcc(C7);
+
+ ResScalar* r0 = &res[(j2+0)*resStride + i];
+ ResScalar* r1 = &res[(j2+1)*resStride + i];
+ ResScalar* r2 = &res[(j2+2)*resStride + i];
+ ResScalar* r3 = &res[(j2+3)*resStride + i];
- Index rows2 = (rows & ~1);
- for(Index i=peeled_mc; i<rows2; i+=2)
+ internal::prefetch(r0);
+ internal::prefetch(r1);
+ internal::prefetch(r2);
+ internal::prefetch(r3);
+
+ // performs "inner" products
+ const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
+ prefetch(&blB[0]);
+ LhsPacket A0, A1;
+
+ for(Index k=0; k<peeled_kc; k+=pk)
{
- const LhsScalar* blA = &blockA[i*strideA+offsetA];
- prefetch(&blA[0]);
- const RhsScalar* blB = &blockB[j2*strideB+offsetB*8];
-
- EIGEN_ASM_COMMENT("begin_vectorized_multiplication_of_last_rows 2x8");
-
- SAccPacket C0,C1, C2,C3;
- straits.initAcc(C0); // even
- straits.initAcc(C1); // odd
- straits.initAcc(C2); // even
- straits.initAcc(C3); // odd
- for(Index k=0; k<depth2; k+=2)
- {
- SLhsPacket A0, A1;
- straits.loadLhsUnaligned(blB+0, A0);
- straits.loadLhsUnaligned(blB+8, A1);
- SRhsPacket B_0, B_1, B_2, B_3;
- straits.loadRhs(blA+k+0, B_0);
- straits.loadRhs(blA+k+1, B_1);
- straits.loadRhs(blA+strideA+k+0, B_2);
- straits.loadRhs(blA+strideA+k+1, B_3);
- straits.madd(A0,B_0,C0,B_0);
- straits.madd(A1,B_1,C1,B_1);
- straits.madd(A0,B_2,C2,B_2);
- straits.madd(A1,B_3,C3,B_3);
- blB += 2*nr;
- }
- if(depth2<depth)
- {
- Index k = depth-1;
- SLhsPacket A0;
- straits.loadLhsUnaligned(blB+0, A0);
- SRhsPacket B_0, B_2;
- straits.loadRhs(blA+k+0, B_0);
- straits.loadRhs(blA+strideA+k+0, B_2);
- straits.madd(A0,B_0,C0,B_0);
- straits.madd(A0,B_2,C2,B_2);
- }
- SResPacket R = pgather<SResScalar, SResPacket>(&res[j2*resStride + i], resStride);
- SResPacket alphav = pset1<SResPacket>(alpha);
- straits.acc(padd(C0,C1), alphav, R);
- pscatter(&res[j2*resStride + i], R, resStride);
-
- R = pgather<SResScalar, SResPacket>(&res[j2*resStride + i + 1], resStride);
- straits.acc(padd(C2,C3), alphav, R);
- pscatter(&res[j2*resStride + i + 1], R, resStride);
-
- EIGEN_ASM_COMMENT("end_vectorized_multiplication_of_last_rows 8");
+ IACA_START
+ EIGEN_ASM_COMMENT("begin gegp micro kernel 2p x 4");
+ RhsPacket B_0, B1;
+
+#define EIGEN_GEBGP_ONESTEP(K) \
+ traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0); \
+ traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1); \
+ traits.loadRhs(&blB[(0+4*K)*RhsProgress], B_0); \
+ traits.madd(A0, B_0, C0, B1); \
+ traits.madd(A1, B_0, C4, B_0); \
+ traits.loadRhs(&blB[1+4*K*RhsProgress], B_0); \
+ traits.madd(A0, B_0, C1, B1); \
+ traits.madd(A1, B_0, C5, B_0); \
+ traits.loadRhs(&blB[2+4*K*RhsProgress], B_0); \
+ traits.madd(A0, B_0, C2, B1); \
+ traits.madd(A1, B_0, C6, B_0); \
+ traits.loadRhs(&blB[3+4*K*RhsProgress], B_0); \
+ traits.madd(A0, B_0, C3 , B1); \
+ traits.madd(A1, B_0, C7, B_0)
+
+ internal::prefetch(blB+(48+0));
+ EIGEN_GEBGP_ONESTEP(0);
+ EIGEN_GEBGP_ONESTEP(1);
+ EIGEN_GEBGP_ONESTEP(2);
+ EIGEN_GEBGP_ONESTEP(3);
+ internal::prefetch(blB+(48+16));
+ EIGEN_GEBGP_ONESTEP(4);
+ EIGEN_GEBGP_ONESTEP(5);
+ EIGEN_GEBGP_ONESTEP(6);
+ EIGEN_GEBGP_ONESTEP(7);
+
+ blB += pk*4*RhsProgress;
+ blA += pk*(2*Traits::LhsProgress);
+ IACA_END
}
- if(rows2!=rows)
+ // process remaining peeled loop
+ for(Index k=peeled_kc; k<depth; k++)
{
- Index i = rows-1;
- const LhsScalar* blA = &blockA[i*strideA+offsetA];
- prefetch(&blA[0]);
- const RhsScalar* blB = &blockB[j2*strideB+offsetB*8];
-
- EIGEN_ASM_COMMENT("begin_vectorized_multiplication_of_last_rows 8");
-
- SAccPacket C0,C1;
- straits.initAcc(C0); // even
- straits.initAcc(C1); // odd
-
- for(Index k=0; k<depth2; k+=2)
- {
- SLhsPacket A0, A1;
- straits.loadLhsUnaligned(blB+0, A0);
- straits.loadLhsUnaligned(blB+8, A1);
- SRhsPacket B_0, B_1;
- straits.loadRhs(blA+k+0, B_0);
- straits.loadRhs(blA+k+1, B_1);
- straits.madd(A0,B_0,C0,B_0);
- straits.madd(A1,B_1,C1,B_1);
- blB += 2*8;
- }
- if(depth!=depth2)
- {
- Index k = depth-1;
- SLhsPacket A0;
- straits.loadLhsUnaligned(blB+0, A0);
- SRhsPacket B_0;
- straits.loadRhs(blA+k+0, B_0);
- straits.madd(A0,B_0,C0,B_0);
- }
- SResPacket R = pgather<SResScalar, SResPacket>(&res[j2*resStride + i], resStride);
- SResPacket alphav = pset1<SResPacket>(alpha);
- straits.acc(padd(C0,C1), alphav, R);
- pscatter(&res[j2*resStride + i], R, resStride);
+ RhsPacket B_0, B1;
+ EIGEN_GEBGP_ONESTEP(0);
+ blB += 4*RhsProgress;
+ blA += 2*Traits::LhsProgress;
}
+#undef EIGEN_GEBGP_ONESTEP
+
+ ResPacket R0, R1, R2, R3;
+ ResPacket alphav = pset1<ResPacket>(alpha);
+
+ R0 = ploadu<ResPacket>(r0+0*Traits::ResPacketSize);
+ R1 = ploadu<ResPacket>(r0+1*Traits::ResPacketSize);
+ R2 = ploadu<ResPacket>(r1+0*Traits::ResPacketSize);
+ R3 = ploadu<ResPacket>(r1+1*Traits::ResPacketSize);
+ traits.acc(C0, alphav, R0);
+ traits.acc(C4, alphav, R1);
+ traits.acc(C1, alphav, R2);
+ traits.acc(C5, alphav, R3);
+ pstoreu(r0+0*Traits::ResPacketSize, R0);
+ pstoreu(r0+1*Traits::ResPacketSize, R1);
+ pstoreu(r1+0*Traits::ResPacketSize, R2);
+ pstoreu(r1+1*Traits::ResPacketSize, R3);
+
+ R0 = ploadu<ResPacket>(r2+0*Traits::ResPacketSize);
+ R1 = ploadu<ResPacket>(r2+1*Traits::ResPacketSize);
+ R2 = ploadu<ResPacket>(r3+0*Traits::ResPacketSize);
+ R3 = ploadu<ResPacket>(r3+1*Traits::ResPacketSize);
+ traits.acc(C2, alphav, R0);
+ traits.acc(C6, alphav, R1);
+ traits.acc(C3, alphav, R2);
+ traits.acc(C7, alphav, R3);
+ pstoreu(r2+0*Traits::ResPacketSize, R0);
+ pstoreu(r2+1*Traits::ResPacketSize, R1);
+ pstoreu(r3+0*Traits::ResPacketSize, R2);
+ pstoreu(r3+1*Traits::ResPacketSize, R3);
}
- else
+
+ // Deal with remaining columns of the rhs
+ for(Index j2=packet_cols4; j2<cols; j2++)
{
- // Pure scalar path
- for(Index i=peeled_mc; i<rows; i++)
+ // One column at a time
+ const LhsScalar* blA = &blockA[i*strideA+offsetA*(2*Traits::LhsProgress)];
+ prefetch(&blA[0]);
+
+ // gets res block as register
+ AccPacket C0, C4;
+ traits.initAcc(C0);
+ traits.initAcc(C4);
+
+ ResScalar* r0 = &res[(j2+0)*resStride + i];
+
+ // performs "inner" products
+ const RhsScalar* blB = &blockB[j2*strideB+offsetB];
+ LhsPacket A0, A1;
+
+ for(Index k=0; k<peeled_kc; k+=pk)
{
- const LhsScalar* blA = &blockA[i*strideA+offsetA];
- prefetch(&blA[0]);
- const RhsScalar* blB = &blockB[j2*strideB+offsetB*8];
-
- // gets a 1 x 8 res block as registers
- ResScalar C0(0), C1(0), C2(0), C3(0), C4(0), C5(0), C6(0), C7(0);
+ EIGEN_ASM_COMMENT("begin gegp micro kernel 2p x 1");
+ RhsPacket B_0, B1;
+
+#define EIGEN_GEBGP_ONESTEP(K) \
+ traits.loadLhs(&blA[(0+2*K)*LhsProgress], A0); \
+ traits.loadLhs(&blA[(1+2*K)*LhsProgress], A1); \
+ traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
+ traits.madd(A0, B_0, C0, B1); \
+ traits.madd(A1, B_0, C4, B_0)
+
+ EIGEN_GEBGP_ONESTEP(0);
+ EIGEN_GEBGP_ONESTEP(1);
+ EIGEN_GEBGP_ONESTEP(2);
+ EIGEN_GEBGP_ONESTEP(3);
+ EIGEN_GEBGP_ONESTEP(4);
+ EIGEN_GEBGP_ONESTEP(5);
+ EIGEN_GEBGP_ONESTEP(6);
+ EIGEN_GEBGP_ONESTEP(7);
+
+ blB += pk*RhsProgress;
+ blA += pk*2*Traits::LhsProgress;
+ }
- for(Index k=0; k<depth; k++)
- {
- LhsScalar A0;
- RhsScalar B_0, B_1;
-
- A0 = blA[k];
-
- B_0 = blB[0];
- B_1 = blB[1];
- MADD(cj,A0,B_0,C0, B_0);
- MADD(cj,A0,B_1,C1, B_1);
-
- B_0 = blB[2];
- B_1 = blB[3];
- MADD(cj,A0,B_0,C2, B_0);
- MADD(cj,A0,B_1,C3, B_1);
-
- B_0 = blB[4];
- B_1 = blB[5];
- MADD(cj,A0,B_0,C4, B_0);
- MADD(cj,A0,B_1,C5, B_1);
-
- B_0 = blB[6];
- B_1 = blB[7];
- MADD(cj,A0,B_0,C6, B_0);
- MADD(cj,A0,B_1,C7, B_1);
-
- blB += 8;
- }
- res[(j2+0)*resStride + i] += alpha*C0;
- res[(j2+1)*resStride + i] += alpha*C1;
- res[(j2+2)*resStride + i] += alpha*C2;
- res[(j2+3)*resStride + i] += alpha*C3;
- res[(j2+4)*resStride + i] += alpha*C4;
- res[(j2+5)*resStride + i] += alpha*C5;
- res[(j2+6)*resStride + i] += alpha*C6;
- res[(j2+7)*resStride + i] += alpha*C7;
+ // process remaining peeled loop
+ for(Index k=peeled_kc; k<depth; k++)
+ {
+ RhsPacket B_0, B1;
+ EIGEN_GEBGP_ONESTEP(0);
+ blB += RhsProgress;
+ blA += 2*Traits::LhsProgress;
}
+#undef EIGEN_GEBGP_ONESTEP
+ ResPacket R0, R1;
+ ResPacket alphav = pset1<ResPacket>(alpha);
+
+ R0 = ploadu<ResPacket>(r0+0*Traits::ResPacketSize);
+ R1 = ploadu<ResPacket>(r0+1*Traits::ResPacketSize);
+ traits.acc(C0, alphav, R0);
+ traits.acc(C4, alphav, R1);
+ pstoreu(r0+0*Traits::ResPacketSize, R0);
+ pstoreu(r0+1*Traits::ResPacketSize, R1);
}
}
}
-
- // Second pass using depth x 4 panels
- // If nr==8, then we have at most one such panel
- // TODO: with 16 registers, we coud optimize this part to leverage more pipelinining,
- // for instance, by using a 2 packet * 4 kernel. Useful when the rhs is thin
- if(nr>=4)
+ //---------- Process 1 * LhsProgress rows at once ----------
+ if(mr>=1*Traits::LhsProgress)
{
- for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
+ // loops on each largest micro horizontal panel of lhs (1*LhsProgress x depth)
+ for(Index i=peeled_mc2; i<peeled_mc1; i+=1*LhsProgress)
{
- // loops on each largest micro horizontal panel of lhs (mr x depth)
- // => we select a mr x 4 micro block of res which is entirely
- // stored into mr/packet_size x 4 registers.
- for(Index i=0; i<peeled_mc; i+=mr)
+ // loops on each largest micro vertical panel of rhs (depth * nr)
+ for(Index j2=0; j2<packet_cols4; j2+=nr)
{
- const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];
+ // We select a 1*Traits::LhsProgress x nr micro block of res which is entirely
+ // stored into 1 x nr registers.
+
+ const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];
prefetch(&blA[0]);
// gets res block as register
@@ -937,100 +1117,239 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,mr,nr,ConjugateLhs,ConjugateRhs>
traits.initAcc(C3);
ResScalar* r0 = &res[(j2+0)*resStride + i];
+ ResScalar* r1 = &res[(j2+1)*resStride + i];
+ ResScalar* r2 = &res[(j2+2)*resStride + i];
+ ResScalar* r3 = &res[(j2+3)*resStride + i];
+
+ internal::prefetch(r0);
+ internal::prefetch(r1);
+ internal::prefetch(r2);
+ internal::prefetch(r3);
// performs "inner" products
- const RhsScalar* blB = &blockB[j2*strideB+offsetB*4];
+ const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
+ prefetch(&blB[0]);
LhsPacket A0;
+
for(Index k=0; k<peeled_kc; k+=pk)
{
- EIGEN_ASM_COMMENT("begin gegp micro kernel 1p x 4");
-
+ IACA_START
+            EIGEN_ASM_COMMENT("begin gegp micro kernel 1p x 4");
RhsPacket B_0, B1;
-#define EIGEN_GEBGP_ONESTEP4(K) \
- traits.loadLhs(&blA[K*LhsProgress], A0); \
- traits.broadcastRhs(&blB[0+4*K*RhsProgress], B_0, B1); \
- traits.madd(A0, B_0,C0, B_0); \
- traits.madd(A0, B1, C1, B1); \
- traits.broadcastRhs(&blB[2+4*K*RhsProgress], B_0, B1); \
- traits.madd(A0, B_0,C2, B_0); \
- traits.madd(A0, B1, C3, B1)
-
- EIGEN_GEBGP_ONESTEP4(0);
- EIGEN_GEBGP_ONESTEP4(1);
- EIGEN_GEBGP_ONESTEP4(2);
- EIGEN_GEBGP_ONESTEP4(3);
- EIGEN_GEBGP_ONESTEP4(4);
- EIGEN_GEBGP_ONESTEP4(5);
- EIGEN_GEBGP_ONESTEP4(6);
- EIGEN_GEBGP_ONESTEP4(7);
+
+#define EIGEN_GEBGP_ONESTEP(K) \
+ traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0); \
+ traits.loadRhs(&blB[(0+4*K)*RhsProgress], B_0); \
+ traits.madd(A0, B_0, C0, B1); \
+ traits.loadRhs(&blB[1+4*K*RhsProgress], B_0); \
+ traits.madd(A0, B_0, C1, B1); \
+ traits.loadRhs(&blB[2+4*K*RhsProgress], B_0); \
+ traits.madd(A0, B_0, C2, B1); \
+ traits.loadRhs(&blB[3+4*K*RhsProgress], B_0); \
+ traits.madd(A0, B_0, C3 , B1); \
+
+ internal::prefetch(blB+(48+0));
+ EIGEN_GEBGP_ONESTEP(0);
+ EIGEN_GEBGP_ONESTEP(1);
+ EIGEN_GEBGP_ONESTEP(2);
+ EIGEN_GEBGP_ONESTEP(3);
+ internal::prefetch(blB+(48+16));
+ EIGEN_GEBGP_ONESTEP(4);
+ EIGEN_GEBGP_ONESTEP(5);
+ EIGEN_GEBGP_ONESTEP(6);
+ EIGEN_GEBGP_ONESTEP(7);
blB += pk*4*RhsProgress;
- blA += pk*mr;
+ blA += pk*(1*Traits::LhsProgress);
+ IACA_END
}
- // process remaining of peeled loop
+ // process remaining peeled loop
for(Index k=peeled_kc; k<depth; k++)
{
RhsPacket B_0, B1;
- EIGEN_GEBGP_ONESTEP4(0);
-
+ EIGEN_GEBGP_ONESTEP(0);
blB += 4*RhsProgress;
- blA += mr;
+ blA += 1*Traits::LhsProgress;
}
- #undef EIGEN_GEBGP_ONESTEP4
-
- ResPacket R0, R1, R2;
+#undef EIGEN_GEBGP_ONESTEP
+
+ ResPacket R0, R1;
ResPacket alphav = pset1<ResPacket>(alpha);
-
- R0 = ploadu<ResPacket>(r0+0*resStride);
- R1 = ploadu<ResPacket>(r0+1*resStride);
- R2 = ploadu<ResPacket>(r0+2*resStride);
+
+ R0 = ploadu<ResPacket>(r0+0*Traits::ResPacketSize);
+ R1 = ploadu<ResPacket>(r1+0*Traits::ResPacketSize);
traits.acc(C0, alphav, R0);
- pstoreu(r0+0*resStride, R0);
- R0 = ploadu<ResPacket>(r0+3*resStride);
-
- traits.acc(C1, alphav, R1);
- traits.acc(C2, alphav, R2);
- traits.acc(C3, alphav, R0);
+ traits.acc(C1, alphav, R1);
+ pstoreu(r0+0*Traits::ResPacketSize, R0);
+ pstoreu(r1+0*Traits::ResPacketSize, R1);
- pstoreu(r0+1*resStride, R1);
- pstoreu(r0+2*resStride, R2);
- pstoreu(r0+3*resStride, R0);
+ R0 = ploadu<ResPacket>(r2+0*Traits::ResPacketSize);
+ R1 = ploadu<ResPacket>(r3+0*Traits::ResPacketSize);
+ traits.acc(C2, alphav, R0);
+ traits.acc(C3, alphav, R1);
+ pstoreu(r2+0*Traits::ResPacketSize, R0);
+ pstoreu(r3+0*Traits::ResPacketSize, R1);
}
- for(Index i=peeled_mc; i<rows; i++)
+ // Deal with remaining columns of the rhs
+ for(Index j2=packet_cols4; j2<cols; j2++)
+ {
+ // One column at a time
+ const LhsScalar* blA = &blockA[i*strideA+offsetA*(1*Traits::LhsProgress)];
+ prefetch(&blA[0]);
+
+ // gets res block as register
+ AccPacket C0;
+ traits.initAcc(C0);
+
+ ResScalar* r0 = &res[(j2+0)*resStride + i];
+
+ // performs "inner" products
+ const RhsScalar* blB = &blockB[j2*strideB+offsetB];
+ LhsPacket A0;
+
+ for(Index k=0; k<peeled_kc; k+=pk)
+ {
+            EIGEN_ASM_COMMENT("begin gegp micro kernel 1p x 1");
+ RhsPacket B_0;
+
+#define EIGEN_GEBGP_ONESTEP(K) \
+ traits.loadLhs(&blA[(0+1*K)*LhsProgress], A0); \
+ traits.loadRhs(&blB[(0+K)*RhsProgress], B_0); \
+ traits.madd(A0, B_0, C0, B_0); \
+
+ EIGEN_GEBGP_ONESTEP(0);
+ EIGEN_GEBGP_ONESTEP(1);
+ EIGEN_GEBGP_ONESTEP(2);
+ EIGEN_GEBGP_ONESTEP(3);
+ EIGEN_GEBGP_ONESTEP(4);
+ EIGEN_GEBGP_ONESTEP(5);
+ EIGEN_GEBGP_ONESTEP(6);
+ EIGEN_GEBGP_ONESTEP(7);
+
+ blB += pk*RhsProgress;
+ blA += pk*1*Traits::LhsProgress;
+ }
+
+ // process remaining peeled loop
+ for(Index k=peeled_kc; k<depth; k++)
+ {
+ RhsPacket B_0;
+ EIGEN_GEBGP_ONESTEP(0);
+ blB += RhsProgress;
+ blA += 1*Traits::LhsProgress;
+ }
+#undef EIGEN_GEBGP_ONESTEP
+ ResPacket R0;
+ ResPacket alphav = pset1<ResPacket>(alpha);
+ R0 = ploadu<ResPacket>(r0+0*Traits::ResPacketSize);
+ traits.acc(C0, alphav, R0);
+ pstoreu(r0+0*Traits::ResPacketSize, R0);
+ }
+ }
+ }
+ //---------- Process remaining rows, 1 at once ----------
+ {
+ // loop on each row of the lhs (1*LhsProgress x depth)
+ for(Index i=peeled_mc1; i<rows; i+=1)
+ {
+ // loop on each panel of the rhs
+ for(Index j2=0; j2<packet_cols4; j2+=nr)
{
const LhsScalar* blA = &blockA[i*strideA+offsetA];
prefetch(&blA[0]);
- const RhsScalar* blB = &blockB[j2*strideB+offsetB*4];
+ const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
- // TODO vectorize in more cases
- if(SwappedTraits::LhsProgress==4)
+ if( (SwappedTraits::LhsProgress % 4)==0 )
{
- EIGEN_ASM_COMMENT("begin_vectorized_multiplication_of_last_rows 1x4");
-
- SAccPacket C0;
+          // NOTE The following piece of code won't work for 512-bit registers
+ SAccPacket C0, C1, C2, C3;
straits.initAcc(C0);
- for(Index k=0; k<depth; k++)
+ straits.initAcc(C1);
+ straits.initAcc(C2);
+ straits.initAcc(C3);
+
+ const Index spk = (std::max)(1,SwappedTraits::LhsProgress/4);
+ const Index endk = (depth/spk)*spk;
+ const Index endk4 = (depth/(spk*4))*(spk*4);
+
+ Index k=0;
+ for(; k<endk4; k+=4*spk)
{
SLhsPacket A0;
- straits.loadLhsUnaligned(blB, A0);
SRhsPacket B_0;
- straits.loadRhs(&blA[k], B_0);
- SRhsPacket T0;
- straits.madd(A0,B_0,C0,T0);
- blB += 4;
+
+ straits.loadLhsUnaligned(blB+0*SwappedTraits::LhsProgress, A0);
+ straits.loadRhsQuad(blA+0*spk, B_0);
+ straits.madd(A0,B_0,C0,B_0);
+
+ straits.loadLhsUnaligned(blB+1*SwappedTraits::LhsProgress, A0);
+ straits.loadRhsQuad(blA+1*spk, B_0);
+ straits.madd(A0,B_0,C1,B_0);
+
+ straits.loadLhsUnaligned(blB+2*SwappedTraits::LhsProgress, A0);
+ straits.loadRhsQuad(blA+2*spk, B_0);
+ straits.madd(A0,B_0,C2,B_0);
+
+ straits.loadLhsUnaligned(blB+3*SwappedTraits::LhsProgress, A0);
+ straits.loadRhsQuad(blA+3*spk, B_0);
+ straits.madd(A0,B_0,C3,B_0);
+
+ blB += 4*SwappedTraits::LhsProgress;
+ blA += 4*spk;
+ }
+ C0 = padd(padd(C0,C1),padd(C2,C3));
+ for(; k<endk; k+=spk)
+ {
+ SLhsPacket A0;
+ SRhsPacket B_0;
+
+ straits.loadLhsUnaligned(blB, A0);
+ straits.loadRhsQuad(blA, B_0);
+ straits.madd(A0,B_0,C0,B_0);
+
+ blB += SwappedTraits::LhsProgress;
+ blA += spk;
+ }
+ if(SwappedTraits::LhsProgress==8)
+ {
+ // Special case where we have to first reduce the accumulation register C0
+ typedef typename conditional<SwappedTraits::LhsProgress==8,typename unpacket_traits<SResPacket>::half,SResPacket>::type SResPacketHalf;
+ typedef typename conditional<SwappedTraits::LhsProgress==8,typename unpacket_traits<SLhsPacket>::half,SLhsPacket>::type SLhsPacketHalf;
+ typedef typename conditional<SwappedTraits::LhsProgress==8,typename unpacket_traits<SLhsPacket>::half,SRhsPacket>::type SRhsPacketHalf;
+ typedef typename conditional<SwappedTraits::LhsProgress==8,typename unpacket_traits<SAccPacket>::half,SAccPacket>::type SAccPacketHalf;
+
+ SResPacketHalf R = pgather<SResScalar, SResPacketHalf>(&res[j2*resStride + i], resStride);
+ SResPacketHalf alphav = pset1<SResPacketHalf>(alpha);
+
+ if(depth-endk>0)
+ {
+ // We have to handle the last row of the rhs which corresponds to a half-packet
+ SLhsPacketHalf a0;
+ SRhsPacketHalf b0;
+ straits.loadLhsUnaligned(blB, a0);
+ straits.loadRhs(blA, b0);
+ SAccPacketHalf c0 = predux4(C0);
+ straits.madd(a0,b0,c0,b0);
+ straits.acc(c0, alphav, R);
+ }
+ else
+ {
+ straits.acc(predux4(C0), alphav, R);
+ }
+ pscatter(&res[j2*resStride + i], R, resStride);
+ }
+ else
+ {
+ SResPacket R = pgather<SResScalar, SResPacket>(&res[j2*resStride + i], resStride);
+ SResPacket alphav = pset1<SResPacket>(alpha);
+ straits.acc(C0, alphav, R);
+ pscatter(&res[j2*resStride + i], R, resStride);
}
- SResPacket R = pgather<SResScalar, SResPacket>(&res[j2*resStride + i], resStride);
- SResPacket alphav = pset1<SResPacket>(alpha);
- straits.acc(C0, alphav, R);
- pscatter(&res[j2*resStride + i], R, resStride);
-
- EIGEN_ASM_COMMENT("end_vectorized_multiplication_of_last_rows 1x4");
}
- else
+ else // scalar path
{
- // Pure scalar path
- // gets a 1 x 4 res block as registers
+ // get a 1 x 4 res block as registers
ResScalar C0(0), C1(0), C2(0), C3(0);
for(Index k=0; k<depth; k++)
@@ -1049,7 +1368,7 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,mr,nr,ConjugateLhs,ConjugateRhs>
B_1 = blB[3];
MADD(cj,A0,B_0,C2, B_0);
MADD(cj,A0,B_1,C3, B_1);
-
+
blB += 4;
}
res[(j2+0)*resStride + i] += alpha*C0;
@@ -1058,56 +1377,22 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,mr,nr,ConjugateLhs,ConjugateRhs>
res[(j2+3)*resStride + i] += alpha*C3;
}
}
- }
- }
-
- // process remaining rhs/res columns one at a time
- for(Index j2=packet_cols4; j2<cols; j2++)
- {
- // vectorized path
- for(Index i=0; i<peeled_mc; i+=mr)
- {
- // get res block as registers
- AccPacket C0;
- traits.initAcc(C0);
-
- const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];
- prefetch(&blA[0]);
- const RhsScalar* blB = &blockB[j2*strideB+offsetB];
- for(Index k=0; k<depth; k++)
- {
- LhsPacket A0;
- RhsPacket B_0;
-
- traits.loadLhs(blA, A0);
- traits.loadRhs(blB, B_0);
- traits.madd(A0,B_0,C0,B_0);
-
- blB += RhsProgress;
- blA += LhsProgress;
- }
- ResPacket R0;
- ResPacket alphav = pset1<ResPacket>(alpha);
- ResScalar* r0 = &res[(j2+0)*resStride + i];
- R0 = ploadu<ResPacket>(r0);
- traits.acc(C0, alphav, R0);
- pstoreu(r0, R0);
- }
- // pure scalar path
- for(Index i=peeled_mc; i<rows; i++)
- {
- const LhsScalar* blA = &blockA[i*strideA+offsetA];
- prefetch(&blA[0]);
- // gets a 1 x 1 res block as registers
- ResScalar C0(0);
- const RhsScalar* blB = &blockB[j2*strideB+offsetB];
- for(Index k=0; k<depth; k++)
+ // remaining columns
+ for(Index j2=packet_cols4; j2<cols; j2++)
{
- LhsScalar A0 = blA[k];
- RhsScalar B_0 = blB[k];
- MADD(cj, A0, B_0, C0, B_0);
+ const LhsScalar* blA = &blockA[i*strideA+offsetA];
+ prefetch(&blA[0]);
+ // gets a 1 x 1 res block as registers
+ ResScalar C0(0);
+ const RhsScalar* blB = &blockB[j2*strideB+offsetB];
+ for(Index k=0; k<depth; k++)
+ {
+ LhsScalar A0 = blA[k];
+ RhsScalar B_0 = blB[k];
+ MADD(cj, A0, B_0, C0, B_0);
+ }
+ res[(j2+0)*resStride + i] += alpha*C0;
}
- res[(j2+0)*resStride + i] += alpha*C0;
}
}
}
@@ -1129,14 +1414,14 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,mr,nr,ConjugateLhs,ConjugateRhs>
//
// 32 33 34 35 ...
// 36 36 38 39 ...
-template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder, bool Conjugate, bool PanelMode>
-struct gemm_pack_lhs
+template<typename Scalar, typename Index, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
+struct gemm_pack_lhs<Scalar, Index, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
{
EIGEN_DONT_INLINE void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride=0, Index offset=0);
};
-template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder, bool Conjugate, bool PanelMode>
-EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, Pack1, Pack2, StorageOrder, Conjugate, PanelMode>
+template<typename Scalar, typename Index, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
+EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, Pack1, Pack2, ColMajor, Conjugate, PanelMode>
::operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride, Index offset)
{
typedef typename packet_traits<Scalar>::type Packet;
@@ -1146,87 +1431,174 @@ EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, Pack1, Pack2, StorageOrder,
EIGEN_UNUSED_VARIABLE(stride);
EIGEN_UNUSED_VARIABLE(offset);
eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
- eigen_assert( (StorageOrder==RowMajor) || ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) || (Pack1<=4) );
+ eigen_assert( ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) || (Pack1<=4) );
conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
- const_blas_data_mapper<Scalar, Index, StorageOrder> lhs(_lhs,lhsStride);
+ const_blas_data_mapper<Scalar, Index, ColMajor> lhs(_lhs,lhsStride);
Index count = 0;
- Index peeled_mc = (rows/Pack1)*Pack1;
- for(Index i=0; i<peeled_mc; i+=Pack1)
+
+ const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
+ const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
+ const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
+
+ // Pack 3 packets
+ if(Pack1>=3*PacketSize)
{
- if(PanelMode) count += Pack1 * offset;
-
- if(StorageOrder==ColMajor)
+ for(Index i=0; i<peeled_mc3; i+=3*PacketSize)
{
+ if(PanelMode) count += (3*PacketSize) * offset;
+
for(Index k=0; k<depth; k++)
{
- if((Pack1%PacketSize)==0)
- {
- Packet A, B, C, D;
- if(Pack1>=1*PacketSize) A = ploadu<Packet>(&lhs(i+0*PacketSize, k));
- if(Pack1>=2*PacketSize) B = ploadu<Packet>(&lhs(i+1*PacketSize, k));
- if(Pack1>=3*PacketSize) C = ploadu<Packet>(&lhs(i+2*PacketSize, k));
- if(Pack1>=4*PacketSize) D = ploadu<Packet>(&lhs(i+3*PacketSize, k));
- if(Pack1>=1*PacketSize) { pstore(blockA+count, cj.pconj(A)); count+=PacketSize; }
- if(Pack1>=2*PacketSize) { pstore(blockA+count, cj.pconj(B)); count+=PacketSize; }
- if(Pack1>=3*PacketSize) { pstore(blockA+count, cj.pconj(C)); count+=PacketSize; }
- if(Pack1>=4*PacketSize) { pstore(blockA+count, cj.pconj(D)); count+=PacketSize; }
- }
- else
- {
- if(Pack1>=1) blockA[count++] = cj(lhs(i+0, k));
- if(Pack1>=2) blockA[count++] = cj(lhs(i+1, k));
- if(Pack1>=3) blockA[count++] = cj(lhs(i+2, k));
- if(Pack1>=4) blockA[count++] = cj(lhs(i+3, k));
- }
+ Packet A, B, C;
+ A = ploadu<Packet>(&lhs(i+0*PacketSize, k));
+ B = ploadu<Packet>(&lhs(i+1*PacketSize, k));
+ C = ploadu<Packet>(&lhs(i+2*PacketSize, k));
+ pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
+ pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
+ pstore(blockA+count, cj.pconj(C)); count+=PacketSize;
}
+ if(PanelMode) count += (3*PacketSize) * (stride-offset-depth);
}
- else
+ }
+ // Pack 2 packets
+ if(Pack1>=2*PacketSize)
+ {
+ for(Index i=peeled_mc3; i<peeled_mc2; i+=2*PacketSize)
{
+ if(PanelMode) count += (2*PacketSize) * offset;
+
+ for(Index k=0; k<depth; k++)
+ {
+ Packet A, B;
+ A = ploadu<Packet>(&lhs(i+0*PacketSize, k));
+ B = ploadu<Packet>(&lhs(i+1*PacketSize, k));
+ pstore(blockA+count, cj.pconj(A)); count+=PacketSize;
+ pstore(blockA+count, cj.pconj(B)); count+=PacketSize;
+ }
+ if(PanelMode) count += (2*PacketSize) * (stride-offset-depth);
+ }
+ }
+ // Pack 1 packets
+ if(Pack1>=1*PacketSize)
+ {
+ for(Index i=peeled_mc2; i<peeled_mc1; i+=1*PacketSize)
+ {
+ if(PanelMode) count += (1*PacketSize) * offset;
+
+ for(Index k=0; k<depth; k++)
+ {
+ Packet A;
+ A = ploadu<Packet>(&lhs(i+0*PacketSize, k));
+ pstore(blockA+count, cj.pconj(A));
+ count+=PacketSize;
+ }
+ if(PanelMode) count += (1*PacketSize) * (stride-offset-depth);
+ }
+ }
+ // Pack scalars
+// if(rows-peeled_mc>=Pack2)
+// {
+// if(PanelMode) count += Pack2*offset;
+// for(Index k=0; k<depth; k++)
+// for(Index w=0; w<Pack2; w++)
+// blockA[count++] = cj(lhs(peeled_mc+w, k));
+// if(PanelMode) count += Pack2 * (stride-offset-depth);
+// peeled_mc += Pack2;
+// }
+ for(Index i=peeled_mc1; i<rows; i++)
+ {
+ if(PanelMode) count += offset;
+ for(Index k=0; k<depth; k++)
+ blockA[count++] = cj(lhs(i, k));
+ if(PanelMode) count += (stride-offset-depth);
+ }
+}
+
+template<typename Scalar, typename Index, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
+struct gemm_pack_lhs<Scalar, Index, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
+{
+ EIGEN_DONT_INLINE void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride=0, Index offset=0);
+};
+
+template<typename Scalar, typename Index, int Pack1, int Pack2, bool Conjugate, bool PanelMode>
+EIGEN_DONT_INLINE void gemm_pack_lhs<Scalar, Index, Pack1, Pack2, RowMajor, Conjugate, PanelMode>
+ ::operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride, Index depth, Index rows, Index stride, Index offset)
+{
+ typedef typename packet_traits<Scalar>::type Packet;
+ enum { PacketSize = packet_traits<Scalar>::size };
+
+ EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
+ EIGEN_UNUSED_VARIABLE(stride);
+ EIGEN_UNUSED_VARIABLE(offset);
+ eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
+ conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
+ const_blas_data_mapper<Scalar, Index, RowMajor> lhs(_lhs,lhsStride);
+ Index count = 0;
+
+// const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
+// const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
+// const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
+
+ int pack_packets = Pack1/PacketSize;
+ Index i = 0;
+ while(pack_packets>0)
+ {
+
+ Index remaining_rows = rows-i;
+ Index peeled_mc = i+(remaining_rows/(pack_packets*PacketSize))*(pack_packets*PacketSize);
+// std::cout << "pack_packets = " << pack_packets << " from " << i << " to " << peeled_mc << "\n";
+ for(; i<peeled_mc; i+=pack_packets*PacketSize)
+ {
+ if(PanelMode) count += (pack_packets*PacketSize) * offset;
+
const Index peeled_k = (depth/PacketSize)*PacketSize;
Index k=0;
- for(; k<peeled_k; k+=PacketSize) {
- for (Index m = 0; m < Pack1; m += PacketSize) {
+ for(; k<peeled_k; k+=PacketSize)
+ {
+ for (Index m = 0; m < (pack_packets*PacketSize); m += PacketSize)
+ {
Kernel<Packet> kernel;
- for (int p = 0; p < PacketSize; ++p) {
- kernel.packet[p] = ploadu<Packet>(&lhs(i+p+m, k));
- }
+ for (int p = 0; p < PacketSize; ++p) kernel.packet[p] = ploadu<Packet>(&lhs(i+p+m, k));
ptranspose(kernel);
- for (int p = 0; p < PacketSize; ++p) {
- pstore(blockA+count+m+Pack1*p, cj.pconj(kernel.packet[p]));
- }
+ for (int p = 0; p < PacketSize; ++p) pstore(blockA+count+m+(pack_packets*PacketSize)*p, cj.pconj(kernel.packet[p]));
}
- count += PacketSize*Pack1;
+ count += PacketSize*(pack_packets*PacketSize);
}
- for(; k<depth; k++) {
+ for(; k<depth; k++)
+ {
Index w=0;
- for(; w<Pack1-3; w+=4)
+ for(; w<(pack_packets*PacketSize)-3; w+=4)
{
Scalar a(cj(lhs(i+w+0, k))),
- b(cj(lhs(i+w+1, k))),
- c(cj(lhs(i+w+2, k))),
- d(cj(lhs(i+w+3, k)));
+ b(cj(lhs(i+w+1, k))),
+ c(cj(lhs(i+w+2, k))),
+ d(cj(lhs(i+w+3, k)));
blockA[count++] = a;
blockA[count++] = b;
blockA[count++] = c;
blockA[count++] = d;
}
- if(Pack1%4)
- for(;w<Pack1;++w)
+ if(PacketSize%4)
+ for(;w<pack_packets*PacketSize;++w)
blockA[count++] = cj(lhs(i+w, k));
}
+
+ if(PanelMode) count += (pack_packets*PacketSize) * (stride-offset-depth);
}
- if(PanelMode) count += Pack1 * (stride-offset-depth);
- }
- if(rows-peeled_mc>=Pack2)
- {
- if(PanelMode) count += Pack2*offset;
- for(Index k=0; k<depth; k++)
- for(Index w=0; w<Pack2; w++)
- blockA[count++] = cj(lhs(peeled_mc+w, k));
- if(PanelMode) count += Pack2 * (stride-offset-depth);
- peeled_mc += Pack2;
+
+ pack_packets--;
}
- for(Index i=peeled_mc; i<rows; i++)
+
+// if(rows-peeled_mc>=Pack2)
+// {
+// if(PanelMode) count += Pack2*offset;
+// for(Index k=0; k<depth; k++)
+// for(Index w=0; w<Pack2; w++)
+// blockA[count++] = cj(lhs(peeled_mc+w, k));
+// if(PanelMode) count += Pack2 * (stride-offset-depth);
+// peeled_mc += Pack2;
+// }
+ for(; i<rows; i++)
{
if(PanelMode) count += offset;
for(Index k=0; k<depth; k++)
@@ -1263,51 +1635,51 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, Pan
Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
Index count = 0;
const Index peeled_k = (depth/PacketSize)*PacketSize;
- if(nr>=8)
- {
- for(Index j2=0; j2<packet_cols8; j2+=8)
- {
- // skip what we have before
- if(PanelMode) count += 8 * offset;
- const Scalar* b0 = &rhs[(j2+0)*rhsStride];
- const Scalar* b1 = &rhs[(j2+1)*rhsStride];
- const Scalar* b2 = &rhs[(j2+2)*rhsStride];
- const Scalar* b3 = &rhs[(j2+3)*rhsStride];
- const Scalar* b4 = &rhs[(j2+4)*rhsStride];
- const Scalar* b5 = &rhs[(j2+5)*rhsStride];
- const Scalar* b6 = &rhs[(j2+6)*rhsStride];
- const Scalar* b7 = &rhs[(j2+7)*rhsStride];
- Index k=0;
- if(PacketSize==8) // TODO enbale vectorized transposition for PacketSize==4
- {
- for(; k<peeled_k; k+=PacketSize) {
- Kernel<Packet> kernel;
- for (int p = 0; p < PacketSize; ++p) {
- kernel.packet[p] = ploadu<Packet>(&rhs[(j2+p)*rhsStride+k]);
- }
- ptranspose(kernel);
- for (int p = 0; p < PacketSize; ++p) {
- pstoreu(blockB+count, cj.pconj(kernel.packet[p]));
- count+=PacketSize;
- }
- }
- }
- for(; k<depth; k++)
- {
- blockB[count+0] = cj(b0[k]);
- blockB[count+1] = cj(b1[k]);
- blockB[count+2] = cj(b2[k]);
- blockB[count+3] = cj(b3[k]);
- blockB[count+4] = cj(b4[k]);
- blockB[count+5] = cj(b5[k]);
- blockB[count+6] = cj(b6[k]);
- blockB[count+7] = cj(b7[k]);
- count += 8;
- }
- // skip what we have after
- if(PanelMode) count += 8 * (stride-offset-depth);
- }
- }
+// if(nr>=8)
+// {
+// for(Index j2=0; j2<packet_cols8; j2+=8)
+// {
+// // skip what we have before
+// if(PanelMode) count += 8 * offset;
+// const Scalar* b0 = &rhs[(j2+0)*rhsStride];
+// const Scalar* b1 = &rhs[(j2+1)*rhsStride];
+// const Scalar* b2 = &rhs[(j2+2)*rhsStride];
+// const Scalar* b3 = &rhs[(j2+3)*rhsStride];
+// const Scalar* b4 = &rhs[(j2+4)*rhsStride];
+// const Scalar* b5 = &rhs[(j2+5)*rhsStride];
+// const Scalar* b6 = &rhs[(j2+6)*rhsStride];
+// const Scalar* b7 = &rhs[(j2+7)*rhsStride];
+// Index k=0;
+//       if(PacketSize==8) // TODO enable vectorized transposition for PacketSize==4
+// {
+// for(; k<peeled_k; k+=PacketSize) {
+// Kernel<Packet> kernel;
+// for (int p = 0; p < PacketSize; ++p) {
+// kernel.packet[p] = ploadu<Packet>(&rhs[(j2+p)*rhsStride+k]);
+// }
+// ptranspose(kernel);
+// for (int p = 0; p < PacketSize; ++p) {
+// pstoreu(blockB+count, cj.pconj(kernel.packet[p]));
+// count+=PacketSize;
+// }
+// }
+// }
+// for(; k<depth; k++)
+// {
+// blockB[count+0] = cj(b0[k]);
+// blockB[count+1] = cj(b1[k]);
+// blockB[count+2] = cj(b2[k]);
+// blockB[count+3] = cj(b3[k]);
+// blockB[count+4] = cj(b4[k]);
+// blockB[count+5] = cj(b5[k]);
+// blockB[count+6] = cj(b6[k]);
+// blockB[count+7] = cj(b7[k]);
+// count += 8;
+// }
+// // skip what we have after
+// if(PanelMode) count += 8 * (stride-offset-depth);
+// }
+// }
if(nr>=4)
{
@@ -1383,39 +1755,39 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, Pan
Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
Index count = 0;
- if(nr>=8)
- {
- for(Index j2=0; j2<packet_cols8; j2+=8)
- {
- // skip what we have before
- if(PanelMode) count += 8 * offset;
- for(Index k=0; k<depth; k++)
- {
- if (PacketSize==8) {
- Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);
- pstoreu(blockB+count, cj.pconj(A));
- } else if (PacketSize==4) {
- Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);
- Packet B = ploadu<Packet>(&rhs[k*rhsStride + j2 + PacketSize]);
- pstoreu(blockB+count, cj.pconj(A));
- pstoreu(blockB+count+PacketSize, cj.pconj(B));
- } else {
- const Scalar* b0 = &rhs[k*rhsStride + j2];
- blockB[count+0] = cj(b0[0]);
- blockB[count+1] = cj(b0[1]);
- blockB[count+2] = cj(b0[2]);
- blockB[count+3] = cj(b0[3]);
- blockB[count+4] = cj(b0[4]);
- blockB[count+5] = cj(b0[5]);
- blockB[count+6] = cj(b0[6]);
- blockB[count+7] = cj(b0[7]);
- }
- count += 8;
- }
- // skip what we have after
- if(PanelMode) count += 8 * (stride-offset-depth);
- }
- }
+// if(nr>=8)
+// {
+// for(Index j2=0; j2<packet_cols8; j2+=8)
+// {
+// // skip what we have before
+// if(PanelMode) count += 8 * offset;
+// for(Index k=0; k<depth; k++)
+// {
+// if (PacketSize==8) {
+// Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);
+// pstoreu(blockB+count, cj.pconj(A));
+// } else if (PacketSize==4) {
+// Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);
+// Packet B = ploadu<Packet>(&rhs[k*rhsStride + j2 + PacketSize]);
+// pstoreu(blockB+count, cj.pconj(A));
+// pstoreu(blockB+count+PacketSize, cj.pconj(B));
+// } else {
+// const Scalar* b0 = &rhs[k*rhsStride + j2];
+// blockB[count+0] = cj(b0[0]);
+// blockB[count+1] = cj(b0[1]);
+// blockB[count+2] = cj(b0[2]);
+// blockB[count+3] = cj(b0[3]);
+// blockB[count+4] = cj(b0[4]);
+// blockB[count+5] = cj(b0[5]);
+// blockB[count+6] = cj(b0[6]);
+// blockB[count+7] = cj(b0[7]);
+// }
+// count += 8;
+// }
+// // skip what we have after
+// if(PanelMode) count += 8 * (stride-offset-depth);
+// }
+// }
if(nr>=4)
{
for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
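For readers tracing the new ColMajor lhs packer above, the following standalone sketch reproduces its memory layout with plain scalar copies (no ploadu/pstore, no PanelMode, no conjugation; PacketSize assumed to be 8 as for float on AVX, and Pack1 >= 3*PacketSize). It only illustrates the blockA layout consumed by the new 3-packet gebp kernel, not Eigen's implementation:

#include <cstddef>

typedef std::ptrdiff_t Index;
static const Index PS = 8; // assumed packet size (e.g. float on AVX)

// lhs is column-major with leading dimension lhsStride; blockA must hold rows*depth scalars.
void pack_lhs_colmajor_sketch(float* blockA, const float* lhs, Index lhsStride,
                              Index depth, Index rows)
{
  Index count = 0;
  const Index peeled_mc3 = (rows/(3*PS))*(3*PS);
  const Index peeled_mc2 = peeled_mc3 + ((rows-peeled_mc3)/(2*PS))*(2*PS);
  const Index peeled_mc1 = (rows/PS)*PS;

  // 3-packet panels: for each k, 3*PS consecutive rows are stored contiguously
  for(Index i=0; i<peeled_mc3; i+=3*PS)
    for(Index k=0; k<depth; ++k)
      for(Index w=0; w<3*PS; ++w)
        blockA[count++] = lhs[(i+w) + k*lhsStride];

  // 2-packet panels
  for(Index i=peeled_mc3; i<peeled_mc2; i+=2*PS)
    for(Index k=0; k<depth; ++k)
      for(Index w=0; w<2*PS; ++w)
        blockA[count++] = lhs[(i+w) + k*lhsStride];

  // 1-packet panels
  for(Index i=peeled_mc2; i<peeled_mc1; i+=PS)
    for(Index k=0; k<depth; ++k)
      for(Index w=0; w<PS; ++w)
        blockA[count++] = lhs[(i+w) + k*lhsStride];

  // leftover scalar rows
  for(Index i=peeled_mc1; i<rows; ++i)
    for(Index k=0; k<depth; ++k)
      blockA[count++] = lhs[i + k*lhsStride];
}

Within each group, one full k-column of 3, 2 or 1 packets is stored contiguously, so the micro-kernel can stream blockA strictly linearly.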
diff --git a/Eigen/src/Core/products/GeneralMatrixMatrix.h b/Eigen/src/Core/products/GeneralMatrixMatrix.h
index b35625a11..d06e0f808 100644
--- a/Eigen/src/Core/products/GeneralMatrixMatrix.h
+++ b/Eigen/src/Core/products/GeneralMatrixMatrix.h
@@ -23,6 +23,8 @@ template<
typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
+ typedef gebp_traits<RhsScalar,LhsScalar> Traits;
+
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static EIGEN_STRONG_INLINE void run(
Index rows, Index cols, Index depth,
@@ -51,6 +53,8 @@ template<
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
+typedef gebp_traits<LhsScalar,RhsScalar> Traits;
+
typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
const LhsScalar* _lhs, Index lhsStride,
@@ -63,11 +67,9 @@ static void run(Index rows, Index cols, Index depth,
const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
- typedef gebp_traits<LhsScalar,RhsScalar> Traits;
-
Index kc = blocking.kc(); // cache block size along the K direction
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
- //Index nc = blocking.nc(); // cache block size along the N direction
+ Index nc = (std::min)(cols,blocking.nc()); // cache block size along the N direction
gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
@@ -80,66 +82,68 @@ static void run(Index rows, Index cols, Index depth,
Index tid = omp_get_thread_num();
Index threads = omp_get_num_threads();
- std::size_t sizeA = kc*mc;
- ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
+ LhsScalar* blockA = blocking.blockA();
+ eigen_internal_assert(blockA!=0);
- RhsScalar* blockB = blocking.blockB();
- eigen_internal_assert(blockB!=0);
-
+ std::size_t sizeB = kc*nc;
+ ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);
+
// For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
for(Index k=0; k<depth; k+=kc)
{
const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
// In order to reduce the chance that a thread has to wait for the other,
- // let's start by packing A'.
- pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);
+ // let's start by packing B'.
+ pack_rhs(blockB, &rhs(k,0), rhsStride, actual_kc, nc);
- // Pack B_k to B' in a parallel fashion:
- // each thread packs the sub block B_k,j to B'_j where j is the thread id.
+ // Pack A_k to A' in a parallel fashion:
+ // each thread packs the sub block A_k,i to A'_i where i is the thread id.
- // However, before copying to B'_j, we have to make sure that no other thread is still using it,
+ // However, before copying to A'_i, we have to make sure that no other thread is still using it,
// i.e., we test that info[tid].users equals 0.
// Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
while(info[tid].users!=0) {}
info[tid].users += threads;
+
+ pack_lhs(blockA+info[tid].lhs_start*actual_kc, &lhs(info[tid].lhs_start,k), lhsStride, actual_kc, info[tid].lhs_length);
- pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);
-
- // Notify the other threads that the part B'_j is ready to go.
+ // Notify the other threads that the part A'_i is ready to go.
info[tid].sync = k;
-
- // Computes C_i += A' * B' per B'_j
+
+ // Computes C_i += A' * B' per A'_i
for(Index shift=0; shift<threads; ++shift)
{
- Index j = (tid+shift)%threads;
+ Index i = (tid+shift)%threads;
- // At this point we have to make sure that B'_j has been updated by the thread j,
+ // At this point we have to make sure that A'_i has been updated by the thread i,
// we use testAndSetOrdered to mimic a volatile access.
// However, no need to wait for the B' part which has been updated by the current thread!
if(shift>0)
- while(info[j].sync!=k) {}
-
- gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0);
+ while(info[i].sync!=k) {}
+ gebp(res+info[i].lhs_start, resStride, blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
}
- // Then keep going as usual with the remaining A'
- for(Index i=mc; i<rows; i+=mc)
+ // Then keep going as usual with the remaining B'
+ for(Index j=nc; j<cols; j+=nc)
{
- const Index actual_mc = (std::min)(i+mc,rows)-i;
+ const Index actual_nc = (std::min)(j+nc,cols)-j;
- // pack A_i,k to A'
- pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);
+ // pack B_k,j to B'
+ pack_rhs(blockB, &rhs(k,j), rhsStride, actual_kc, actual_nc);
- // C_i += A' * B'
- gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0);
+ // C_j += A' * B'
+ gebp(res+j*resStride, resStride, blockA, blockB, rows, actual_kc, actual_nc, alpha);
}
- // Release all the sub blocks B'_j of B' for the current thread,
+ // Release all the sub blocks A'_i of A' for the current thread,
// i.e., we simply decrement the number of users by 1
- for(Index j=0; j<threads; ++j)
+ #pragma omp critical
+ {
+ for(Index i=0; i<threads; ++i)
#pragma omp atomic
- --(info[j].users);
+ --(info[i].users);
+ }
}
}
else
@@ -149,36 +153,34 @@ static void run(Index rows, Index cols, Index depth,
// this is the sequential version!
std::size_t sizeA = kc*mc;
- std::size_t sizeB = kc*cols;
+ std::size_t sizeB = kc*nc;
ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
// For each horizontal panel of the rhs, and corresponding panel of the lhs...
- // (==GEMM_VAR1)
for(Index k2=0; k2<depth; k2+=kc)
{
const Index actual_kc = (std::min)(k2+kc,depth)-k2;
// OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
- // => Pack rhs's panel into a sequential chunk of memory (L2 caching)
- // Note that this panel will be read as many times as the number of blocks in the lhs's
- // vertical panel which is, in practice, a very low number.
- pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);
-
- // For each mc x kc block of the lhs's vertical panel...
- // (==GEPP_VAR1)
- for(Index i2=0; i2<rows; i2+=mc)
+ // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
+ // Note that this panel will be read as many times as the number of blocks in the rhs's
+ // horizontal panel which is, in practice, a very low number.
+ pack_lhs(blockA, &lhs(0,k2), lhsStride, actual_kc, rows);
+
+ // For each kc x nc block of the rhs's horizontal panel...
+ for(Index j2=0; j2<cols; j2+=nc)
{
- const Index actual_mc = (std::min)(i2+mc,rows)-i2;
+ const Index actual_nc = (std::min)(j2+nc,cols)-j2;
- // We pack the lhs's block into a sequential chunk of memory (L1 caching)
+ // We pack the rhs's block into a sequential chunk of memory (L2 caching)
// Note that this block will be read a very high number of times, which is equal to the number of
- // micro vertical panel of the large rhs's panel (e.g., cols/4 times).
- pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);
+        // micro panels of the lhs's packed panel (e.g., rows/12 times).
+ pack_rhs(blockB, &rhs(k2,j2), rhsStride, actual_kc, actual_nc);
- // Everything is packed, we can now call the block * panel kernel:
- gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0);
+ // Everything is packed, we can now call the panel * block kernel:
+ gebp(res+j2*resStride, resStride, blockA, blockB, rows, actual_kc, actual_nc, alpha);
}
}
}
@@ -199,14 +201,13 @@ struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
- gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha,
- BlockingType& blocking)
+ gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
: m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
{}
void initParallelSession() const
{
- m_blocking.allocateB();
+ m_blocking.allocateA();
}
void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
@@ -220,6 +221,8 @@ struct gemm_functor
(Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
m_actualAlpha, m_blocking, info);
}
+
+ typedef typename Gemm::Traits Traits;
protected:
const Lhs& m_lhs;
@@ -316,13 +319,23 @@ class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, M
public:
- gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
+ gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth, bool full_rows = false)
{
this->m_mc = Transpose ? cols : rows;
this->m_nc = Transpose ? rows : cols;
this->m_kc = depth;
- computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc);
+ if(full_rows)
+ {
+ DenseIndex m = this->m_mc;
+ computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc);
+ }
+ else // full columns
+ {
+ DenseIndex n = this->m_nc;
+ computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n);
+ }
+
m_sizeA = this->m_mc * this->m_kc;
m_sizeB = this->m_kc * this->m_nc;
}
@@ -396,7 +409,7 @@ class GeneralProduct<Lhs, Rhs, GemmProduct>
(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
_ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;
- BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());
+ BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), true);
internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit);
}
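The sequential path above reverses the classical blocking: the whole lhs panel is now packed once per kc slice while nc-wide rhs blocks stream through a small buffer. The toy, SIMD-free routine below follows the same loop order on column-major arrays; kc and nc are arbitrary constants here, whereas Eigen derives them from cache sizes via computeProductBlockingSizes, and the innermost triple loop stands in for the gebp kernel:

#include <algorithm>
#include <cstddef>
#include <vector>

typedef std::ptrdiff_t Index;

// Computes C += A*B with A (rows x depth), B (depth x cols) and C (rows x cols),
// all column-major. kc/nc are arbitrary block sizes for the illustration.
void toy_blocked_gemm(Index rows, Index cols, Index depth,
                      const double* A, const double* B, double* C)
{
  const Index kc = 64, nc = 32;
  std::vector<double> blockA(rows*kc), blockB(kc*nc);
  for(Index k2=0; k2<depth; k2+=kc)
  {
    const Index actual_kc = std::min(k2+kc, depth) - k2;
    // pack the whole rows x actual_kc lhs panel once per k2 slice (kept L2/L3 resident)
    for(Index k=0; k<actual_kc; ++k)
      std::copy(A + (k2+k)*rows, A + (k2+k)*rows + rows, blockA.begin() + k*rows);
    for(Index j2=0; j2<cols; j2+=nc)
    {
      const Index actual_nc = std::min(j2+nc, cols) - j2;
      // pack the actual_kc x actual_nc rhs block (fits in L2)
      for(Index j=0; j<actual_nc; ++j)
        std::copy(B + (j2+j)*depth + k2, B + (j2+j)*depth + k2 + actual_kc,
                  blockB.begin() + j*actual_kc);
      // "panel * block" product on the packed buffers (gebp's job in Eigen)
      for(Index j=0; j<actual_nc; ++j)
        for(Index k=0; k<actual_kc; ++k)
          for(Index i=0; i<rows; ++i)
            C[(j2+j)*rows + i] += blockA[k*rows + i] * blockB[j*actual_kc + k];
    }
  }
}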
diff --git a/Eigen/src/Core/products/Parallelizer.h b/Eigen/src/Core/products/Parallelizer.h
index 5c3e9b7ac..4079063eb 100644
--- a/Eigen/src/Core/products/Parallelizer.h
+++ b/Eigen/src/Core/products/Parallelizer.h
@@ -73,13 +73,13 @@ namespace internal {
template<typename Index> struct GemmParallelInfo
{
- GemmParallelInfo() : sync(-1), users(0), rhs_start(0), rhs_length(0) {}
+ GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}
int volatile sync;
int volatile users;
- Index rhs_start;
- Index rhs_length;
+ Index lhs_start;
+ Index lhs_length;
};
template<bool Condition, typename Functor, typename Index>
@@ -107,7 +107,7 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpos
if((!Condition) || (omp_get_num_threads()>1))
return func(0,rows, 0,cols);
- Index size = transpose ? cols : rows;
+ Index size = transpose ? rows : cols;
// 2- compute the maximal number of threads from the size of the product:
// FIXME this has to be fine tuned
@@ -126,26 +126,25 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpos
std::swap(rows,cols);
Index blockCols = (cols / threads) & ~Index(0x3);
- Index blockRows = (rows / threads) & ~Index(0x7);
+ Index blockRows = (rows / threads);
+ blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;
GemmParallelInfo<Index>* info = new GemmParallelInfo<Index>[threads];
- #pragma omp parallel for schedule(static,1) num_threads(threads)
- for(Index i=0; i<threads; ++i)
+ #pragma omp parallel num_threads(threads)
{
+ Index i = omp_get_thread_num();
Index r0 = i*blockRows;
Index actualBlockRows = (i+1==threads) ? rows-r0 : blockRows;
Index c0 = i*blockCols;
Index actualBlockCols = (i+1==threads) ? cols-c0 : blockCols;
- info[i].rhs_start = c0;
- info[i].rhs_length = actualBlockCols;
+ info[i].lhs_start = r0;
+ info[i].lhs_length = actualBlockRows;
- if(transpose)
- func(0, cols, r0, actualBlockRows, info);
- else
- func(r0, actualBlockRows, 0,cols, info);
+ if(transpose) func(c0, actualBlockCols, 0, rows, info);
+ else func(0, rows, c0, actualBlockCols, info);
}
delete[] info;
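Since the work is now distributed along the lhs's rows rather than the rhs's columns, each thread packs and owns a row slice of A'. The small program below only illustrates the chunking arithmetic (blockRows rounded down to a multiple of the kernel's mr, last thread taking the remainder); mr = 12 is an assumption matching 3 packets of 4 doubles, not a value taken from this patch:

#include <cstdio>

int main()
{
  const long rows = 1000, threads = 4, mr = 12;   // mr is an assumed register-block height
  long blockRows = rows / threads;
  blockRows = (blockRows / mr) * mr;              // align the chunk size on mr
  for(long i=0; i<threads; ++i)
  {
    long r0  = i*blockRows;
    long len = (i+1==threads) ? rows-r0 : blockRows;
    std::printf("thread %ld packs lhs rows [%ld, %ld)\n", i, r0, r0+len);
  }
  return 0;
}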
diff --git a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
index 34480c707..d67164ec3 100644
--- a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
+++ b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
@@ -15,7 +15,7 @@ namespace Eigen {
namespace internal {
// pack a selfadjoint block diagonal for use with the gebp_kernel
-template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder>
+template<typename Scalar, typename Index, int Pack1, int Pack2_dummy, int StorageOrder>
struct symm_pack_lhs
{
template<int BlockRows> inline
@@ -45,22 +45,29 @@ struct symm_pack_lhs
}
void operator()(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows)
{
+ enum { PacketSize = packet_traits<Scalar>::size };
const_blas_data_mapper<Scalar,Index,StorageOrder> lhs(_lhs,lhsStride);
Index count = 0;
- Index peeled_mc = (rows/Pack1)*Pack1;
- for(Index i=0; i<peeled_mc; i+=Pack1)
- {
- pack<Pack1>(blockA, lhs, cols, i, count);
- }
-
- if(rows-peeled_mc>=Pack2)
- {
- pack<Pack2>(blockA, lhs, cols, peeled_mc, count);
- peeled_mc += Pack2;
- }
+ //Index peeled_mc3 = (rows/Pack1)*Pack1;
+
+ const Index peeled_mc3 = Pack1>=3*PacketSize ? (rows/(3*PacketSize))*(3*PacketSize) : 0;
+ const Index peeled_mc2 = Pack1>=2*PacketSize ? peeled_mc3+((rows-peeled_mc3)/(2*PacketSize))*(2*PacketSize) : 0;
+ const Index peeled_mc1 = Pack1>=1*PacketSize ? (rows/(1*PacketSize))*(1*PacketSize) : 0;
+
+ if(Pack1>=3*PacketSize)
+ for(Index i=0; i<peeled_mc3; i+=3*PacketSize)
+ pack<3*PacketSize>(blockA, lhs, cols, i, count);
+
+ if(Pack1>=2*PacketSize)
+ for(Index i=peeled_mc3; i<peeled_mc2; i+=2*PacketSize)
+ pack<2*PacketSize>(blockA, lhs, cols, i, count);
+
+ if(Pack1>=1*PacketSize)
+ for(Index i=peeled_mc2; i<peeled_mc1; i+=1*PacketSize)
+ pack<1*PacketSize>(blockA, lhs, cols, i, count);
// do the same with mr==1
- for(Index i=peeled_mc; i<rows; i++)
+ for(Index i=peeled_mc1; i<rows; i++)
{
for(Index k=0; k<i; k++)
blockA[count++] = lhs(i, k); // normal
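The symm_pack_lhs boundaries above follow the same peeling scheme as the gemm lhs packer. As a quick sanity check, this snippet (an illustration, not Eigen code) prints how an arbitrary row count splits for an assumed PacketSize of 4 and Pack1 >= 3*PacketSize; note the 1-packet range can be empty, as it is here:

#include <cstdio>

int main()
{
  const long PS   = 4;   // assumed packet size (e.g. double on AVX)
  const long rows = 23;  // arbitrary example
  const long mc3 = (rows/(3*PS))*(3*PS);                 // 12
  const long mc2 = mc3 + ((rows-mc3)/(2*PS))*(2*PS);     // 20
  const long mc1 = (rows/PS)*PS;                         // 20
  std::printf("3-packet rows: [0,%ld)\n", mc3);
  std::printf("2-packet rows: [%ld,%ld)\n", mc3, mc2);
  std::printf("1-packet rows: [%ld,%ld)\n", mc2, mc1);
  std::printf("scalar rows  : [%ld,%ld)\n", mc1, rows);
  return 0;
}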