From 46fa4c713fa2fdb472e287cad95b5933135e6503 Mon Sep 17 00:00:00 2001 From: Gael Guennebaud <g.gael@free.fr> Date: Mon, 5 May 2008 10:23:29 +0000 Subject: * Started support for unaligned vectorization. * Introduce a new highly optimized matrix-matrix product for large matrices. The code is still highly experimental and it is activated only if you define EIGEN_WIP_PRODUCT at compile time. Currently the third dimension of the product must be a multiple of the packet size (4 for floats) and the right-hand side matrix must be column-major. Moreover, c = a*b; currently computes c += a*b !! Therefore, the code is provided for experimentation purposes only! These limitations will be fixed sooner or later so that this code can become the default product implementation. --- Eigen/src/Core/Assign.h | 12 +- Eigen/src/Core/Block.h | 12 + Eigen/src/Core/CwiseBinaryOp.h | 3 +- Eigen/src/Core/CwiseNullaryOp.h | 1 + Eigen/src/Core/CwiseUnaryOp.h | 3 +- Eigen/src/Core/Lazy.h | 3 +- Eigen/src/Core/Matrix.h | 23 +- Eigen/src/Core/MatrixBase.h | 8 +- Eigen/src/Core/PacketMath.h | 68 ++++-- Eigen/src/Core/Product.h | 89 ++++--- Eigen/src/Core/ProductWIP.h | 496 ++++++++++++++++++++++++++++++++++++++++ Eigen/src/Core/Temporary.h | 3 +- Eigen/src/Core/Transpose.h | 6 +- Eigen/src/Core/util/Constants.h | 1 + 14 files changed, 654 insertions(+), 74 deletions(-) create mode 100644 Eigen/src/Core/ProductWIP.h (limited to 'Eigen/src') diff --git a/Eigen/src/Core/Assign.h b/Eigen/src/Core/Assign.h index 3862ae831..384059185 100644 --- a/Eigen/src/Core/Assign.h +++ b/Eigen/src/Core/Assign.h @@ -78,7 +78,7 @@ struct ei_matrix_assignment_packet_unroller { ei_matrix_assignment_packet_unroller<Derived1, Derived2, Index-ei_packet_traits<typename Derived1::Scalar>::size>::run(dst, src); - dst.writePacketCoeff(row, col, src.packetCoeff(row, col)); + dst.template writePacketCoeff<Aligned>(row, col, src.template packetCoeff<Aligned>(row, col)); } }; @@ -87,7 +87,7 @@ struct ei_matrix_assignment_packet_unroller { static void run(Derived1 &dst, const Derived2 &src) { - dst.writePacketCoeff(0, 0, src.packetCoeff(0, 0)); + dst.template writePacketCoeff<Aligned>(0, 0, src.template packetCoeff<Aligned>(0, 0)); } }; @@ -211,7 +211,7 @@ struct ei_assignment_impl // FIXME the following is not really efficient int i = index/dst.rows(); int j = index%dst.rows(); - dst.writePacketCoeff(i, j, src.packetCoeff(i, j)); + dst.template writePacketCoeff<Aligned>(i, j, src.template packetCoeff<Aligned>(i, j)); } for(int i = alignedSize/dst.rows(); i < dst.rows(); i++) for(int j = alignedSize%dst.rows(); j < dst.cols(); j++) @@ -222,7 +222,7 @@ struct ei_assignment_impl // std::cout << "vectorized normal row major\n"; for(int i = 0; i < dst.rows(); i++) for(int j = 0; j < dst.cols(); j+=ei_packet_traits::size) - dst.writePacketCoeff(i, j, src.packetCoeff(i, j)); + dst.template writePacketCoeff<Aligned>(i, j, src.template packetCoeff<Aligned>(i, j)); } } else @@ -240,7 +240,7 @@ struct ei_assignment_impl // FIXME the following is not really efficient int i = index%dst.rows(); int j = index/dst.rows(); - dst.writePacketCoeff(i, j, src.packetCoeff(i, j)); + dst.template writePacketCoeff<Aligned>(i, j, src.template packetCoeff<Aligned>(i, j)); } for(int j = alignedSize/dst.rows(); j < dst.cols(); j++) for(int i = alignedSize%dst.rows(); i < dst.rows(); i++) @@ -251,7 +251,7 @@ struct ei_assignment_impl // std::cout << "vectorized normal col major\n"; for(int j = 0; j < dst.cols(); j++) for(int i = 0; i < dst.rows(); i+=ei_packet_traits::size) - dst.writePacketCoeff(i, j, src.packetCoeff(i, j)); + dst.template writePacketCoeff<Aligned>(i, j, src.template packetCoeff<Aligned>(i, j)); } } } diff --git a/Eigen/src/Core/Block.h b/Eigen/src/Core/Block.h index 
6c00e0ba8..247a46b40 100644 --- a/Eigen/src/Core/Block.h +++ b/Eigen/src/Core/Block.h @@ -143,6 +143,18 @@ template class Block return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value()); } + template + PacketScalar _packetCoeff(int row, int col) const + { + return m_matrix.packetCoeff(row + m_startRow.value(), col + m_startCol.value()); + } + + template + void _writePacketCoeff(int row, int col, const PacketScalar& x) + { + m_matrix.const_cast_derived().writePacketCoeff(row + m_startRow.value(), col + m_startCol.value(), x); + } + protected: const typename MatrixType::Nested m_matrix; diff --git a/Eigen/src/Core/CwiseBinaryOp.h b/Eigen/src/Core/CwiseBinaryOp.h index 3de3e2dd5..31afbe0f1 100644 --- a/Eigen/src/Core/CwiseBinaryOp.h +++ b/Eigen/src/Core/CwiseBinaryOp.h @@ -103,9 +103,10 @@ class CwiseBinaryOp : ei_no_assignment_operator, return m_functor(m_lhs.coeff(row, col), m_rhs.coeff(row, col)); } + template PacketScalar _packetCoeff(int row, int col) const { - return m_functor.packetOp(m_lhs.packetCoeff(row, col), m_rhs.packetCoeff(row, col)); + return m_functor.packetOp(m_lhs.template packetCoeff(row, col), m_rhs.template packetCoeff(row, col)); } protected: diff --git a/Eigen/src/Core/CwiseNullaryOp.h b/Eigen/src/Core/CwiseNullaryOp.h index e603280bf..bcc09d0c8 100644 --- a/Eigen/src/Core/CwiseNullaryOp.h +++ b/Eigen/src/Core/CwiseNullaryOp.h @@ -82,6 +82,7 @@ class CwiseNullaryOp : ei_no_assignment_operator, return m_functor(rows, cols); } + template PacketScalar _packetCoeff(int, int) const { return m_functor.packetOp(); diff --git a/Eigen/src/Core/CwiseUnaryOp.h b/Eigen/src/Core/CwiseUnaryOp.h index 76c6ed818..438417829 100644 --- a/Eigen/src/Core/CwiseUnaryOp.h +++ b/Eigen/src/Core/CwiseUnaryOp.h @@ -82,9 +82,10 @@ class CwiseUnaryOp : ei_no_assignment_operator, return m_functor(m_matrix.coeff(row, col)); } + template PacketScalar _packetCoeff(int row, int col) const { - return m_functor.packetOp(m_matrix.packetCoeff(row, col)); + return m_functor.packetOp(m_matrix.template packetCoeff(row, col)); } protected: diff --git a/Eigen/src/Core/Lazy.h b/Eigen/src/Core/Lazy.h index ffe777158..d2f763680 100644 --- a/Eigen/src/Core/Lazy.h +++ b/Eigen/src/Core/Lazy.h @@ -72,9 +72,10 @@ template class Lazy return m_expression.coeff(row, col); } + template PacketScalar _packetCoeff(int row, int col) const { - return m_expression.packetCoeff(row, col); + return m_expression.template packetCoeff(row, col); } protected: diff --git a/Eigen/src/Core/Matrix.h b/Eigen/src/Core/Matrix.h index dd1235aa3..922c3ddae 100644 --- a/Eigen/src/Core/Matrix.h +++ b/Eigen/src/Core/Matrix.h @@ -116,21 +116,36 @@ class Matrix : public MatrixBase PacketScalar _packetCoeff(int row, int col) const { ei_internal_assert(Flags & VectorizableBit); if(Flags & RowMajorBit) - return ei_pload(&m_storage.data()[col + row * m_storage.cols()]); + if (LoadMode==Aligned) + return ei_pload(&m_storage.data()[col + row * m_storage.cols()]); + else + return ei_ploadu(&m_storage.data()[col + row * m_storage.cols()]); else - return ei_pload(&m_storage.data()[row + col * m_storage.rows()]); + if (LoadMode==Aligned) + return ei_pload(&m_storage.data()[row + col * m_storage.rows()]); + else + return ei_ploadu(&m_storage.data()[row + col * m_storage.rows()]); } + + template void _writePacketCoeff(int row, int col, const PacketScalar& x) { ei_internal_assert(Flags & VectorizableBit); if(Flags & RowMajorBit) - ei_pstore(&m_storage.data()[col + row * m_storage.cols()], x); + if (StoreMode==Aligned) + 
ei_pstore(&m_storage.data()[col + row * m_storage.cols()], x); + else + ei_pstoreu(&m_storage.data()[col + row * m_storage.cols()], x); else - ei_pstore(&m_storage.data()[row + col * m_storage.rows()], x); + if (StoreMode==Aligned) + ei_pstore(&m_storage.data()[row + col * m_storage.rows()], x); + else + ei_pstoreu(&m_storage.data()[row + col * m_storage.rows()], x); } public: diff --git a/Eigen/src/Core/MatrixBase.h b/Eigen/src/Core/MatrixBase.h index a60ead6ba..97e8cab28 100644 --- a/Eigen/src/Core/MatrixBase.h +++ b/Eigen/src/Core/MatrixBase.h @@ -207,8 +207,10 @@ template class MatrixBase Scalar& coeffRef(int index); Scalar& operator[](int index); - PacketScalar packetCoeff(int row, int col) const { return derived()._packetCoeff(row,col); } - void writePacketCoeff(int row, int col, const PacketScalar& x) { return derived()._writePacketCoeff(row,col,x); } + template<int LoadMode> + PacketScalar packetCoeff(int row, int col) const { return derived().template _packetCoeff<LoadMode>(row,col); } + template<int StoreMode> + void writePacketCoeff(int row, int col, const PacketScalar& x) { return derived().template _writePacketCoeff<StoreMode>(row,col,x); } const Scalar x() const; const Scalar y() const; @@ -555,7 +557,9 @@ template class MatrixBase private: + template<int LoadMode> PacketScalar _packetCoeff(int , int) const { ei_internal_assert(false && "_packetCoeff not defined"); } + template<int StoreMode> void _writePacketCoeff(int , int, const PacketScalar&) { ei_internal_assert(false && "_writePacketCoeff not defined"); } }; diff --git a/Eigen/src/Core/PacketMath.h b/Eigen/src/Core/PacketMath.h index 140dcdde5..cfa19eb6a 100644 --- a/Eigen/src/Core/PacketMath.h +++ b/Eigen/src/Core/PacketMath.h @@ -54,29 +54,33 @@ template inline Scalar ei_pset1(const Scalar& a) { return a; } template<typename Scalar> inline void ei_pstore(Scalar* to, const Scalar& from) { (*to) = from; } /** \internal \returns the first element of a packet */ template<typename Scalar> inline Scalar ei_pfirst(const Scalar& a) { return a; } +/** \internal \returns a packet where element i contains the sum of the elements of the packet \a vecs[i] */ +template<typename Scalar> inline Scalar ei_predux(const Scalar vecs[1]) { return vecs[0]; } +/** \internal \returns the sum of the elements of \a a */ +template<typename Scalar> inline Scalar ei_predux(const Scalar& a) { return a; } #ifdef EIGEN_VECTORIZE_SSE #ifdef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD #undef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD -#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 16 +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 16 #endif template<> struct ei_packet_traits<float> { typedef __m128 type; enum {size=4}; }; template<> struct ei_packet_traits<double> { typedef __m128d type; enum {size=2}; }; template<> struct ei_packet_traits<int> { typedef __m128i type; enum {size=4}; }; -inline __m128 ei_padd(const __m128& a, const __m128& b) { return _mm_add_ps(a,b); } -inline __m128d ei_padd(const __m128d& a, const __m128d& b) { return _mm_add_pd(a,b); } -inline __m128i ei_padd(const __m128i& a, const __m128i& b) { return _mm_add_epi32(a,b); } +template<> inline __m128 ei_padd(const __m128& a, const __m128& b) { return _mm_add_ps(a,b); } +template<> inline __m128d ei_padd(const __m128d& a, const __m128d& b) { return _mm_add_pd(a,b); } +template<> inline __m128i ei_padd(const __m128i& a, const __m128i& b) { return _mm_add_epi32(a,b); } -inline __m128 ei_psub(const __m128& a, const __m128& b) { return _mm_sub_ps(a,b); } -inline __m128d ei_psub(const __m128d& a, const __m128d& b) { return _mm_sub_pd(a,b); } -inline __m128i ei_psub(const __m128i& a, const __m128i& b) { return _mm_sub_epi32(a,b); } +template<> inline __m128 ei_psub(const __m128& a, const __m128& b) { return _mm_sub_ps(a,b); }
+template<> inline __m128d ei_psub(const __m128d& a, const __m128d& b) { return _mm_sub_pd(a,b); } +template<> inline __m128i ei_psub(const __m128i& a, const __m128i& b) { return _mm_sub_epi32(a,b); } -inline __m128 ei_pmul(const __m128& a, const __m128& b) { return _mm_mul_ps(a,b); } -inline __m128d ei_pmul(const __m128d& a, const __m128d& b) { return _mm_mul_pd(a,b); } -inline __m128i ei_pmul(const __m128i& a, const __m128i& b) +template<> inline __m128 ei_pmul(const __m128& a, const __m128& b) { return _mm_mul_ps(a,b); } +template<> inline __m128d ei_pmul(const __m128d& a, const __m128d& b) { return _mm_mul_pd(a,b); } +template<> inline __m128i ei_pmul(const __m128i& a, const __m128i& b) { return _mm_or_si128( _mm_and_si128( @@ -89,21 +93,21 @@ inline __m128i ei_pmul(const __m128i& a, const __m128i& b) } // for some weird reasons, it has to be overloaded for packet integer -inline __m128i ei_pmadd(const __m128i& a, const __m128i& b, const __m128i& c) { return ei_padd(ei_pmul(a,b), c); } +template<> inline __m128i ei_pmadd(const __m128i& a, const __m128i& b, const __m128i& c) { return ei_padd(ei_pmul(a,b), c); } -inline __m128 ei_pmin(const __m128& a, const __m128& b) { return _mm_min_ps(a,b); } -inline __m128d ei_pmin(const __m128d& a, const __m128d& b) { return _mm_min_pd(a,b); } +template<> inline __m128 ei_pmin(const __m128& a, const __m128& b) { return _mm_min_ps(a,b); } +template<> inline __m128d ei_pmin(const __m128d& a, const __m128d& b) { return _mm_min_pd(a,b); } // FIXME this vectorized min operator is likely to be slower than the standard one -inline __m128i ei_pmin(const __m128i& a, const __m128i& b) +template<> inline __m128i ei_pmin(const __m128i& a, const __m128i& b) { __m128i mask = _mm_cmplt_epi32(a,b); return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b)); } -inline __m128 ei_pmax(const __m128& a, const __m128& b) { return _mm_max_ps(a,b); } -inline __m128d ei_pmax(const __m128d& a, const __m128d& b) { return _mm_max_pd(a,b); } +template<> inline __m128 ei_pmax(const __m128& a, const __m128& b) { return _mm_max_ps(a,b); } +template<> inline __m128d ei_pmax(const __m128d& a, const __m128d& b) { return _mm_max_pd(a,b); } // FIXME this vectorized max operator is likely to be slower than the standard one -inline __m128i ei_pmax(const __m128i& a, const __m128i& b) +template<> inline __m128i ei_pmax(const __m128i& a, const __m128i& b) { __m128i mask = _mm_cmpgt_epi32(a,b); return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b)); @@ -113,6 +117,10 @@ inline __m128 ei_pload(const float* from) { return _mm_load_ps(from); } inline __m128d ei_pload(const double* from) { return _mm_load_pd(from); } inline __m128i ei_pload(const int* from) { return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); } +inline __m128 ei_ploadu(const float* from) { return _mm_loadu_ps(from); } +inline __m128d ei_ploadu(const double* from) { return _mm_loadu_pd(from); } +inline __m128i ei_ploadu(const int* from) { return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from)); } + inline __m128 ei_pset1(const float& from) { return _mm_set1_ps(from); } inline __m128d ei_pset1(const double& from) { return _mm_set1_pd(from); } inline __m128i ei_pset1(const int& from) { return _mm_set1_epi32(from); } @@ -121,15 +129,39 @@ inline void ei_pstore(float* to, const __m128& from) { _mm_store_ps(to, from); inline void ei_pstore(double* to, const __m128d& from) { _mm_store_pd(to, from); } inline void ei_pstore(int* to, const __m128i& from) { _mm_store_si128(reinterpret_cast<__m128i*>(to), from); } +inline void ei_pstoreu(float* to, const __m128& from) { _mm_storeu_ps(to, from); } +inline void ei_pstoreu(double* to, const __m128d& from) { _mm_storeu_pd(to, from); } +inline void ei_pstoreu(int* to, const __m128i& from) { _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); } + inline float ei_pfirst(const __m128& a) { return _mm_cvtss_f32(a); } inline double ei_pfirst(const __m128d& a) { return _mm_cvtsd_f64(a); } inline int ei_pfirst(const __m128i& a) { return _mm_cvtsi128_si32(a); } +#ifdef __SSE3__ +// TODO implement SSE2 versions as well as integer versions +inline __m128 ei_predux(const __m128* vecs) +{ + return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3])); +} +inline __m128d ei_predux(const __m128d* vecs) +{ + return _mm_hadd_pd(vecs[0], vecs[1]); +} + +inline float ei_predux(const __m128& a) +{ + __m128 tmp0 = _mm_hadd_ps(a,a); + return ei_pfirst(_mm_hadd_ps(tmp0, tmp0)); +} + +inline double ei_predux(const __m128d& a) { return ei_pfirst(_mm_hadd_pd(a, a)); } +#endif + #elif defined(EIGEN_VECTORIZE_ALTIVEC) #ifdef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD #undef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD -#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4 +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4 #endif static const vector int v0i = vec_splat_u32(0);
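For reference, here is a minimal standalone sketch (plain SSE/SSE3 intrinsics, not part of this patch) of the semantics the new primitives wrap: ei_pload/ei_pstore map to aligned moves, ei_ploadu/ei_pstoreu to their unaligned counterparts, and the SSE3 ei_predux to horizontal adds. The buffer, its values, and the GCC-style aligned(16) attribute are illustrative assumptions only.

#include <xmmintrin.h> // SSE: _mm_load_ps, _mm_loadu_ps, _mm_storeu_ps, _mm_cvtss_f32
#include <pmmintrin.h> // SSE3: _mm_hadd_ps, as used by ei_predux
#include <cstdio>

int main()
{
  float data[8] __attribute__((aligned(16))) = {1,2,3,4,5,6,7,8};

  __m128 a = _mm_load_ps(data);      // like ei_pload: the address must be 16-byte aligned
  __m128 u = _mm_loadu_ps(data + 1); // like ei_ploadu: any address works, at some speed cost

  // like the SSE3 ei_predux(const __m128&): horizontal sum of the four floats of a packet
  __m128 t = _mm_hadd_ps(u, u);
  float sum = _mm_cvtss_f32(_mm_hadd_ps(t, t)); // 2+3+4+5 = 14

  float out[5];
  _mm_storeu_ps(out + 1, a); // like ei_pstoreu: the unaligned counterpart of ei_pstore
  std::printf("sum = %f, out[1] = %f\n", sum, out[1]);
  return 0;
}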
diff --git a/Eigen/src/Core/Product.h b/Eigen/src/Core/Product.h index 160d437fa..c1b2f5457 100644 --- a/Eigen/src/Core/Product.h +++ b/Eigen/src/Core/Product.h @@ -69,7 +69,7 @@ struct ei_packet_product_unroller static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) { ei_packet_product_unroller::run(row, col, lhs, rhs, res); - res = ei_pmadd(ei_pset1(lhs.coeff(row, Index)), rhs.packetCoeff(Index, col), res); + res = ei_pmadd(ei_pset1(lhs.coeff(row, Index)), rhs.template packetCoeff<Aligned>(Index, col), res); } }; @@ -79,7 +79,7 @@ struct ei_packet_product_unroller static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) { ei_packet_product_unroller::run(row, col, lhs, rhs, res); - res = ei_pmadd(lhs.packetCoeff(row, Index), ei_pset1(rhs.coeff(Index, col)), res); + res = ei_pmadd(lhs.template packetCoeff<Aligned>(row, Index), ei_pset1(rhs.coeff(Index, col)), res); } }; @@ -88,7 +88,7 @@ struct ei_packet_product_unroller { static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) { - res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.packetCoeff(0, col)); + res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packetCoeff<Aligned>(0, col)); } }; @@ -97,7 +97,7 @@ struct ei_packet_product_unroller { static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) { - res = ei_pmul(lhs.packetCoeff(row, 0), ei_pset1(rhs.coeff(0, col))); + res = ei_pmul(lhs.template packetCoeff<Aligned>(row, 0), ei_pset1(rhs.coeff(0, col))); } }; @@ -196,10 +196,10 @@ template class Product : ei_no_assignm } /** \internal */ - template + template void _cacheOptimalEval(DestDerived& res, ei_meta_false) const; #ifdef EIGEN_VECTORIZE - template + template void _cacheOptimalEval(DestDerived& res, ei_meta_true) const; #endif @@ -228,6 +228,7 @@ template class Product : ei_no_assignm return res; } + template<int LoadMode> PacketScalar _packetCoeff(int row, int col) const { if(Lhs::ColsAtCompileTime <= EIGEN_UNROLLING_LIMIT) @@ -247,21 +248,30 @@ template class Product : ei_no_assignm PacketScalar _packetCoeffRowMajor(int row, int col) const { PacketScalar res; - res = ei_pmul(ei_pset1(m_lhs.coeff(row, 0)),m_rhs.packetCoeff(0, 
col)); + res = ei_pmul(ei_pset1(m_lhs.coeff(row, 0)),m_rhs.template packetCoeff<Aligned>(0, col)); for(int i = 1; i < m_lhs.cols(); i++) - res = ei_pmadd(ei_pset1(m_lhs.coeff(row, i)), m_rhs.packetCoeff(i, col), res); + res = ei_pmadd(ei_pset1(m_lhs.coeff(row, i)), m_rhs.template packetCoeff<Aligned>(i, col), res); return res; } PacketScalar _packetCoeffColumnMajor(int row, int col) const { PacketScalar res; - res = ei_pmul(m_lhs.packetCoeff(row, 0), ei_pset1(m_rhs.coeff(0, col))); + res = ei_pmul(m_lhs.template packetCoeff<Aligned>(row, 0), ei_pset1(m_rhs.coeff(0, col))); for(int i = 1; i < m_lhs.cols(); i++) - res = ei_pmadd(m_lhs.packetCoeff(row, i), ei_pset1(m_rhs.coeff(i, col)), res); + res = ei_pmadd(m_lhs.template packetCoeff<Aligned>(row, i), ei_pset1(m_rhs.coeff(i, col)), res); return res; +// const PacketScalar tmp[4]; +// ei_punpack(m_rhs.packetCoeff(0,col), tmp); +// +// return +// ei_pmadd(m_lhs.packetCoeff(row, 0), tmp[0], +// ei_pmadd(m_lhs.packetCoeff(row, 1), tmp[1], +// ei_pmadd(m_lhs.packetCoeff(row, 2), tmp[2], +// ei_pmul(m_lhs.packetCoeff(row, 3), tmp[3])))); } + protected: const LhsNested m_lhs; const RhsNested m_rhs; @@ -298,7 +308,7 @@ template template Derived& MatrixBase::lazyAssign(const Product& product) { - product._cacheOptimalEval(*this, + product.template _cacheOptimalEval(derived(), #ifdef EIGEN_VECTORIZE typename ei_meta_if::ret() #else @@ -309,7 +319,7 @@ Derived& MatrixBase::lazyAssign(const Product& product) template -template +template void Product<Lhs,Rhs,EvalMode>::_cacheOptimalEval(DestDerived& res, ei_meta_false) const { res.setZero(); @@ -372,14 +382,14 @@ void Product::_cacheOptimalEval(DestDerived& res, ei_meta_fals #ifdef EIGEN_VECTORIZE template -template +template void Product<Lhs,Rhs,EvalMode>::_cacheOptimalEval(DestDerived& res, ei_meta_true) const { if (((Lhs::Flags&RowMajorBit) && (_cols() % ei_packet_traits<Scalar>::size != 0)) || (_rows() % ei_packet_traits<Scalar>::size != 0)) { - return _cacheOptimalEval(res, ei_meta_false()); + return _cacheOptimalEval(res, ei_meta_false()); } res.setZero(); @@ -398,12 +408,12 @@ void Product::_cacheOptimalEval(DestDerived& res, ei_meta_true const typename ei_packet_traits<Scalar>::type tmp3 = ei_pset1(m_lhs.coeff(k,j+3)); for (int i=0; i<this->cols(); i+=ei_packet_traits<Scalar>::size) { - res.writePacketCoeff(k,i, - ei_pmadd(tmp0, m_rhs.packetCoeff(j+0,i), - ei_pmadd(tmp1, m_rhs.packetCoeff(j+1,i), - ei_pmadd(tmp2, m_rhs.packetCoeff(j+2,i), - ei_pmadd(tmp3, m_rhs.packetCoeff(j+3,i), - res.packetCoeff(k,i))))) + res.template writePacketCoeff<Aligned>(k,i, + ei_pmadd(tmp0, m_rhs.template packetCoeff<Aligned>(j+0,i), + ei_pmadd(tmp1, m_rhs.template packetCoeff<Aligned>(j+1,i), + ei_pmadd(tmp2, m_rhs.template packetCoeff<Aligned>(j+2,i), + ei_pmadd(tmp3, m_rhs.template packetCoeff<Aligned>(j+3,i), + res.template packetCoeff<Aligned>(k,i))))) ); } } @@ -414,41 +424,44 @@ { const typename ei_packet_traits<Scalar>::type tmp = ei_pset1(m_lhs.coeff(k,j)); for (int i=0; i<this->cols(); i+=ei_packet_traits<Scalar>::size) - res.writePacketCoeff(k,i, ei_pmadd(tmp, m_rhs.packetCoeff(j,i), res.packetCoeff(k,i))); + res.template writePacketCoeff<Aligned>(k,i, + ei_pmadd(tmp, m_rhs.template packetCoeff<Aligned>(j,i), res.template packetCoeff<Aligned>(k,i))); } } } else { // std::cout << "packet lhs\n"; - int j=0; - for(; jcols(); k++) + for(int j=0; jcols(); j+=1) { - const typename ei_packet_traits<Scalar>::type tmp0 = ei_pset1(m_rhs.coeff(j+0,k)); - const typename ei_packet_traits<Scalar>::type tmp1 = ei_pset1(m_rhs.coeff(j+1,k)); - const typename ei_packet_traits<Scalar>::type tmp2 = ei_pset1(m_rhs.coeff(j+2,k)); - const typename ei_packet_traits<Scalar>::type tmp3 = ei_pset1(m_rhs.coeff(j+3,k)); + const typename ei_packet_traits<Scalar>::type tmp0 = ei_pset1(m_rhs.coeff(k+0,j)); + const typename ei_packet_traits<Scalar>::type tmp1 = ei_pset1(m_rhs.coeff(k+1,j)); + const typename ei_packet_traits<Scalar>::type tmp2 = ei_pset1(m_rhs.coeff(k+2,j)); + const typename ei_packet_traits<Scalar>::type tmp3 = ei_pset1(m_rhs.coeff(k+3,j)); + for (int i=0; i<this->rows(); i+=ei_packet_traits<Scalar>::size) { - res.writePacketCoeff(i,k, - ei_pmadd(tmp0, m_lhs.packetCoeff(i,j), - ei_pmadd(tmp1, m_lhs.packetCoeff(i,j+1), - ei_pmadd(tmp2, m_lhs.packetCoeff(i,j+2), - ei_pmadd(tmp3, m_lhs.packetCoeff(i,j+3), - res.packetCoeff(i,k))))) + res.template writePacketCoeff<Aligned>(i,j, + ei_pmadd(tmp0, m_lhs.template packetCoeff<Aligned>(i,k), + ei_pmadd(tmp1, m_lhs.template packetCoeff<Aligned>(i,k+1), + ei_pmadd(tmp2, m_lhs.template packetCoeff<Aligned>(i,k+2), + ei_pmadd(tmp3, m_lhs.template packetCoeff<Aligned>(i,k+3), + res.template packetCoeff<Aligned>(i,j))))) ); } } } - for(; jcols(); k++) + for(int j=0; jcols(); j++) { - const typename ei_packet_traits<Scalar>::type tmp = ei_pset1(m_rhs.coeff(j,k)); + const typename ei_packet_traits<Scalar>::type tmp = ei_pset1(m_rhs.coeff(k,j)); for (int i=0; i<this->rows(); i+=ei_packet_traits<Scalar>::size) - res.writePacketCoeff(i,k, ei_pmadd(tmp, m_lhs.packetCoeff(i,j), res.packetCoeff(i,k))); + res.template writePacketCoeff<Aligned>(i,j, + ei_pmadd(tmp, m_lhs.template packetCoeff<Aligned>(i,k), res.template packetCoeff<Aligned>(i,j))); } } }
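The rewritten _cacheOptimalEval above peels the third (depth) dimension four steps at a time: each step broadcasts one scalar coefficient with ei_pset1 and accumulates it against a whole packet of the other operand with ei_pmadd, so each destination packet receives four fused multiply-adds per pass. A scalar sketch of the same update order (not part of the patch; plain row-major float buffers and a zero-initialized result are assumed):

// Scalar rendition of the rank-4 update order used by the vectorized path.
// lhs is rows x depth, rhs is depth x cols, res is rows x cols (row-major),
// and res must be zero-initialized, matching the res.setZero() above.
void product_rank4_sketch(const float* lhs, const float* rhs, float* res,
                          int rows, int depth, int cols)
{
  int k = 0;
  for (; k + 3 < depth; k += 4)       // peel the depth dimension by four
    for (int r = 0; r < rows; ++r)
    {
      // these four scalars play the role of the ei_pset1 broadcasts
      const float a0 = lhs[r*depth + k+0], a1 = lhs[r*depth + k+1],
                  a2 = lhs[r*depth + k+2], a3 = lhs[r*depth + k+3];
      for (int c = 0; c < cols; ++c)  // this inner loop is the packet loop
        res[r*cols + c] += a0 * rhs[(k+0)*cols + c] + a1 * rhs[(k+1)*cols + c]
                         + a2 * rhs[(k+2)*cols + c] + a3 * rhs[(k+3)*cols + c];
    }
  for (; k < depth; ++k)              // remainder: one depth step at a time
    for (int r = 0; r < rows; ++r)
    {
      const float a = lhs[r*depth + k];
      for (int c = 0; c < cols; ++c)
        res[r*cols + c] += a * rhs[k*cols + c];
    }
}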
diff --git a/Eigen/src/Core/ProductWIP.h b/Eigen/src/Core/ProductWIP.h new file mode 100644 index 000000000..a1c10d5d8 --- /dev/null +++ b/Eigen/src/Core/ProductWIP.h @@ -0,0 +1,496 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob <jacob@math.jussieu.fr> +// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr> +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see <http://www.gnu.org/licenses/>. 
+ +#ifndef EIGEN_PRODUCT_H +#define EIGEN_PRODUCT_H + +#ifndef EIGEN_VECTORIZE +#error you must enable vectorization to try this experimental product implementation +#endif + +template +struct ei_product_unroller +{ + static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, + typename Lhs::Scalar &res) + { + ei_product_unroller::run(row, col, lhs, rhs, res); + res += lhs.coeff(row, Index) * rhs.coeff(Index, col); + } +}; + +template +struct ei_product_unroller<0, Size, Lhs, Rhs> +{ + static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, + typename Lhs::Scalar &res) + { + res = lhs.coeff(row, 0) * rhs.coeff(0, col); + } +}; + +template +struct ei_product_unroller +{ + static void run(int, int, const Lhs&, const Rhs&, typename Lhs::Scalar&) {} +}; + +// prevent buggy user code from causing an infinite recursion +template +struct ei_product_unroller +{ + static void run(int, int, const Lhs&, const Rhs&, typename Lhs::Scalar&) {} +}; + +template +struct ei_packet_product_unroller; + +template +struct ei_packet_product_unroller +{ + static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) + { + ei_packet_product_unroller::run(row, col, lhs, rhs, res); + res = ei_pmadd(ei_pset1(lhs.coeff(row, Index)), rhs.packetCoeff(Index, col), res); + } +}; + +template +struct ei_packet_product_unroller +{ + static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) + { + ei_packet_product_unroller::run(row, col, lhs, rhs, res); + res = ei_pmadd(lhs.packetCoeff(row, Index), ei_pset1(rhs.coeff(Index, col)), res); + } +}; + +template +struct ei_packet_product_unroller +{ + static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) + { + res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.packetCoeff(0, col)); + } +}; + +template +struct ei_packet_product_unroller +{ + static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) + { + res = ei_pmul(lhs.packetCoeff(row, 0), ei_pset1(rhs.coeff(0, col))); + } +}; + +template +struct ei_packet_product_unroller +{ + static void run(int, int, const Lhs&, const Rhs&, PacketScalar&) {} +}; + +template struct ProductPacketCoeffImpl { + inline static typename Product::PacketScalar execute(const Product& product, int row, int col) + { return product._packetCoeffRowMajor(row,col); } +}; + +template struct ProductPacketCoeffImpl { + inline static typename Product::PacketScalar execute(const Product& product, int row, int col) + { return product._packetCoeffColumnMajor(row,col); } +}; + +/** \class Product + * + * \brief Expression of the product of two matrices + * + * \param Lhs the type of the left-hand side + * \param Rhs the type of the right-hand side + * \param EvalMode internal use only + * + * This class represents an expression of the product of two matrices. + * It is the return type of the operator* between matrices, and most of the time + * this is the only way it is used. + * + * \sa class Sum, class Difference + */ +template struct ei_product_eval_mode +{ + enum{ value = Lhs::MaxRowsAtCompileTime >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD + && Rhs::MaxColsAtCompileTime >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD + && (!( (Lhs::Flags&RowMajorBit) && ((Rhs::Flags&RowMajorBit) ^ RowMajorBit))) + ? 
CacheOptimalProduct : NormalProduct }; +}; + +template +struct ei_traits > +{ + typedef typename Lhs::Scalar Scalar; + typedef typename ei_nested::type LhsNested; + typedef typename ei_nested::type RhsNested; + typedef typename ei_unref::type _LhsNested; + typedef typename ei_unref::type _RhsNested; + enum { + LhsCoeffReadCost = _LhsNested::CoeffReadCost, + RhsCoeffReadCost = _RhsNested::CoeffReadCost, + LhsFlags = _LhsNested::Flags, + RhsFlags = _RhsNested::Flags, + RowsAtCompileTime = Lhs::RowsAtCompileTime, + ColsAtCompileTime = Rhs::ColsAtCompileTime, + MaxRowsAtCompileTime = Lhs::MaxRowsAtCompileTime, + MaxColsAtCompileTime = Rhs::MaxColsAtCompileTime, + _RhsVectorizable = (RhsFlags & RowMajorBit) && (RhsFlags & VectorizableBit) && (ColsAtCompileTime % ei_packet_traits::size == 0), + _LhsVectorizable = (!(LhsFlags & RowMajorBit)) && (LhsFlags & VectorizableBit) && (RowsAtCompileTime % ei_packet_traits::size == 0), + _Vectorizable = (_LhsVectorizable || _RhsVectorizable) ? 1 : 0, + _RowMajor = (RhsFlags & RowMajorBit) + && (EvalMode==(int)CacheOptimalProduct ? (int)LhsFlags & RowMajorBit : (!_LhsVectorizable)), + _LostBits = DefaultLostFlagMask & ~( + (_RowMajor ? 0 : RowMajorBit) + | ((RowsAtCompileTime == Dynamic || ColsAtCompileTime == Dynamic) ? 0 : LargeBit)), + Flags = ((unsigned int)(LhsFlags | RhsFlags) & _LostBits) +// | EvalBeforeAssigningBit //FIXME + | EvalBeforeNestingBit + | (_Vectorizable ? VectorizableBit : 0), + CoeffReadCost + = Lhs::ColsAtCompileTime == Dynamic + ? Dynamic + : Lhs::ColsAtCompileTime + * (NumTraits::MulCost + LhsCoeffReadCost + RhsCoeffReadCost) + + (Lhs::ColsAtCompileTime - 1) * NumTraits::AddCost + }; +}; + +template class Product : ei_no_assignment_operator, + public MatrixBase > +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(Product) + friend class ProductPacketCoeffImpl; + typedef typename ei_traits::LhsNested LhsNested; + typedef typename ei_traits::RhsNested RhsNested; + typedef typename ei_traits::_LhsNested _LhsNested; + typedef typename ei_traits::_RhsNested _RhsNested; + + Product(const Lhs& lhs, const Rhs& rhs) + : m_lhs(lhs), m_rhs(rhs) + { + ei_assert(lhs.cols() == rhs.rows()); + } + + /** \internal */ + template void _cacheFriendlyEval(DestDerived& res) const; + + private: + + int _rows() const { return m_lhs.rows(); } + int _cols() const { return m_rhs.cols(); } + + const Scalar _coeff(int row, int col) const + { + Scalar res; + const bool unroll = CoeffReadCost <= EIGEN_UNROLLING_LIMIT; + if(unroll) + { + ei_product_unroller + ::run(row, col, m_lhs, m_rhs, res); + } + else + { + res = m_lhs.coeff(row, 0) * m_rhs.coeff(0, col); + for(int i = 1; i < m_lhs.cols(); i++) + res += m_lhs.coeff(row, i) * m_rhs.coeff(i, col); + } + return res; + } + + template + PacketScalar _packetCoeff(int row, int col) const + { + if(Lhs::ColsAtCompileTime <= EIGEN_UNROLLING_LIMIT) + { + PacketScalar res; + ei_packet_product_unroller + ::run(row, col, m_lhs, m_rhs, res); + return res; + } + else + return ProductPacketCoeffImpl::execute(*this, row, col); + } + + PacketScalar _packetCoeffRowMajor(int row, int col) const + { + PacketScalar res; + res = ei_pmul(ei_pset1(m_lhs.coeff(row, 0)),m_rhs.packetCoeff(0, col)); + for(int i = 1; i < m_lhs.cols(); i++) + res = ei_pmadd(ei_pset1(m_lhs.coeff(row, i)), m_rhs.packetCoeff(i, col), res); + return res; + } + + PacketScalar _packetCoeffColumnMajor(int row, int col) const + { + PacketScalar res; + res = ei_pmul(m_lhs.packetCoeff(row, 0), ei_pset1(m_rhs.coeff(0, col))); + for(int i = 1; i < m_lhs.cols(); i++) + res 
= ei_pmadd(m_lhs.packetCoeff(row, i), ei_pset1(m_rhs.coeff(i, col)), res); + return res; + } + + + protected: + const LhsNested m_lhs; + const RhsNested m_rhs; +}; + +/** \returns the matrix product of \c *this and \a other. + * + * \note This function causes an immediate evaluation. If you want to perform a matrix product + * without immediate evaluation, call .lazy() on one of the matrices before taking the product. + * + * \sa lazy(), operator*=(const MatrixBase&) + */ +template<typename Derived> +template<typename OtherDerived> +const Product<Derived, OtherDerived> +MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const +{ + return Product<Derived, OtherDerived>(derived(), other.derived()); +} + +/** replaces \c *this by \c *this * \a other. + * + * \returns a reference to \c *this + */ +template<typename Derived> +template<typename OtherDerived> +Derived & +MatrixBase<Derived>::operator*=(const MatrixBase<OtherDerived> &other) +{ + return *this = *this * other; +} + +template<typename Derived> +template<typename Lhs, typename Rhs> +Derived& MatrixBase<Derived>::lazyAssign(const Product<Lhs,Rhs,CacheOptimalProduct>& product) +{ + product._cacheFriendlyEval(*this); + return derived(); +} + +template<typename Lhs, typename Rhs, int EvalMode> +template<typename DestDerived> +void Product<Lhs,Rhs,EvalMode>::_cacheFriendlyEval(DestDerived& res) const +{ + // allow direct access to the data for benchmarking purposes + const Scalar* __restrict__ a = m_lhs.derived().data(); + const Scalar* __restrict__ b = m_rhs.derived().data(); + Scalar* __restrict__ c = res.derived().data(); + + // FIXME find a way to optimize: (an_xpr) + (a * b) + // then we don't need to clear res and we avoid an additional mat-mat sum +// res.setZero(); + + const int ps = ei_packet_traits<Scalar>::size; // size of a packet + #if (defined __i386__) + // the i386 architecture provides only 8 xmm registers, + // so let's reduce the max number of rows processed at once + const int bw = 4; // number of rows treated at once + #else + const int bw = 8; // number of rows treated at once + #endif + const int bs = ps * bw; // total number of elements treated at once + const int rows = _rows(); + const int cols = _cols(); + const int size = m_lhs.cols(); // third dimension of the product + const int l2blocksize = 256 > _cols() ? 
_cols() : 256; + const bool rhsIsAligned = ((size%ps) == 0); + const bool resIsAligned = ((cols%ps) == 0); + Scalar* __restrict__ block = new Scalar[l2blocksize*size]; + + // loops on each L2 cache friendly blocks of the result + for(int l2i=0; l2i<_rows(); l2i+=l2blocksize) + { + const int l2blockRowEnd = std::min(l2i+l2blocksize, rows); + const int l2blockRowEndBW = l2blockRowEnd & 0xFFFFF8; // end of the rows aligned to bw + const int l2blockRowRemaining = l2blockRowEnd - l2blockRowEndBW; // number of remaining rows + + // build a cache friendly block + int count = 0; + + // copy l2blocksize rows of m_lhs to blocks of ps x bw + for(int l2k=0; l2k0) + { + for (int k=l2k; k(l1i, l1j, ei_padd(res.template packetCoeff(l1i, l1j), ei_predux(dst))); + if (ps==2) + res.template writePacketCoeff(l1i+2,l1j, ei_padd(res.template packetCoeff(l1i+2,l1j), ei_predux(&(dst[2])))); + if (bw==8) + { + res.template writePacketCoeff(l1i+4,l1j, ei_padd(res.template packetCoeff(l1i+4,l1j), ei_predux(&(dst[4])))); + if (ps==2) + res.template writePacketCoeff(l1i+6,l1j, ei_padd(res.template packetCoeff(l1i+6,l1j), ei_predux(&(dst[6])))); + } + + asm("#eigen endcore"); + } + } + if (l2blockRowRemaining>0) + { + // TODO optimize this part using a generic templated function that processes N rows + // here we process the remaining l2blockRowRemaining rows + for(int l1j=l2j; l1j class Temporary return m_expression.coeff(row, col); } + template PacketScalar _packetCoeff(int row, int col) const { - return m_expression.packetCoeff(row, col); + return m_expression.template packetCoeff(row, col); } protected: diff --git a/Eigen/src/Core/Transpose.h b/Eigen/src/Core/Transpose.h index c2ca4a63d..e4af78c7c 100644 --- a/Eigen/src/Core/Transpose.h +++ b/Eigen/src/Core/Transpose.h @@ -79,14 +79,16 @@ template class Transpose return m_matrix.coeff(col, row); } + template PacketScalar _packetCoeff(int row, int col) const { - return m_matrix.packetCoeff(col, row); + return m_matrix.template packetCoeff(col, row); } + template void _writePacketCoeff(int row, int col, const PacketScalar& x) { - m_matrix.const_cast_derived().writePacketCoeff(col, row, x); + m_matrix.const_cast_derived().template writePacketCoeff(col, row, x); } protected: diff --git a/Eigen/src/Core/util/Constants.h b/Eigen/src/Core/util/Constants.h index ae0451156..c74d8f87b 100644 --- a/Eigen/src/Core/util/Constants.h +++ b/Eigen/src/Core/util/Constants.h @@ -45,6 +45,7 @@ const unsigned int NullLowerBit = 0x200; ///< means the strictly triangular l const unsigned int NullUpperBit = 0x400; ///< means the strictly triangular upper part is 0 enum { Upper=NullLowerBit, Lower=NullUpperBit }; +enum { Aligned=0, UnAligned=1 }; // list of flags that are lost by default const unsigned int DefaultLostFlagMask = ~(VectorizableBit | Like1DArrayBit | NullDiagBit | UnitDiagBit | NullLowerBit | NullUpperBit); -- cgit v1.2.3
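Given the caveats in the commit message, this is how the experimental path would currently be exercised. A hedged sketch only: it assumes a build with vectorization enabled and -DEIGEN_WIP_PRODUCT defined, and uses the 2008-era lowercase API (random(), USING_PART_OF_NAMESPACE_EIGEN); treat the names as approximate if the API has drifted. The explicit setZero() works around the fact that c = a*b currently computes c += a*b.

// Experimental usage sketch; compile with vectorization enabled and
// -DEIGEN_WIP_PRODUCT, as described in the commit message.
#include <Eigen/Core>
USING_PART_OF_NAMESPACE_EIGEN

int main()
{
  // 64 is a multiple of the float packet size (4), as the WIP product
  // currently requires for the third dimension; matrices are column-major
  // by default, which satisfies the right-hand side constraint.
  MatrixXf a = MatrixXf::random(64, 64);
  MatrixXf b = MatrixXf::random(64, 64);
  MatrixXf c(64, 64);
  c.setZero(); // required for now: the WIP code accumulates (c += a*b)
  c = a * b;   // dispatched to the experimental cache-friendly product
  return 0;
}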