author Gael Guennebaud <g.gael@free.fr> 2014-03-26 23:22:36 +0100
committer Gael Guennebaud <g.gael@free.fr> 2014-03-26 23:22:36 +0100
commit f0a4c9d5abe94703d8b4e5ec49cba20df014d0ca (patch)
tree 6bf541081445af66481d146a7a2b9fcca7cd434d /Eigen/src/Core/products/SelfadjointMatrixMatrix.h
parent 8be011e7765cf5d30cad86edd55850b0b1277741 (diff)
Update gebp kernel to process a panel of 4 columns at once for the remaining ones.
Diffstat (limited to 'Eigen/src/Core/products/SelfadjointMatrixMatrix.h')
-rw-r--r--  Eigen/src/Core/products/SelfadjointMatrixMatrix.h | 149
1 file changed, 96 insertions, 53 deletions
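Before the diff itself, a note on what the patch does: symm_pack_rhs previously packed the right-hand side in generic nr-wide column panels; with the gebp kernel now also handling a narrower panel width, the packing is split into an 8-wide pass, a 4-wide pass for the remaining full panels, and a scalar tail. The following minimal C++ sketch (hypothetical names, not Eigen code) shows the column partitioning implied by packet_cols8 and packet_cols4:

// A minimal sketch of the column partitioning the patch introduces:
// columns are consumed first in panels of 8, then in panels of 4,
// then one at a time. `nr` is the kernel's register-blocking width.
#include <cstdio>

typedef long Index;

void partition_columns(Index cols, Index nr)
{
  // Largest multiples of 8 and 4 that fit in `cols`, guarded by `nr`,
  // just as packet_cols8/packet_cols4 are in the patch below.
  Index packet_cols8 = nr >= 8 ? (cols / 8) * 8 : 0;
  Index packet_cols4 = nr >= 4 ? (cols / 4) * 4 : 0;

  for (Index j = 0; j < packet_cols8; j += 8)
    std::printf("panel of 8 at column %ld\n", j);
  for (Index j = packet_cols8; j < packet_cols4; j += 4)
    std::printf("panel of 4 at column %ld\n", j);
  for (Index j = packet_cols4; j < cols; ++j)
    std::printf("single column %ld\n", j);
}

int main() { partition_columns(23, 8); }

With cols == 23 and nr == 8 this yields two 8-wide panels, one 4-wide panel, and three single columns, matching the loop bounds introduced in the hunks below.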
diff --git a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
index d9fd9f556..34480c707 100644
--- a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
+++ b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
@@ -82,7 +82,8 @@ struct symm_pack_rhs
Index end_k = k2 + rows;
Index count = 0;
const_blas_data_mapper<Scalar,Index,StorageOrder> rhs(_rhs,rhsStride);
- Index packet_cols = (cols/nr)*nr;
+ Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
+ Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
// first part: normal case
for(Index j2=0; j2<k2; j2+=nr)
@@ -108,90 +109,134 @@ struct symm_pack_rhs
}
// second part: diagonal block
- for(Index j2=k2; j2<(std::min)(k2+rows,packet_cols); j2+=nr)
+ Index end8 = nr>=8 ? (std::min)(k2+rows,packet_cols8) : k2;
+ if(nr>=8)
{
- // again we can split vertically in three different parts (transpose, symmetric, normal)
- // transpose
- for(Index k=k2; k<j2; k++)
+ for(Index j2=k2; j2<end8; j2+=8)
{
- blockB[count+0] = numext::conj(rhs(j2+0,k));
- blockB[count+1] = numext::conj(rhs(j2+1,k));
- if (nr>=4)
+ // again we can split vertically in three different parts (transpose, symmetric, normal)
+ // transpose
+ for(Index k=k2; k<j2; k++)
{
+ blockB[count+0] = numext::conj(rhs(j2+0,k));
+ blockB[count+1] = numext::conj(rhs(j2+1,k));
blockB[count+2] = numext::conj(rhs(j2+2,k));
blockB[count+3] = numext::conj(rhs(j2+3,k));
- }
- if (nr>=8)
- {
blockB[count+4] = numext::conj(rhs(j2+4,k));
blockB[count+5] = numext::conj(rhs(j2+5,k));
blockB[count+6] = numext::conj(rhs(j2+6,k));
blockB[count+7] = numext::conj(rhs(j2+7,k));
+ count += 8;
}
- count += nr;
- }
- // symmetric
- Index h = 0;
- for(Index k=j2; k<j2+nr; k++)
- {
- // normal
- for (Index w=0 ; w<h; ++w)
- blockB[count+w] = rhs(k,j2+w);
+ // symmetric
+ Index h = 0;
+ for(Index k=j2; k<j2+8; k++)
+ {
+ // normal
+ for (Index w=0 ; w<h; ++w)
+ blockB[count+w] = rhs(k,j2+w);
- blockB[count+h] = numext::real(rhs(k,k));
+ blockB[count+h] = numext::real(rhs(k,k));
- // transpose
- for (Index w=h+1 ; w<nr; ++w)
- blockB[count+w] = numext::conj(rhs(j2+w,k));
- count += nr;
- ++h;
- }
- // normal
- for(Index k=j2+nr; k<end_k; k++)
- {
- blockB[count+0] = rhs(k,j2+0);
- blockB[count+1] = rhs(k,j2+1);
- if (nr>=4)
+ // transpose
+ for (Index w=h+1 ; w<8; ++w)
+ blockB[count+w] = numext::conj(rhs(j2+w,k));
+ count += 8;
+ ++h;
+ }
+ // normal
+ for(Index k=j2+8; k<end_k; k++)
{
+ blockB[count+0] = rhs(k,j2+0);
+ blockB[count+1] = rhs(k,j2+1);
blockB[count+2] = rhs(k,j2+2);
blockB[count+3] = rhs(k,j2+3);
- }
- if (nr>=8)
- {
blockB[count+4] = rhs(k,j2+4);
blockB[count+5] = rhs(k,j2+5);
blockB[count+6] = rhs(k,j2+6);
blockB[count+7] = rhs(k,j2+7);
+ count += 8;
}
- count += nr;
}
}
-
- // third part: transposed
- for(Index j2=k2+rows; j2<packet_cols; j2+=nr)
+ if(nr>=4)
{
- for(Index k=k2; k<end_k; k++)
+ for(Index j2=end8; j2<(std::min)(k2+rows,packet_cols4); j2+=4)
{
- blockB[count+0] = numext::conj(rhs(j2+0,k));
- blockB[count+1] = numext::conj(rhs(j2+1,k));
- if (nr>=4)
+ // again we can split vertically in three different parts (transpose, symmetric, normal)
+ // transpose
+ for(Index k=k2; k<j2; k++)
{
+ blockB[count+0] = numext::conj(rhs(j2+0,k));
+ blockB[count+1] = numext::conj(rhs(j2+1,k));
blockB[count+2] = numext::conj(rhs(j2+2,k));
blockB[count+3] = numext::conj(rhs(j2+3,k));
+ count += 4;
}
- if (nr>=8)
+ // symmetric
+ Index h = 0;
+ for(Index k=j2; k<j2+4; k++)
+ {
+ // normal
+ for (Index w=0 ; w<h; ++w)
+ blockB[count+w] = rhs(k,j2+w);
+
+ blockB[count+h] = numext::real(rhs(k,k));
+
+ // transpose
+ for (Index w=h+1 ; w<4; ++w)
+ blockB[count+w] = numext::conj(rhs(j2+w,k));
+ count += 4;
+ ++h;
+ }
+ // normal
+ for(Index k=j2+4; k<end_k; k++)
+ {
+ blockB[count+0] = rhs(k,j2+0);
+ blockB[count+1] = rhs(k,j2+1);
+ blockB[count+2] = rhs(k,j2+2);
+ blockB[count+3] = rhs(k,j2+3);
+ count += 4;
+ }
+ }
+ }
+
+ // third part: transposed
+ if(nr>=8)
+ {
+ for(Index j2=k2+rows; j2<packet_cols8; j2+=8)
+ {
+ for(Index k=k2; k<end_k; k++)
{
+ blockB[count+0] = numext::conj(rhs(j2+0,k));
+ blockB[count+1] = numext::conj(rhs(j2+1,k));
+ blockB[count+2] = numext::conj(rhs(j2+2,k));
+ blockB[count+3] = numext::conj(rhs(j2+3,k));
blockB[count+4] = numext::conj(rhs(j2+4,k));
blockB[count+5] = numext::conj(rhs(j2+5,k));
blockB[count+6] = numext::conj(rhs(j2+6,k));
blockB[count+7] = numext::conj(rhs(j2+7,k));
+ count += 8;
+ }
+ }
+ }
+ if(nr>=4)
+ {
+ for(Index j2=(std::max)(packet_cols8,k2+rows); j2<packet_cols4; j2+=4)
+ {
+ for(Index k=k2; k<end_k; k++)
+ {
+ blockB[count+0] = numext::conj(rhs(j2+0,k));
+ blockB[count+1] = numext::conj(rhs(j2+1,k));
+ blockB[count+2] = numext::conj(rhs(j2+2,k));
+ blockB[count+3] = numext::conj(rhs(j2+3,k));
+ count += 4;
}
- count += nr;
}
}
// copy the remaining columns one at a time (=> the same with nr==1)
- for(Index j2=packet_cols; j2<cols; ++j2)
+ for(Index j2=packet_cols4; j2<cols; ++j2)
{
// transpose
Index half = (std::min)(end_k,j2);
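The diagonal block packed above is where the three access patterns meet in a single panel: entries left of the diagonal are read directly, the diagonal entry keeps only its real part, and entries right of the diagonal are read from the conjugated transpose, exploiting the self-adjoint storage. As a hedged illustration (hypothetical helper, not Eigen's actual routine), here is the 4-wide case in isolation:

// Sketch of packing one 4-wide diagonal panel of a self-adjoint rhs.
#include <complex>

typedef long Index;
typedef std::complex<double> Scalar;

// Stand-in for Eigen's const_blas_data_mapper: column-major element access.
struct Mapper
{
  const Scalar* data; Index stride;
  Scalar operator()(Index i, Index j) const { return data[i + j*stride]; }
};

void pack_diagonal_panel4(Scalar* blockB, Index& count, const Mapper& rhs, Index j2)
{
  Index h = 0;                                 // position of the diagonal in this row
  for (Index k = j2; k < j2 + 4; k++)
  {
    for (Index w = 0; w < h; ++w)              // left of the diagonal: plain read
      blockB[count + w] = rhs(k, j2 + w);
    blockB[count + h] = std::real(rhs(k, k));  // diagonal: real part only
    for (Index w = h + 1; w < 4; ++w)          // right: conjugated transpose read
      blockB[count + w] = std::conj(rhs(j2 + w, k));
    count += 4;
    ++h;
  }
}

The 8-wide pass in the patch is the same logic with 8 in place of 4.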
@@ -289,11 +334,10 @@ EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,t
// kc must be smaller than mc
kc = (std::min)(kc,mc);
- std::size_t sizeW = kc*Traits::WorkSpaceFactor;
- std::size_t sizeB = sizeW + kc*cols;
+ std::size_t sizeB = kc*cols;
ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0);
ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0);
- Scalar* blockB = allocatedBlockB + sizeW;
+ Scalar* blockB = allocatedBlockB;
gebp_kernel<Scalar, Scalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
symm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
@@ -376,11 +420,10 @@ EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,f
Index mc = rows; // cache block size along the M direction
Index nc = cols; // cache block size along the N direction
computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc);
- std::size_t sizeW = kc*Traits::WorkSpaceFactor;
- std::size_t sizeB = sizeW + kc*cols;
+ std::size_t sizeB = kc*cols;
ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0);
ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0);
- Scalar* blockB = allocatedBlockB + sizeW;
+ Scalar* blockB = allocatedBlockB;
gebp_kernel<Scalar, Scalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
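The last two hunks make the same simplification in both product_selfadjoint_matrix specializations: the updated gebp kernel no longer needs a scratch workspace ahead of the packed rhs, so the sizeW = kc*Traits::WorkSpaceFactor term disappears and blockB points at the start of the allocation. A tiny sketch (hypothetical function name) of the size computation before and after:

// Sketch of the rhs-buffer sizing change in the last two hunks.
#include <cstddef>

std::size_t rhs_buffer_size(std::size_t kc, std::size_t cols,
                            std::size_t workspace_factor, bool updated_kernel)
{
  if (updated_kernel)
    return kc * cols;                        // new: packed rhs only
  return kc * workspace_factor + kc * cols;  // old: scratch space + packed rhs
}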