author    Gael Guennebaud <g.gael@free.fr>    2014-03-26 23:22:36 +0100
committer Gael Guennebaud <g.gael@free.fr>    2014-03-26 23:22:36 +0100
commit f0a4c9d5abe94703d8b4e5ec49cba20df014d0ca
tree   6bf541081445af66481d146a7a2b9fcca7cd434d
parent 8be011e7765cf5d30cad86edd55850b0b1277741
Update gebp kernel to process a panel of 4 columns at once for the remaining ones.
-rw-r--r--  Eigen/src/Core/products/GeneralBlockPanelKernel.h | 448
-rw-r--r--  Eigen/src/Core/products/SelfadjointMatrixMatrix.h | 149
2 files changed, 370 insertions(+), 227 deletions(-)
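
The core of the change: instead of handling all full nr-wide column panels in a single loop and sending every leftover column to the one-column fallback, the kernel now makes a first pass over 8-column panels (when nr>=8), a second pass over at most one 4-column panel, and only then processes single columns. A minimal sketch of the new partitioning, with the register-blocked micro-kernels replaced by placeholder functions (process_panel8/process_panel4/process_col1 are illustrative names, not Eigen API):

    #include <cstddef>
    typedef std::ptrdiff_t Index;

    void process_panel8(Index j2);   // hypothetical depth x 8 micro-kernel
    void process_panel4(Index j2);   // hypothetical depth x 4 micro-kernel
    void process_col1 (Index j2);    // hypothetical single-column fallback

    void gebp_column_passes(Index cols, Index nr)
    {
      // columns coverable by full 8-wide panels (only if the kernel supports nr>=8)
      Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
      // columns coverable by panels of at least 4 (includes the 8-wide range)
      Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;

      if(nr>=8)                                        // first pass: depth x 8 panels
        for(Index j2=0; j2<packet_cols8; j2+=8)
          process_panel8(j2);

      if(nr>=4)                                        // second pass: the leftover 4-wide panel
        for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
          process_panel4(j2);

      for(Index j2=packet_cols4; j2<cols; ++j2)        // remaining columns one by one
        process_col1(j2);
    }

For example, with nr==8 and cols==23 the three passes cover columns 0..15, 16..19, and 20..22; before this commit, columns 16..22 would all have fallen to the one-column loop.
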
diff --git a/Eigen/src/Core/products/GeneralBlockPanelKernel.h b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
index 8a398d912..2b520383b 100644
--- a/Eigen/src/Core/products/GeneralBlockPanelKernel.h
+++ b/Eigen/src/Core/products/GeneralBlockPanelKernel.h
@@ -167,8 +167,6 @@ public:
// register block size along the M direction (currently, this one cannot be modified)
mr = LhsPacketSize,
- WorkSpaceFactor = nr * RhsPacketSize,
-
LhsProgress = LhsPacketSize,
RhsProgress = 1
};
@@ -242,7 +240,6 @@ public:
NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
nr = NumberOfRegisters/2,
mr = LhsPacketSize,
- WorkSpaceFactor = nr*RhsPacketSize,
LhsProgress = LhsPacketSize,
RhsProgress = 1
@@ -327,7 +324,6 @@ public:
// FIXME: should depend on NumberOfRegisters
nr = 4,
mr = ResPacketSize,
- WorkSpaceFactor = Vectorizable ? 2*nr*RealPacketSize : nr,
LhsProgress = ResPacketSize,
RhsProgress = 1
@@ -459,7 +455,6 @@ public:
// FIXME: should depend on NumberOfRegisters
nr = 4,
mr = ResPacketSize,
- WorkSpaceFactor = nr*RhsPacketSize,
LhsProgress = ResPacketSize,
RhsProgress = 1
@@ -564,63 +559,45 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,mr,nr,ConjugateLhs,ConjugateRhs>
if(strideA==-1) strideA = depth;
if(strideB==-1) strideB = depth;
conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
- Index packet_cols = (cols/nr) * nr;
+ Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
+ Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
// Here we assume that mr==LhsProgress
const Index peeled_mc = (rows/mr)*mr;
const Index peeled_kc = (depth/4)*4;
// loops on each micro vertical panel of rhs (depth x nr)
- for(Index j2=0; j2<packet_cols; j2+=nr)
+ // First pass using depth x 8 panels
+ if(nr>=8)
{
- // loops on each largest micro horizontal panel of lhs (mr x depth)
- // => we select a mr x nr micro block of res which is entirely
- // stored into mr/packet_size x nr registers.
- for(Index i=0; i<peeled_mc; i+=mr)
+ for(Index j2=0; j2<packet_cols8; j2+=nr)
{
- const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];
- // prefetch(&blA[0]);
-
- // gets res block as register
- AccPacket C0, C1, C2, C3, C4, C5, C6, C7;
- traits.initAcc(C0);
- traits.initAcc(C1);
- traits.initAcc(C2);
- traits.initAcc(C3);
- if(nr==8) traits.initAcc(C4);
- if(nr==8) traits.initAcc(C5);
- if(nr==8) traits.initAcc(C6);
- if(nr==8) traits.initAcc(C7);
-
- ResScalar* r0 = &res[(j2+0)*resStride + i];
-
- // performs "inner" products
- const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
- LhsPacket A0, A1;
- // uncomment for register prefetching
- // traits.loadLhs(blA, A0);
- for(Index k=0; k<peeled_kc; k+=4)
+ // loops on each largest micro horizontal panel of lhs (mr x depth)
+ // => we select a mr x nr micro block of res which is entirely
+ // stored into mr/packet_size x nr registers.
+ for(Index i=0; i<peeled_mc; i+=mr)
{
- if(nr==4)
- {
- EIGEN_ASM_COMMENT("begin gegp micro kernel 1p x 4");
-
- RhsPacket B_0, B1;
-
-#define EIGEN_GEBGP_ONESTEP4(K) \
- traits.loadLhs(&blA[K*LhsProgress], A0); \
- traits.broadcastRhs(&blB[0+4*K*RhsProgress], B_0, B1); \
- traits.madd(A0, B_0,C0, B_0); \
- traits.madd(A0, B1, C1, B1); \
- traits.broadcastRhs(&blB[2+4*K*RhsProgress], B_0, B1); \
- traits.madd(A0, B_0,C2, B_0); \
- traits.madd(A0, B1, C3, B1)
-
- EIGEN_GEBGP_ONESTEP4(0);
- EIGEN_GEBGP_ONESTEP4(1);
- EIGEN_GEBGP_ONESTEP4(2);
- EIGEN_GEBGP_ONESTEP4(3);
- }
- else // nr==8
+ const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];
+ // prefetch(&blA[0]);
+
+ // gets res block as register
+ AccPacket C0, C1, C2, C3, C4, C5, C6, C7;
+ traits.initAcc(C0);
+ traits.initAcc(C1);
+ traits.initAcc(C2);
+ traits.initAcc(C3);
+ traits.initAcc(C4);
+ traits.initAcc(C5);
+ traits.initAcc(C6);
+ traits.initAcc(C7);
+
+ ResScalar* r0 = &res[(j2+0)*resStride + i];
+
+ // performs "inner" products
+ const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
+ LhsPacket A0, A1;
+ // uncomment for register prefetching
+ // traits.loadLhs(blA, A0);
+ for(Index k=0; k<peeled_kc; k+=4)
{
EIGEN_ASM_COMMENT("begin gegp micro kernel 1p x 8");
RhsPacket B_0, B1, B2, B3;
@@ -660,35 +637,23 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,mr,nr,ConjugateLhs,ConjugateRhs>
EIGEN_GEBGP_ONESTEP8(1,A0,A1);
EIGEN_GEBGP_ONESTEP8(2,A1,A0);
EIGEN_GEBGP_ONESTEP8(3,A0,A1);
- }
- blB += 4*nr*RhsProgress;
- blA += 4*mr;
- }
- // process remaining peeled loop
- for(Index k=peeled_kc; k<depth; k++)
- {
- if(nr==4)
- {
- RhsPacket B_0, B1;
- EIGEN_GEBGP_ONESTEP4(0);
+ blB += 4*8*RhsProgress;
+ blA += 4*mr;
}
- else // nr == 8
+ // process remaining peeled loop
+ for(Index k=peeled_kc; k<depth; k++)
{
RhsPacket B_0, B1, B2, B3;
EIGEN_GEBGP_ONESTEP8(0,A1,A0);
// uncomment for register prefetching
// A0 = A1;
- }
- blB += nr*RhsProgress;
- blA += mr;
- }
-#undef EIGEN_GEBGP_ONESTEP4
-#undef EIGEN_GEBGP_ONESTEP8
+ blB += 8*RhsProgress;
+ blA += mr;
+ }
+ #undef EIGEN_GEBGP_ONESTEP8
- if(nr==8)
- {
ResPacket R0, R1, R2, R3, R4, R5, R6;
ResPacket alphav = pset1<ResPacket>(alpha);
@@ -718,9 +683,122 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,mr,nr,ConjugateLhs,ConjugateRhs>
pstoreu(r0+5*resStride, R5);
pstoreu(r0+6*resStride, R6);
pstoreu(r0+7*resStride, R0);
+
+ }
+
+ for(Index i=peeled_mc; i<rows; i++)
+ {
+ const LhsScalar* blA = &blockA[i*strideA+offsetA];
+ prefetch(&blA[0]);
+
+ // gets a 1 x 8 res block as registers
+ ResScalar C0(0), C1(0), C2(0), C3(0), C4(0), C5(0), C6(0), C7(0);
+ // FIXME directly use blockB ???
+ const RhsScalar* blB = &blockB[j2*strideB+offsetB*8];
+ // TODO peel this loop
+ for(Index k=0; k<depth; k++)
+ {
+ LhsScalar A0;
+ RhsScalar B_0, B_1;
+
+ A0 = blA[k];
+
+ B_0 = blB[0];
+ B_1 = blB[1];
+ MADD(cj,A0,B_0,C0, B_0);
+ MADD(cj,A0,B_1,C1, B_1);
+
+ B_0 = blB[2];
+ B_1 = blB[3];
+ MADD(cj,A0,B_0,C2, B_0);
+ MADD(cj,A0,B_1,C3, B_1);
+
+ B_0 = blB[4];
+ B_1 = blB[5];
+ MADD(cj,A0,B_0,C4, B_0);
+ MADD(cj,A0,B_1,C5, B_1);
+
+ B_0 = blB[6];
+ B_1 = blB[7];
+ MADD(cj,A0,B_0,C6, B_0);
+ MADD(cj,A0,B_1,C7, B_1);
+
+ blB += 8;
+ }
+ res[(j2+0)*resStride + i] += alpha*C0;
+ res[(j2+1)*resStride + i] += alpha*C1;
+ res[(j2+2)*resStride + i] += alpha*C2;
+ res[(j2+3)*resStride + i] += alpha*C3;
+ res[(j2+4)*resStride + i] += alpha*C4;
+ res[(j2+5)*resStride + i] += alpha*C5;
+ res[(j2+6)*resStride + i] += alpha*C6;
+ res[(j2+7)*resStride + i] += alpha*C7;
}
- else // nr==4
+ }
+ }
+
+ // Second pass using depth x 4 panels
+ // If nr==8, then we have at most one such panel
+ if(nr>=4)
+ {
+ for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
+ {
+ // loops on each largest micro horizontal panel of lhs (mr x depth)
+ // => we select a mr x 4 micro block of res which is entirely
+ // stored into mr/packet_size x 4 registers.
+ for(Index i=0; i<peeled_mc; i+=mr)
{
+ const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];
+ // prefetch(&blA[0]);
+
+ // gets res block as register
+ AccPacket C0, C1, C2, C3;
+ traits.initAcc(C0);
+ traits.initAcc(C1);
+ traits.initAcc(C2);
+ traits.initAcc(C3);
+
+ ResScalar* r0 = &res[(j2+0)*resStride + i];
+
+ // performs "inner" products
+ const RhsScalar* blB = &blockB[j2*strideB+offsetB*4];
+ LhsPacket A0, A1;
+ // uncomment for register prefetching
+ // traits.loadLhs(blA, A0);
+ for(Index k=0; k<peeled_kc; k+=4)
+ {
+ EIGEN_ASM_COMMENT("begin gegp micro kernel 1p x 4");
+
+ RhsPacket B_0, B1;
+
+#define EIGEN_GEBGP_ONESTEP4(K) \
+ traits.loadLhs(&blA[K*LhsProgress], A0); \
+ traits.broadcastRhs(&blB[0+4*K*RhsProgress], B_0, B1); \
+ traits.madd(A0, B_0,C0, B_0); \
+ traits.madd(A0, B1, C1, B1); \
+ traits.broadcastRhs(&blB[2+4*K*RhsProgress], B_0, B1); \
+ traits.madd(A0, B_0,C2, B_0); \
+ traits.madd(A0, B1, C3, B1)
+
+ EIGEN_GEBGP_ONESTEP4(0);
+ EIGEN_GEBGP_ONESTEP4(1);
+ EIGEN_GEBGP_ONESTEP4(2);
+ EIGEN_GEBGP_ONESTEP4(3);
+
+ blB += 4*4*RhsProgress;
+ blA += 4*mr;
+ }
+ // process remaining peeled loop
+ for(Index k=peeled_kc; k<depth; k++)
+ {
+ RhsPacket B_0, B1;
+ EIGEN_GEBGP_ONESTEP4(0);
+
+ blB += 4*RhsProgress;
+ blA += mr;
+ }
+ #undef EIGEN_GEBGP_ONESTEP4
+
ResPacket R0, R1, R2;
ResPacket alphav = pset1<ResPacket>(alpha);
@@ -740,21 +818,17 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,mr,nr,ConjugateLhs,ConjugateRhs>
pstoreu(r0+3*resStride, R0);
}
- }
-
- for(Index i=peeled_mc; i<rows; i++)
- {
- const LhsScalar* blA = &blockA[i*strideA+offsetA];
- prefetch(&blA[0]);
-
- // gets a 1 x nr res block as registers
- ResScalar C0(0), C1(0), C2(0), C3(0), C4(0), C5(0), C6(0), C7(0);
- // FIXME directly use blockB ???
- const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
- // TODO peel this loop
- for(Index k=0; k<depth; k++)
+ for(Index i=peeled_mc; i<rows; i++)
{
- if(nr==4)
+ const LhsScalar* blA = &blockA[i*strideA+offsetA];
+ prefetch(&blA[0]);
+
+ // gets a 1 x 4 res block as registers
+ ResScalar C0(0), C1(0), C2(0), C3(0);
+ // FIXME directly use blockB ???
+ const RhsScalar* blB = &blockB[j2*strideB+offsetB*4];
+ // TODO peel this loop
+ for(Index k=0; k<depth; k++)
{
LhsScalar A0;
RhsScalar B_0, B_1;
@@ -770,51 +844,20 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,mr,nr,ConjugateLhs,ConjugateRhs>
B_1 = blB[3];
MADD(cj,A0,B_0,C2, B_0);
MADD(cj,A0,B_1,C3, B_1);
- }
- else // nr==8
- {
- LhsScalar A0;
- RhsScalar B_0, B_1;
- A0 = blA[k];
-
- B_0 = blB[0];
- B_1 = blB[1];
- MADD(cj,A0,B_0,C0, B_0);
- MADD(cj,A0,B_1,C1, B_1);
-
- B_0 = blB[2];
- B_1 = blB[3];
- MADD(cj,A0,B_0,C2, B_0);
- MADD(cj,A0,B_1,C3, B_1);
-
- B_0 = blB[4];
- B_1 = blB[5];
- MADD(cj,A0,B_0,C4, B_0);
- MADD(cj,A0,B_1,C5, B_1);
-
- B_0 = blB[6];
- B_1 = blB[7];
- MADD(cj,A0,B_0,C6, B_0);
- MADD(cj,A0,B_1,C7, B_1);
+ blB += 4;
}
-
- blB += nr;
+ res[(j2+0)*resStride + i] += alpha*C0;
+ res[(j2+1)*resStride + i] += alpha*C1;
+ res[(j2+2)*resStride + i] += alpha*C2;
+ res[(j2+3)*resStride + i] += alpha*C3;
}
- res[(j2+0)*resStride + i] += alpha*C0;
- res[(j2+1)*resStride + i] += alpha*C1;
- res[(j2+2)*resStride + i] += alpha*C2;
- res[(j2+3)*resStride + i] += alpha*C3;
- if(nr==8) res[(j2+4)*resStride + i] += alpha*C4;
- if(nr==8) res[(j2+5)*resStride + i] += alpha*C5;
- if(nr==8) res[(j2+6)*resStride + i] += alpha*C6;
- if(nr==8) res[(j2+7)*resStride + i] += alpha*C7;
}
}
// process remaining rhs/res columns one at a time
// => do the same but with nr==1
- for(Index j2=packet_cols; j2<cols; j2++)
+ for(Index j2=packet_cols4; j2<cols; j2++)
{
for(Index i=0; i<peeled_mc; i+=mr)
{
@@ -1002,38 +1045,65 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, Pan
EIGEN_UNUSED_VARIABLE(offset);
eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
- Index packet_cols = (cols/nr) * nr;
+ Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
+ Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
Index count = 0;
- for(Index j2=0; j2<packet_cols; j2+=nr)
+ if(nr>=8)
{
- // skip what we have before
- if(PanelMode) count += nr * offset;
- const Scalar* b0 = &rhs[(j2+0)*rhsStride];
- const Scalar* b1 = &rhs[(j2+1)*rhsStride];
- const Scalar* b2 = &rhs[(j2+2)*rhsStride];
- const Scalar* b3 = &rhs[(j2+3)*rhsStride];
- const Scalar* b4 = &rhs[(j2+4)*rhsStride];
- const Scalar* b5 = &rhs[(j2+5)*rhsStride];
- const Scalar* b6 = &rhs[(j2+6)*rhsStride];
- const Scalar* b7 = &rhs[(j2+7)*rhsStride];
- for(Index k=0; k<depth; k++)
+ for(Index j2=0; j2<packet_cols8; j2+=8)
{
- blockB[count+0] = cj(b0[k]);
- blockB[count+1] = cj(b1[k]);
- if(nr>=4) blockB[count+2] = cj(b2[k]);
- if(nr>=4) blockB[count+3] = cj(b3[k]);
- if(nr>=8) blockB[count+4] = cj(b4[k]);
- if(nr>=8) blockB[count+5] = cj(b5[k]);
- if(nr>=8) blockB[count+6] = cj(b6[k]);
- if(nr>=8) blockB[count+7] = cj(b7[k]);
- count += nr;
+ // skip what we have before
+ if(PanelMode) count += 8 * offset;
+ const Scalar* b0 = &rhs[(j2+0)*rhsStride];
+ const Scalar* b1 = &rhs[(j2+1)*rhsStride];
+ const Scalar* b2 = &rhs[(j2+2)*rhsStride];
+ const Scalar* b3 = &rhs[(j2+3)*rhsStride];
+ const Scalar* b4 = &rhs[(j2+4)*rhsStride];
+ const Scalar* b5 = &rhs[(j2+5)*rhsStride];
+ const Scalar* b6 = &rhs[(j2+6)*rhsStride];
+ const Scalar* b7 = &rhs[(j2+7)*rhsStride];
+ for(Index k=0; k<depth; k++)
+ {
+ blockB[count+0] = cj(b0[k]);
+ blockB[count+1] = cj(b1[k]);
+ blockB[count+2] = cj(b2[k]);
+ blockB[count+3] = cj(b3[k]);
+ blockB[count+4] = cj(b4[k]);
+ blockB[count+5] = cj(b5[k]);
+ blockB[count+6] = cj(b6[k]);
+ blockB[count+7] = cj(b7[k]);
+ count += 8;
+ }
+ // skip what we have after
+ if(PanelMode) count += 8 * (stride-offset-depth);
+ }
+ }
+
+ if(nr>=4)
+ {
+ for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
+ {
+ // skip what we have before
+ if(PanelMode) count += 4 * offset;
+ const Scalar* b0 = &rhs[(j2+0)*rhsStride];
+ const Scalar* b1 = &rhs[(j2+1)*rhsStride];
+ const Scalar* b2 = &rhs[(j2+2)*rhsStride];
+ const Scalar* b3 = &rhs[(j2+3)*rhsStride];
+ for(Index k=0; k<depth; k++)
+ {
+ blockB[count+0] = cj(b0[k]);
+ blockB[count+1] = cj(b1[k]);
+ blockB[count+2] = cj(b2[k]);
+ blockB[count+3] = cj(b3[k]);
+ count += 4;
+ }
+ // skip what we have after
+ if(PanelMode) count += 4 * (stride-offset-depth);
}
- // skip what we have after
- if(PanelMode) count += nr * (stride-offset-depth);
}
// copy the remaining columns one at a time (nr==1)
- for(Index j2=packet_cols; j2<cols; ++j2)
+ for(Index j2=packet_cols4; j2<cols; ++j2)
{
if(PanelMode) count += offset;
const Scalar* b0 = &rhs[(j2+0)*rhsStride];
@@ -1064,36 +1134,66 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, Pan
EIGEN_UNUSED_VARIABLE(offset);
eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
- Index packet_cols = (cols/nr) * nr;
+ Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
+ Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
Index count = 0;
- for(Index j2=0; j2<packet_cols; j2+=nr)
+
+ if(nr>=8)
{
- // skip what we have before
- if(PanelMode) count += nr * offset;
- for(Index k=0; k<depth; k++)
+ for(Index j2=0; j2<packet_cols8; j2+=8)
{
- if (nr == PacketSize) {
- Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);
- pstoreu(blockB+count, cj.pconj(A));
- count += PacketSize;
- } else {
- const Scalar* b0 = &rhs[k*rhsStride + j2];
- blockB[count+0] = cj(b0[0]);
- blockB[count+1] = cj(b0[1]);
- if(nr>=4) blockB[count+2] = cj(b0[2]);
- if(nr>=4) blockB[count+3] = cj(b0[3]);
- if(nr>=8) blockB[count+4] = cj(b0[4]);
- if(nr>=8) blockB[count+5] = cj(b0[5]);
- if(nr>=8) blockB[count+6] = cj(b0[6]);
- if(nr>=8) blockB[count+7] = cj(b0[7]);
- count += nr;
+ // skip what we have before
+ if(PanelMode) count += 8 * offset;
+ for(Index k=0; k<depth; k++)
+ {
+ if (8 == PacketSize) {
+ Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);
+ pstoreu(blockB+count, cj.pconj(A));
+ count += PacketSize;
+ } else {
+ const Scalar* b0 = &rhs[k*rhsStride + j2];
+ blockB[count+0] = cj(b0[0]);
+ blockB[count+1] = cj(b0[1]);
+ blockB[count+2] = cj(b0[2]);
+ blockB[count+3] = cj(b0[3]);
+ blockB[count+4] = cj(b0[4]);
+ blockB[count+5] = cj(b0[5]);
+ blockB[count+6] = cj(b0[6]);
+ blockB[count+7] = cj(b0[7]);
+ count += 8;
+ }
+ }
+ // skip what we have after
+ if(PanelMode) count += 8 * (stride-offset-depth);
+ }
+ }
+ if(nr>=4)
+ {
+ for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)
+ {
+ // skip what we have before
+ if(PanelMode) count += 4 * offset;
+ for(Index k=0; k<depth; k++)
+ {
+ if (4 == PacketSize) {
+ Packet A = ploadu<Packet>(&rhs[k*rhsStride + j2]);
+ pstoreu(blockB+count, cj.pconj(A));
+ count += PacketSize;
+ } else {
+ const Scalar* b0 = &rhs[k*rhsStride + j2];
+ blockB[count+0] = cj(b0[0]);
+ blockB[count+1] = cj(b0[1]);
+ blockB[count+2] = cj(b0[2]);
+ blockB[count+3] = cj(b0[3]);
+ count += 4;
+ }
}
+ // skip what we have after
+ if(PanelMode) count += 4 * (stride-offset-depth);
}
- // skip what we have after
- if(PanelMode) count += nr * (stride-offset-depth);
}
// copy the remaining columns one at a time (nr==1)
- for(Index j2=packet_cols; j2<cols; ++j2)
+ for(Index j2=packet_cols4; j2<cols; ++j2)
{
if(PanelMode) count += offset;
const Scalar* b0 = &rhs[j2];
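
The gemm_pack_rhs paths are reorganized along the same lines, so that the interleaved layout of blockB matches what the kernel's two passes expect: 8-column groups first, then 4-column groups, then single columns. Below is a minimal sketch of the resulting layout for a column-major rhs, assuming real scalars, no conjugation and no panel mode (the function name and the use of double are illustrative, not Eigen's):

    #include <cstddef>
    typedef std::ptrdiff_t Index;

    // Illustrative packing of a column-major rhs block into blockB,
    // mirroring the 8/4/1 column partition used by the kernel.
    void pack_rhs_sketch(double* blockB, const double* rhs, Index rhsStride,
                         Index depth, Index cols, Index nr)
    {
      Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
      Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
      Index count = 0;

      for(Index j2=0; j2<packet_cols8; j2+=8)             // 8 columns interleaved per k
        for(Index k=0; k<depth; k++)
          for(Index c=0; c<8; c++)
            blockB[count++] = rhs[(j2+c)*rhsStride + k];

      for(Index j2=packet_cols8; j2<packet_cols4; j2+=4)  // then 4 columns per k
        for(Index k=0; k<depth; k++)
          for(Index c=0; c<4; c++)
            blockB[count++] = rhs[(j2+c)*rhsStride + k];

      for(Index j2=packet_cols4; j2<cols; ++j2)           // remaining columns contiguously
        for(Index k=0; k<depth; k++)
          blockB[count++] = rhs[j2*rhsStride + k];
    }
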
diff --git a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
index d9fd9f556..34480c707 100644
--- a/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
+++ b/Eigen/src/Core/products/SelfadjointMatrixMatrix.h
@@ -82,7 +82,8 @@ struct symm_pack_rhs
Index end_k = k2 + rows;
Index count = 0;
const_blas_data_mapper<Scalar,Index,StorageOrder> rhs(_rhs,rhsStride);
- Index packet_cols = (cols/nr)*nr;
+ Index packet_cols8 = nr>=8 ? (cols/8) * 8 : 0;
+ Index packet_cols4 = nr>=4 ? (cols/4) * 4 : 0;
// first part: normal case
for(Index j2=0; j2<k2; j2+=nr)
@@ -108,90 +109,134 @@ struct symm_pack_rhs
}
// second part: diagonal block
- for(Index j2=k2; j2<(std::min)(k2+rows,packet_cols); j2+=nr)
+ Index end8 = nr>=8 ? (std::min)(k2+rows,packet_cols8) : k2;
+ if(nr>=8)
{
- // again we can split vertically in three different parts (transpose, symmetric, normal)
- // transpose
- for(Index k=k2; k<j2; k++)
+ for(Index j2=k2; j2<end8; j2+=8)
{
- blockB[count+0] = numext::conj(rhs(j2+0,k));
- blockB[count+1] = numext::conj(rhs(j2+1,k));
- if (nr>=4)
+ // again we can split vertically in three different parts (transpose, symmetric, normal)
+ // transpose
+ for(Index k=k2; k<j2; k++)
{
+ blockB[count+0] = numext::conj(rhs(j2+0,k));
+ blockB[count+1] = numext::conj(rhs(j2+1,k));
blockB[count+2] = numext::conj(rhs(j2+2,k));
blockB[count+3] = numext::conj(rhs(j2+3,k));
- }
- if (nr>=8)
- {
blockB[count+4] = numext::conj(rhs(j2+4,k));
blockB[count+5] = numext::conj(rhs(j2+5,k));
blockB[count+6] = numext::conj(rhs(j2+6,k));
blockB[count+7] = numext::conj(rhs(j2+7,k));
+ count += 8;
}
- count += nr;
- }
- // symmetric
- Index h = 0;
- for(Index k=j2; k<j2+nr; k++)
- {
- // normal
- for (Index w=0 ; w<h; ++w)
- blockB[count+w] = rhs(k,j2+w);
+ // symmetric
+ Index h = 0;
+ for(Index k=j2; k<j2+8; k++)
+ {
+ // normal
+ for (Index w=0 ; w<h; ++w)
+ blockB[count+w] = rhs(k,j2+w);
- blockB[count+h] = numext::real(rhs(k,k));
+ blockB[count+h] = numext::real(rhs(k,k));
- // transpose
- for (Index w=h+1 ; w<nr; ++w)
- blockB[count+w] = numext::conj(rhs(j2+w,k));
- count += nr;
- ++h;
- }
- // normal
- for(Index k=j2+nr; k<end_k; k++)
- {
- blockB[count+0] = rhs(k,j2+0);
- blockB[count+1] = rhs(k,j2+1);
- if (nr>=4)
+ // transpose
+ for (Index w=h+1 ; w<8; ++w)
+ blockB[count+w] = numext::conj(rhs(j2+w,k));
+ count += 8;
+ ++h;
+ }
+ // normal
+ for(Index k=j2+8; k<end_k; k++)
{
+ blockB[count+0] = rhs(k,j2+0);
+ blockB[count+1] = rhs(k,j2+1);
blockB[count+2] = rhs(k,j2+2);
blockB[count+3] = rhs(k,j2+3);
- }
- if (nr>=8)
- {
blockB[count+4] = rhs(k,j2+4);
blockB[count+5] = rhs(k,j2+5);
blockB[count+6] = rhs(k,j2+6);
blockB[count+7] = rhs(k,j2+7);
+ count += 8;
}
- count += nr;
}
}
-
- // third part: transposed
- for(Index j2=k2+rows; j2<packet_cols; j2+=nr)
+ if(nr>=4)
{
- for(Index k=k2; k<end_k; k++)
+ for(Index j2=end8; j2<(std::min)(k2+rows,packet_cols4); j2+=4)
{
- blockB[count+0] = numext::conj(rhs(j2+0,k));
- blockB[count+1] = numext::conj(rhs(j2+1,k));
- if (nr>=4)
+ // again we can split vertically in three different parts (transpose, symmetric, normal)
+ // transpose
+ for(Index k=k2; k<j2; k++)
{
+ blockB[count+0] = numext::conj(rhs(j2+0,k));
+ blockB[count+1] = numext::conj(rhs(j2+1,k));
blockB[count+2] = numext::conj(rhs(j2+2,k));
blockB[count+3] = numext::conj(rhs(j2+3,k));
+ count += 4;
}
- if (nr>=8)
+ // symmetric
+ Index h = 0;
+ for(Index k=j2; k<j2+4; k++)
+ {
+ // normal
+ for (Index w=0 ; w<h; ++w)
+ blockB[count+w] = rhs(k,j2+w);
+
+ blockB[count+h] = numext::real(rhs(k,k));
+
+ // transpose
+ for (Index w=h+1 ; w<4; ++w)
+ blockB[count+w] = numext::conj(rhs(j2+w,k));
+ count += 4;
+ ++h;
+ }
+ // normal
+ for(Index k=j2+4; k<end_k; k++)
+ {
+ blockB[count+0] = rhs(k,j2+0);
+ blockB[count+1] = rhs(k,j2+1);
+ blockB[count+2] = rhs(k,j2+2);
+ blockB[count+3] = rhs(k,j2+3);
+ count += 4;
+ }
+ }
+ }
+
+ // third part: transposed
+ if(nr>=8)
+ {
+ for(Index j2=k2+rows; j2<packet_cols8; j2+=8)
+ {
+ for(Index k=k2; k<end_k; k++)
{
+ blockB[count+0] = numext::conj(rhs(j2+0,k));
+ blockB[count+1] = numext::conj(rhs(j2+1,k));
+ blockB[count+2] = numext::conj(rhs(j2+2,k));
+ blockB[count+3] = numext::conj(rhs(j2+3,k));
blockB[count+4] = numext::conj(rhs(j2+4,k));
blockB[count+5] = numext::conj(rhs(j2+5,k));
blockB[count+6] = numext::conj(rhs(j2+6,k));
blockB[count+7] = numext::conj(rhs(j2+7,k));
+ count += 8;
+ }
+ }
+ }
+ if(nr>=4)
+ {
+ for(Index j2=(std::max)(packet_cols8,k2+rows); j2<packet_cols4; j2+=4)
+ {
+ for(Index k=k2; k<end_k; k++)
+ {
+ blockB[count+0] = numext::conj(rhs(j2+0,k));
+ blockB[count+1] = numext::conj(rhs(j2+1,k));
+ blockB[count+2] = numext::conj(rhs(j2+2,k));
+ blockB[count+3] = numext::conj(rhs(j2+3,k));
+ count += 4;
}
- count += nr;
}
}
// copy the remaining columns one at a time (=> the same with nr==1)
- for(Index j2=packet_cols; j2<cols; ++j2)
+ for(Index j2=packet_cols4; j2<cols; ++j2)
{
// transpose
Index half = (std::min)(end_k,j2);
@@ -289,11 +334,10 @@ EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,t
// kc must smaller than mc
kc = (std::min)(kc,mc);
- std::size_t sizeW = kc*Traits::WorkSpaceFactor;
- std::size_t sizeB = sizeW + kc*cols;
+ std::size_t sizeB = kc*cols;
ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0);
ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0);
- Scalar* blockB = allocatedBlockB + sizeW;
+ Scalar* blockB = allocatedBlockB;
gebp_kernel<Scalar, Scalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
symm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
@@ -376,11 +420,10 @@ EIGEN_DONT_INLINE void product_selfadjoint_matrix<Scalar,Index,LhsStorageOrder,f
Index mc = rows; // cache block size along the M direction
Index nc = cols; // cache block size along the N direction
computeProductBlockingSizes<Scalar,Scalar>(kc, mc, nc);
- std::size_t sizeW = kc*Traits::WorkSpaceFactor;
- std::size_t sizeB = sizeW + kc*cols;
+ std::size_t sizeB = kc*cols;
ei_declare_aligned_stack_constructed_variable(Scalar, blockA, kc*mc, 0);
ei_declare_aligned_stack_constructed_variable(Scalar, allocatedBlockB, sizeB, 0);
- Scalar* blockB = allocatedBlockB + sizeW;
+ Scalar* blockB = allocatedBlockB;
gebp_kernel<Scalar, Scalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp_kernel;
gemm_pack_lhs<Scalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
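
A side effect visible in both product_selfadjoint_matrix variants above: since the kernel no longer unpacks the rhs into a per-panel scratch region, the WorkSpaceFactor term disappears and the blockB allocation shrinks to exactly kc*cols scalars. A sketch of the size computation under that assumption:

    #include <cstddef>

    // Packed-rhs buffer size per (kc x cols) block after this commit.
    // Previously this was kc*Traits::WorkSpaceFactor + kc*cols, with blockB
    // pointing just past the scratch region; the scratch region is now gone.
    std::size_t packed_rhs_buffer_size(std::size_t kc, std::size_t cols)
    {
      return kc * cols;
    }
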