// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2020 Everton Constantino (everton.constantino@ibm.com)
// Copyright (C) 2021 Chip Kerchner (chip.kerchner@ibm.com)
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_MATRIX_PRODUCT_ALTIVEC_H
#define EIGEN_MATRIX_PRODUCT_ALTIVEC_H

#include "MatrixProductCommon.h"

// Since LLVM doesn't support dynamic dispatch, force either always MMA or VSX.
#if EIGEN_COMP_LLVM
#if !defined(EIGEN_ALTIVEC_DISABLE_MMA) && !defined(EIGEN_ALTIVEC_MMA_ONLY)
#ifdef __MMA__
#define EIGEN_ALTIVEC_MMA_ONLY
#else
#define EIGEN_ALTIVEC_DISABLE_MMA
#endif
#endif
#endif

#ifdef __has_builtin
#if __has_builtin(__builtin_mma_assemble_acc)
#define ALTIVEC_MMA_SUPPORT
#endif
#endif

#if defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA)
#include "MatrixProductMMA.h"
#endif

/**************************************************************************************************
 * TODO                                                                                           *
 *  - Check StorageOrder on dhs_pack (the innermost second loop seems unvectorized when it could  *
 *    be vectorized).                                                                             *
 *  - Check the possibility of transposing as part of GETREAL and GETIMAG when needed.            *
 **************************************************************************************************/

namespace Eigen {

namespace internal {

/**************************
 * Constants and typedefs *
 **************************/
template<typename Scalar>
struct quad_traits
{
  typedef typename packet_traits<Scalar>::type    vectortype;
  typedef PacketBlock<vectortype,4>               type;
  typedef vectortype                              rhstype;
  enum
  {
    vectorsize = packet_traits<Scalar>::size,
    size = 4,
    rows = 4
  };
};

template<>
struct quad_traits<double>
{
  typedef Packet2d                        vectortype;
  typedef PacketBlock<vectortype,4>       type;
  typedef PacketBlock<Packet2d,2>         rhstype;
  enum
  {
    vectorsize = packet_traits<double>::size,
    size = 2,
    rows = 4
  };
};

// MatrixProduct decomposes complex data into a real vector and an imaginary vector; this turned out
// to be faster than Eigen's usual approach of keeping real/imaginary pairs interleaved in a single
// vector. The constants below are the permute masks used to convert between Eigen's layout and
// MatrixProduct's layout.
const static Packet16uc p16uc_GETREAL32 = {  0,  1,  2,  3,  8,  9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27};

const static Packet16uc p16uc_GETIMAG32 = {  4,  5,  6,  7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31};

const static Packet16uc p16uc_GETREAL64 = {  0,  1,  2,  3,  4,  5,  6,  7, 16, 17, 18, 19, 20, 21, 22, 23};

//[a,ai],[b,bi] = [ai,bi]
const static Packet16uc p16uc_GETIMAG64 = {  8,  9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31};

/*********************************************
 * Single precision real and complex packing *
 *********************************************/

/**
 * Symm packing packs symmetric/adjoint blocks: as expected, the packing leaves the diagonal real,
 * and whatever lies below it is copied from the corresponding element above the diagonal and
 * conjugated. There is no PanelMode available for symm packing.
 *
 * Packing in general is supposed to leave the lhs and rhs blocks easy for gemm to read with its
 * respective rank-update instructions. The float32/64 versions differ because, at the moment, the
 * size of the accumulator is fixed at 512 bits, so a 4x4 accumulator of 64-bit elements is not
 * possible.
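 *
 * For instance (an illustrative sketch of the sizes, using the types from quad_traits above):
 *
 *   PacketBlock<Packet4f,4> accf;  // 4 vectors x 4 floats  = a 4x4 float block,  4*128 = 512 bits
 *   PacketBlock<Packet2d,4> accd;  // 4 vectors x 2 doubles = a 4x2 double block, 4*128 = 512 bits
 *
 * A full 4x4 tile of doubles would need 1024 bits and therefore cannot live in one accumulator.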
* * As mentioned earlier MatrixProduct breaks complex numbers into a real vector and a complex vector so packing has * to take that into account, at the moment, we run pack the real part and then the imaginary part, this is the main * reason why packing for complex is broken down into several different parts, also the reason why we endup having a * float32/64 and complex float32/64 version. **/ template EIGEN_ALWAYS_INLINE std::complex getAdjointVal(Index i, Index j, const_blas_data_mapper, Index, StorageOrder>& dt) { std::complex v; if(i < j) { v.real( dt(j,i).real()); v.imag(-dt(j,i).imag()); } else if(i > j) { v.real( dt(i,j).real()); v.imag( dt(i,j).imag()); } else { v.real( dt(i,j).real()); v.imag((Scalar)0.0); } return v; } template EIGEN_STRONG_INLINE void symm_pack_complex_rhs_helper(std::complex* blockB, const std::complex* _rhs, Index rhsStride, Index rows, Index cols, Index k2) { const Index depth = k2 + rows; const_blas_data_mapper, Index, StorageOrder> rhs(_rhs, rhsStride); const Index vectorSize = N*quad_traits::vectorsize; const Index vectorDelta = vectorSize * rows; Scalar* blockBf = reinterpret_cast(blockB); Index rir = 0, rii, j = 0; for(; j + vectorSize <= cols; j+=vectorSize) { rii = rir + vectorDelta; for(Index i = k2; i < depth; i++) { for(Index k = 0; k < vectorSize; k++) { std::complex v = getAdjointVal(i, j + k, rhs); blockBf[rir + k] = v.real(); blockBf[rii + k] = v.imag(); } rir += vectorSize; rii += vectorSize; } rir += vectorDelta; } if (j < cols) { rii = rir + ((cols - j) * rows); for(Index i = k2; i < depth; i++) { Index k = j; for(; k < cols; k++) { std::complex v = getAdjointVal(i, k, rhs); blockBf[rir] = v.real(); blockBf[rii] = v.imag(); rir += 1; rii += 1; } } } } template EIGEN_STRONG_INLINE void symm_pack_complex_lhs_helper(std::complex* blockA, const std::complex* _lhs, Index lhsStride, Index cols, Index rows) { const Index depth = cols; const_blas_data_mapper, Index, StorageOrder> lhs(_lhs, lhsStride); const Index vectorSize = quad_traits::vectorsize; const Index vectorDelta = vectorSize * depth; Scalar* blockAf = (Scalar *)(blockA); Index rir = 0, rii, j = 0; for(; j + vectorSize <= rows; j+=vectorSize) { rii = rir + vectorDelta; for(Index i = 0; i < depth; i++) { for(Index k = 0; k < vectorSize; k++) { std::complex v = getAdjointVal(j+k, i, lhs); blockAf[rir + k] = v.real(); blockAf[rii + k] = v.imag(); } rir += vectorSize; rii += vectorSize; } rir += vectorDelta; } if (j < rows) { rii = rir + ((rows - j) * depth); for(Index i = 0; i < depth; i++) { Index k = j; for(; k < rows; k++) { std::complex v = getAdjointVal(k, i, lhs); blockAf[rir] = v.real(); blockAf[rii] = v.imag(); rir += 1; rii += 1; } } } } template EIGEN_STRONG_INLINE void symm_pack_rhs_helper(Scalar* blockB, const Scalar* _rhs, Index rhsStride, Index rows, Index cols, Index k2) { const Index depth = k2 + rows; const_blas_data_mapper rhs(_rhs, rhsStride); const Index vectorSize = quad_traits::vectorsize; Index ri = 0, j = 0; for(; j + N*vectorSize <= cols; j+=N*vectorSize) { Index i = k2; for(; i < depth; i++) { for(Index k = 0; k < N*vectorSize; k++) { if(i <= j+k) blockB[ri + k] = rhs(j+k, i); else blockB[ri + k] = rhs(i, j+k); } ri += N*vectorSize; } } if (j < cols) { for(Index i = k2; i < depth; i++) { Index k = j; for(; k < cols; k++) { if(k <= i) blockB[ri] = rhs(i, k); else blockB[ri] = rhs(k, i); ri += 1; } } } } template EIGEN_STRONG_INLINE void symm_pack_lhs_helper(Scalar* blockA, const Scalar* _lhs, Index lhsStride, Index cols, Index rows) { const Index depth = cols; 
const_blas_data_mapper lhs(_lhs, lhsStride); const Index vectorSize = quad_traits::vectorsize; Index ri = 0, j = 0; for(; j + vectorSize <= rows; j+=vectorSize) { Index i = 0; for(; i < depth; i++) { for(Index k = 0; k < vectorSize; k++) { if(i <= j+k) blockA[ri + k] = lhs(j+k, i); else blockA[ri + k] = lhs(i, j+k); } ri += vectorSize; } } if (j < rows) { for(Index i = 0; i < depth; i++) { Index k = j; for(; k < rows; k++) { if(i <= k) blockA[ri] = lhs(k, i); else blockA[ri] = lhs(i, k); ri += 1; } } } } template struct symm_pack_rhs, Index, nr, StorageOrder> { void operator()(std::complex* blockB, const std::complex* _rhs, Index rhsStride, Index rows, Index cols, Index k2) { symm_pack_complex_rhs_helper(blockB, _rhs, rhsStride, rows, cols, k2); } }; template struct symm_pack_lhs, Index, Pack1, Pack2_dummy, StorageOrder> { void operator()(std::complex* blockA, const std::complex* _lhs, Index lhsStride, Index cols, Index rows) { symm_pack_complex_lhs_helper(blockA, _lhs, lhsStride, cols, rows); } }; // *********** symm_pack std::complex *********** template struct symm_pack_rhs, Index, nr, StorageOrder> { void operator()(std::complex* blockB, const std::complex* _rhs, Index rhsStride, Index rows, Index cols, Index k2) { symm_pack_complex_rhs_helper(blockB, _rhs, rhsStride, rows, cols, k2); } }; template struct symm_pack_lhs, Index, Pack1, Pack2_dummy, StorageOrder> { void operator()(std::complex* blockA, const std::complex* _lhs, Index lhsStride, Index cols, Index rows) { symm_pack_complex_lhs_helper(blockA, _lhs, lhsStride, cols, rows); } }; // *********** symm_pack float32 *********** template struct symm_pack_rhs { void operator()(float* blockB, const float* _rhs, Index rhsStride, Index rows, Index cols, Index k2) { symm_pack_rhs_helper(blockB, _rhs, rhsStride, rows, cols, k2); } }; template struct symm_pack_lhs { void operator()(float* blockA, const float* _lhs, Index lhsStride, Index cols, Index rows) { symm_pack_lhs_helper(blockA, _lhs, lhsStride, cols, rows); } }; // *********** symm_pack float64 *********** template struct symm_pack_rhs { void operator()(double* blockB, const double* _rhs, Index rhsStride, Index rows, Index cols, Index k2) { symm_pack_rhs_helper(blockB, _rhs, rhsStride, rows, cols, k2); } }; template struct symm_pack_lhs { void operator()(double* blockA, const double* _lhs, Index lhsStride, Index cols, Index rows) { symm_pack_lhs_helper(blockA, _lhs, lhsStride, cols, rows); } }; /** * PanelMode * Packing might be called several times before being multiplied by gebp_kernel, this happens because * on special occasions it fills part of block with other parts of the matrix. Two variables control * how PanelMode should behave: offset and stride. The idea is that those variables represent whatever * is going to be the real offset and stride in the future and this is what you should obey. The process * is to behave as you would with normal packing but leave the start of each part with the correct offset * and the end as well respecting the real stride the block will have. Gebp is aware of both blocks stride * and offset and behaves accordingly. 
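 *
 * A minimal sketch of the idea (illustrative only; this helper is hypothetical and not part of the
 * file): packing one depth-sized panel in PanelMode leaves `offset` untouched slots at the start
 * and pads up to `stride` at the end, so gebp can walk every panel with one common stride:
 *
 *   void pack_panel_sketch(float* block, const float* src, Index depth, Index stride, Index offset)
 *   {
 *     Index ri = offset;                  // honour the agreed offset before the data
 *     for(Index i = 0; i < depth; i++)
 *       block[ri++] = src[i];             // pack as usual
 *     ri += stride - offset - depth;      // respect the real stride the block will have
 *   }
 *
 * This mirrors the `ri += vectorSize*offset` and `ri += vectorSize*(stride - offset - depth)`
 * advances in the packing loops below.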
**/ template EIGEN_ALWAYS_INLINE void storeBlock(Scalar* to, PacketBlock& block) { const Index size = 16 / sizeof(Scalar); pstore(to + (0 * size), block.packet[0]); pstore(to + (1 * size), block.packet[1]); pstore(to + (2 * size), block.packet[2]); pstore(to + (3 * size), block.packet[3]); } template EIGEN_ALWAYS_INLINE void storeBlock(Scalar* to, PacketBlock& block) { const Index size = 16 / sizeof(Scalar); pstore(to + (0 * size), block.packet[0]); pstore(to + (1 * size), block.packet[1]); } // General template for lhs & rhs complex packing. template struct dhs_cpack { EIGEN_STRONG_INLINE void operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) { const Index vectorSize = quad_traits::vectorsize; const Index vectorDelta = vectorSize * ((PanelMode) ? stride : depth); Index rir = ((PanelMode) ? (vectorSize*offset) : 0), rii; Scalar* blockAt = reinterpret_cast(blockA); Index j = 0; for(; j + vectorSize <= rows; j+=vectorSize) { Index i = 0; rii = rir + vectorDelta; for(; i + vectorSize <= depth; i+=vectorSize) { PacketBlock blockr, blocki; PacketBlock cblock; if (UseLhs) { bload(cblock, lhs, j, i); } else { bload(cblock, lhs, i, j); } blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[4].v, p16uc_GETREAL32); blockr.packet[1] = vec_perm(cblock.packet[1].v, cblock.packet[5].v, p16uc_GETREAL32); blockr.packet[2] = vec_perm(cblock.packet[2].v, cblock.packet[6].v, p16uc_GETREAL32); blockr.packet[3] = vec_perm(cblock.packet[3].v, cblock.packet[7].v, p16uc_GETREAL32); blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[4].v, p16uc_GETIMAG32); blocki.packet[1] = vec_perm(cblock.packet[1].v, cblock.packet[5].v, p16uc_GETIMAG32); blocki.packet[2] = vec_perm(cblock.packet[2].v, cblock.packet[6].v, p16uc_GETIMAG32); blocki.packet[3] = vec_perm(cblock.packet[3].v, cblock.packet[7].v, p16uc_GETIMAG32); if(Conjugate) { blocki.packet[0] = -blocki.packet[0]; blocki.packet[1] = -blocki.packet[1]; blocki.packet[2] = -blocki.packet[2]; blocki.packet[3] = -blocki.packet[3]; } if(((StorageOrder == RowMajor) && UseLhs) || (((StorageOrder == ColMajor) && !UseLhs))) { ptranspose(blockr); ptranspose(blocki); } storeBlock(blockAt + rir, blockr); storeBlock(blockAt + rii, blocki); rir += 4*vectorSize; rii += 4*vectorSize; } for(; i < depth; i++) { PacketBlock blockr, blocki; PacketBlock cblock; if(((StorageOrder == ColMajor) && UseLhs) || (((StorageOrder == RowMajor) && !UseLhs))) { if (UseLhs) { cblock.packet[0] = lhs.template loadPacket(j + 0, i); cblock.packet[1] = lhs.template loadPacket(j + 2, i); } else { cblock.packet[0] = lhs.template loadPacket(i, j + 0); cblock.packet[1] = lhs.template loadPacket(i, j + 2); } } else { std::complex lhs0, lhs1; if (UseLhs) { lhs0 = lhs(j + 0, i); lhs1 = lhs(j + 1, i); cblock.packet[0] = pload2(&lhs0, &lhs1); lhs0 = lhs(j + 2, i); lhs1 = lhs(j + 3, i); cblock.packet[1] = pload2(&lhs0, &lhs1); } else { lhs0 = lhs(i, j + 0); lhs1 = lhs(i, j + 1); cblock.packet[0] = pload2(&lhs0, &lhs1); lhs0 = lhs(i, j + 2); lhs1 = lhs(i, j + 3); cblock.packet[1] = pload2(&lhs0, &lhs1); } } blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETREAL32); blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETIMAG32); if(Conjugate) { blocki.packet[0] = -blocki.packet[0]; } pstore(blockAt + rir, blockr.packet[0]); pstore(blockAt + rii, blocki.packet[0]); rir += vectorSize; rii += vectorSize; } rir += ((PanelMode) ? 
(vectorSize*(2*stride - depth)) : vectorDelta); } if (j < rows) { if(PanelMode) rir += (offset*(rows - j - vectorSize)); rii = rir + (((PanelMode) ? stride : depth) * (rows - j)); for(Index i = 0; i < depth; i++) { Index k = j; for(; k < rows; k++) { if (UseLhs) { blockAt[rir] = lhs(k, i).real(); if(Conjugate) blockAt[rii] = -lhs(k, i).imag(); else blockAt[rii] = lhs(k, i).imag(); } else { blockAt[rir] = lhs(i, k).real(); if(Conjugate) blockAt[rii] = -lhs(i, k).imag(); else blockAt[rii] = lhs(i, k).imag(); } rir += 1; rii += 1; } } } } }; // General template for lhs & rhs packing. template struct dhs_pack{ EIGEN_STRONG_INLINE void operator()(Scalar* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) { const Index vectorSize = quad_traits::vectorsize; Index ri = 0, j = 0; for(; j + vectorSize <= rows; j+=vectorSize) { Index i = 0; if(PanelMode) ri += vectorSize*offset; for(; i + vectorSize <= depth; i+=vectorSize) { PacketBlock block; if (UseLhs) { bload(block, lhs, j, i); } else { bload(block, lhs, i, j); } if(((StorageOrder == RowMajor) && UseLhs) || ((StorageOrder == ColMajor) && !UseLhs)) { ptranspose(block); } storeBlock(blockA + ri, block); ri += 4*vectorSize; } for(; i < depth; i++) { if(((StorageOrder == RowMajor) && UseLhs) || ((StorageOrder == ColMajor) && !UseLhs)) { if (UseLhs) { blockA[ri+0] = lhs(j+0, i); blockA[ri+1] = lhs(j+1, i); blockA[ri+2] = lhs(j+2, i); blockA[ri+3] = lhs(j+3, i); } else { blockA[ri+0] = lhs(i, j+0); blockA[ri+1] = lhs(i, j+1); blockA[ri+2] = lhs(i, j+2); blockA[ri+3] = lhs(i, j+3); } } else { Packet lhsV; if (UseLhs) { lhsV = lhs.template loadPacket(j, i); } else { lhsV = lhs.template loadPacket(i, j); } pstore(blockA + ri, lhsV); } ri += vectorSize; } if(PanelMode) ri += vectorSize*(stride - offset - depth); } if (j < rows) { if(PanelMode) ri += offset*(rows - j); for(Index i = 0; i < depth; i++) { Index k = j; for(; k < rows; k++) { if (UseLhs) { blockA[ri] = lhs(k, i); } else { blockA[ri] = lhs(i, k); } ri += 1; } } } } }; // General template for lhs packing, float64 specialization. template struct dhs_pack { EIGEN_STRONG_INLINE void operator()(double* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) { const Index vectorSize = quad_traits::vectorsize; Index ri = 0, j = 0; for(; j + vectorSize <= rows; j+=vectorSize) { Index i = 0; if(PanelMode) ri += vectorSize*offset; for(; i + vectorSize <= depth; i+=vectorSize) { PacketBlock block; if(StorageOrder == RowMajor) { block.packet[0] = lhs.template loadPacket(j + 0, i); block.packet[1] = lhs.template loadPacket(j + 1, i); ptranspose(block); } else { block.packet[0] = lhs.template loadPacket(j, i + 0); block.packet[1] = lhs.template loadPacket(j, i + 1); } storeBlock(blockA + ri, block); ri += 2*vectorSize; } for(; i < depth; i++) { if(StorageOrder == RowMajor) { blockA[ri+0] = lhs(j+0, i); blockA[ri+1] = lhs(j+1, i); } else { Packet2d lhsV = lhs.template loadPacket(j, i); pstore(blockA + ri, lhsV); } ri += vectorSize; } if(PanelMode) ri += vectorSize*(stride - offset - depth); } if (j < rows) { if(PanelMode) ri += offset*(rows - j); for(Index i = 0; i < depth; i++) { Index k = j; for(; k < rows; k++) { blockA[ri] = lhs(k, i); ri += 1; } } } } }; // General template for rhs packing, float64 specialization. 
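// The packer below consumes 2*vectorSize = 4 rhs columns per iteration; for a single
// 2 (depth) x 4 (cols) micro-panel the packed buffer ends up ordered row by row
// (an illustrative layout sketch, double precision):
//
//   blockB: r(i,j+0) r(i,j+1) r(i,j+2) r(i,j+3) r(i+1,j+0) r(i+1,j+1) r(i+1,j+2) r(i+1,j+3)
//
// which is the order the double-precision gemm kernels read back with contiguous loads.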
template struct dhs_pack { EIGEN_STRONG_INLINE void operator()(double* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) { const Index vectorSize = quad_traits::vectorsize; Index ri = 0, j = 0; for(; j + 2*vectorSize <= cols; j+=2*vectorSize) { Index i = 0; if(PanelMode) ri += offset*(2*vectorSize); for(; i + vectorSize <= depth; i+=vectorSize) { PacketBlock block; if(StorageOrder == ColMajor) { PacketBlock block1, block2; block1.packet[0] = rhs.template loadPacket(i, j + 0); block1.packet[1] = rhs.template loadPacket(i, j + 1); block2.packet[0] = rhs.template loadPacket(i, j + 2); block2.packet[1] = rhs.template loadPacket(i, j + 3); ptranspose(block1); ptranspose(block2); pstore(blockB + ri , block1.packet[0]); pstore(blockB + ri + 2, block2.packet[0]); pstore(blockB + ri + 4, block1.packet[1]); pstore(blockB + ri + 6, block2.packet[1]); } else { block.packet[0] = rhs.template loadPacket(i + 0, j + 0); //[a1 a2] block.packet[1] = rhs.template loadPacket(i + 0, j + 2); //[a3 a4] block.packet[2] = rhs.template loadPacket(i + 1, j + 0); //[b1 b2] block.packet[3] = rhs.template loadPacket(i + 1, j + 2); //[b3 b4] storeBlock(blockB + ri, block); } ri += 4*vectorSize; } for(; i < depth; i++) { if(StorageOrder == ColMajor) { blockB[ri+0] = rhs(i, j+0); blockB[ri+1] = rhs(i, j+1); ri += vectorSize; blockB[ri+0] = rhs(i, j+2); blockB[ri+1] = rhs(i, j+3); } else { Packet2d rhsV = rhs.template loadPacket(i, j); pstore(blockB + ri, rhsV); ri += vectorSize; rhsV = rhs.template loadPacket(i, j + 2); pstore(blockB + ri, rhsV); } ri += vectorSize; } if(PanelMode) ri += (2*vectorSize)*(stride - offset - depth); } if (j < cols) { if(PanelMode) ri += offset*(cols - j); for(Index i = 0; i < depth; i++) { Index k = j; for(; k < cols; k++) { blockB[ri] = rhs(i, k); ri += 1; } } } } }; // General template for lhs complex packing, float64 specialization. template struct dhs_cpack { EIGEN_STRONG_INLINE void operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) { const Index vectorSize = quad_traits::vectorsize; const Index vectorDelta = vectorSize * ((PanelMode) ? stride : depth); Index rir = ((PanelMode) ? 
(vectorSize*offset) : 0), rii; double* blockAt = reinterpret_cast(blockA); Index j = 0; for(; j + vectorSize <= rows; j+=vectorSize) { Index i = 0; rii = rir + vectorDelta; for(; i + vectorSize <= depth; i+=vectorSize) { PacketBlock blockr, blocki; PacketBlock cblock; if(StorageOrder == ColMajor) { cblock.packet[0] = lhs.template loadPacket(j, i + 0); //[a1 a1i] cblock.packet[1] = lhs.template loadPacket(j, i + 1); //[b1 b1i] cblock.packet[2] = lhs.template loadPacket(j + 1, i + 0); //[a2 a2i] cblock.packet[3] = lhs.template loadPacket(j + 1, i + 1); //[b2 b2i] blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[2].v, p16uc_GETREAL64); //[a1 a2] blockr.packet[1] = vec_perm(cblock.packet[1].v, cblock.packet[3].v, p16uc_GETREAL64); //[b1 b2] blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[2].v, p16uc_GETIMAG64); blocki.packet[1] = vec_perm(cblock.packet[1].v, cblock.packet[3].v, p16uc_GETIMAG64); } else { cblock.packet[0] = lhs.template loadPacket(j + 0, i); //[a1 a1i] cblock.packet[1] = lhs.template loadPacket(j + 1, i); //[a2 a2i] cblock.packet[2] = lhs.template loadPacket(j + 0, i + 1); //[b1 b1i] cblock.packet[3] = lhs.template loadPacket(j + 1, i + 1); //[b2 b2i blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETREAL64); //[a1 a2] blockr.packet[1] = vec_perm(cblock.packet[2].v, cblock.packet[3].v, p16uc_GETREAL64); //[b1 b2] blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETIMAG64); blocki.packet[1] = vec_perm(cblock.packet[2].v, cblock.packet[3].v, p16uc_GETIMAG64); } if(Conjugate) { blocki.packet[0] = -blocki.packet[0]; blocki.packet[1] = -blocki.packet[1]; } storeBlock(blockAt + rir, blockr); storeBlock(blockAt + rii, blocki); rir += 2*vectorSize; rii += 2*vectorSize; } for(; i < depth; i++) { PacketBlock blockr, blocki; PacketBlock cblock; cblock.packet[0] = lhs.template loadPacket(j + 0, i); cblock.packet[1] = lhs.template loadPacket(j + 1, i); blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETREAL64); blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETIMAG64); if(Conjugate) { blocki.packet[0] = -blocki.packet[0]; } pstore(blockAt + rir, blockr.packet[0]); pstore(blockAt + rii, blocki.packet[0]); rir += vectorSize; rii += vectorSize; } rir += ((PanelMode) ? (vectorSize*(2*stride - depth)) : vectorDelta); } if (j < rows) { if(PanelMode) rir += (offset*(rows - j - vectorSize)); rii = rir + (((PanelMode) ? stride : depth) * (rows - j)); for(Index i = 0; i < depth; i++) { Index k = j; for(; k < rows; k++) { blockAt[rir] = lhs(k, i).real(); if(Conjugate) blockAt[rii] = -lhs(k, i).imag(); else blockAt[rii] = lhs(k, i).imag(); rir += 1; rii += 1; } } } } }; // General template for rhs complex packing, float64 specialization. template struct dhs_cpack { EIGEN_STRONG_INLINE void operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) { const Index vectorSize = quad_traits::vectorsize; const Index vectorDelta = 2*vectorSize * ((PanelMode) ? stride : depth); Index rir = ((PanelMode) ? 
(2*vectorSize*offset) : 0), rii; double* blockBt = reinterpret_cast(blockB); Index j = 0; for(; j + 2*vectorSize <= cols; j+=2*vectorSize) { Index i = 0; rii = rir + vectorDelta; for(; i < depth; i++) { PacketBlock cblock; PacketBlock blockr, blocki; bload(cblock, rhs, i, j); blockr.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETREAL64); blockr.packet[1] = vec_perm(cblock.packet[2].v, cblock.packet[3].v, p16uc_GETREAL64); blocki.packet[0] = vec_perm(cblock.packet[0].v, cblock.packet[1].v, p16uc_GETIMAG64); blocki.packet[1] = vec_perm(cblock.packet[2].v, cblock.packet[3].v, p16uc_GETIMAG64); if(Conjugate) { blocki.packet[0] = -blocki.packet[0]; blocki.packet[1] = -blocki.packet[1]; } storeBlock(blockBt + rir, blockr); storeBlock(blockBt + rii, blocki); rir += 2*vectorSize; rii += 2*vectorSize; } rir += ((PanelMode) ? (2*vectorSize*(2*stride - depth)) : vectorDelta); } if (j < cols) { if(PanelMode) rir += (offset*(cols - j - 2*vectorSize)); rii = rir + (((PanelMode) ? stride : depth) * (cols - j)); for(Index i = 0; i < depth; i++) { Index k = j; for(; k < cols; k++) { blockBt[rir] = rhs(i, k).real(); if(Conjugate) blockBt[rii] = -rhs(i, k).imag(); else blockBt[rii] = rhs(i, k).imag(); rir += 1; rii += 1; } } } } }; /************** * GEMM utils * **************/ // 512-bits rank1-update of acc. It can either positive or negative accumulate (useful for complex gemm). template EIGEN_ALWAYS_INLINE void pger_common(PacketBlock* acc, const Packet& lhsV, const Packet* rhsV) { if(NegativeAccumulate) { acc->packet[0] = vec_nmsub(lhsV, rhsV[0], acc->packet[0]); acc->packet[1] = vec_nmsub(lhsV, rhsV[1], acc->packet[1]); acc->packet[2] = vec_nmsub(lhsV, rhsV[2], acc->packet[2]); acc->packet[3] = vec_nmsub(lhsV, rhsV[3], acc->packet[3]); } else { acc->packet[0] = vec_madd(lhsV, rhsV[0], acc->packet[0]); acc->packet[1] = vec_madd(lhsV, rhsV[1], acc->packet[1]); acc->packet[2] = vec_madd(lhsV, rhsV[2], acc->packet[2]); acc->packet[3] = vec_madd(lhsV, rhsV[3], acc->packet[3]); } } template EIGEN_ALWAYS_INLINE void pger_common(PacketBlock* acc, const Packet& lhsV, const Packet* rhsV) { if(NegativeAccumulate) { acc->packet[0] = vec_nmsub(lhsV, rhsV[0], acc->packet[0]); } else { acc->packet[0] = vec_madd(lhsV, rhsV[0], acc->packet[0]); } } template EIGEN_ALWAYS_INLINE void pger(PacketBlock* acc, const Scalar* lhs, const Packet* rhsV) { Packet lhsV = pload(lhs); pger_common(acc, lhsV, rhsV); } template EIGEN_ALWAYS_INLINE void loadPacketRemaining(const Scalar* lhs, Packet &lhsV, Index remaining_rows) { #ifdef _ARCH_PWR9 lhsV = vec_xl_len((Scalar *)lhs, remaining_rows * sizeof(Scalar)); #else Index i = 0; do { lhsV[i] = lhs[i]; } while (++i < remaining_rows); #endif } template EIGEN_ALWAYS_INLINE void pger(PacketBlock* acc, const Scalar* lhs, const Packet* rhsV, Index remaining_rows) { Packet lhsV; loadPacketRemaining(lhs, lhsV, remaining_rows); pger_common(acc, lhsV, rhsV); } // 512-bits rank1-update of complex acc. It takes decoupled accumulators as entries. It also takes cares of mixed types real * complex and complex * real. 
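// Per lane this is the usual complex multiply-accumulate written against the decoupled
// accumulators (an illustrative scalar sketch; the conjugate and real-only variants simply drop or
// negate the corresponding terms):
//
//   accReal += l_real * r_real - l_imag * r_imag;
//   accImag += l_real * r_imag + l_imag * r_real;
//
// The subtraction is what the NegativeAccumulate=true path of pger_common provides.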
template EIGEN_ALWAYS_INLINE void pgerc_common(PacketBlock* accReal, PacketBlock* accImag, const Packet &lhsV, const Packet &lhsVi, const Packet* rhsV, const Packet* rhsVi) { pger_common(accReal, lhsV, rhsV); if(LhsIsReal) { pger_common(accImag, lhsV, rhsVi); EIGEN_UNUSED_VARIABLE(lhsVi); } else { if (!RhsIsReal) { pger_common(accReal, lhsVi, rhsVi); pger_common(accImag, lhsV, rhsVi); } else { EIGEN_UNUSED_VARIABLE(rhsVi); } pger_common(accImag, lhsVi, rhsV); } } template EIGEN_ALWAYS_INLINE void pgerc(PacketBlock* accReal, PacketBlock* accImag, const Scalar* lhs_ptr, const Scalar* lhs_ptr_imag, const Packet* rhsV, const Packet* rhsVi) { Packet lhsV = ploadLhs(lhs_ptr); Packet lhsVi; if(!LhsIsReal) lhsVi = ploadLhs(lhs_ptr_imag); else EIGEN_UNUSED_VARIABLE(lhs_ptr_imag); pgerc_common(accReal, accImag, lhsV, lhsVi, rhsV, rhsVi); } template EIGEN_ALWAYS_INLINE void loadPacketRemaining(const Scalar* lhs_ptr, const Scalar* lhs_ptr_imag, Packet &lhsV, Packet &lhsVi, Index remaining_rows) { #ifdef _ARCH_PWR9 lhsV = vec_xl_len((Scalar *)lhs_ptr, remaining_rows * sizeof(Scalar)); if(!LhsIsReal) lhsVi = vec_xl_len((Scalar *)lhs_ptr_imag, remaining_rows * sizeof(Scalar)); else EIGEN_UNUSED_VARIABLE(lhs_ptr_imag); #else Index i = 0; do { lhsV[i] = lhs_ptr[i]; if(!LhsIsReal) lhsVi[i] = lhs_ptr_imag[i]; } while (++i < remaining_rows); if(LhsIsReal) EIGEN_UNUSED_VARIABLE(lhs_ptr_imag); #endif } template EIGEN_ALWAYS_INLINE void pgerc(PacketBlock* accReal, PacketBlock* accImag, const Scalar* lhs_ptr, const Scalar* lhs_ptr_imag, const Packet* rhsV, const Packet* rhsVi, Index remaining_rows) { Packet lhsV, lhsVi; loadPacketRemaining(lhs_ptr, lhs_ptr_imag, lhsV, lhsVi, remaining_rows); pgerc_common(accReal, accImag, lhsV, lhsVi, rhsV, rhsVi); } template EIGEN_ALWAYS_INLINE Packet ploadLhs(const Scalar* lhs) { return *reinterpret_cast(const_cast(lhs)); } // Zero the accumulator on PacketBlock. template EIGEN_ALWAYS_INLINE void bsetzero(PacketBlock& acc) { acc.packet[0] = pset1((Scalar)0); acc.packet[1] = pset1((Scalar)0); acc.packet[2] = pset1((Scalar)0); acc.packet[3] = pset1((Scalar)0); } template EIGEN_ALWAYS_INLINE void bsetzero(PacketBlock& acc) { acc.packet[0] = pset1((Scalar)0); } // Scale the PacketBlock vectors by alpha. template EIGEN_ALWAYS_INLINE void bscale(PacketBlock& acc, PacketBlock& accZ, const Packet& pAlpha) { acc.packet[0] = pmadd(pAlpha, accZ.packet[0], acc.packet[0]); acc.packet[1] = pmadd(pAlpha, accZ.packet[1], acc.packet[1]); acc.packet[2] = pmadd(pAlpha, accZ.packet[2], acc.packet[2]); acc.packet[3] = pmadd(pAlpha, accZ.packet[3], acc.packet[3]); } template EIGEN_ALWAYS_INLINE void bscale(PacketBlock& acc, PacketBlock& accZ, const Packet& pAlpha) { acc.packet[0] = pmadd(pAlpha, accZ.packet[0], acc.packet[0]); } template EIGEN_ALWAYS_INLINE void bscalec_common(PacketBlock& acc, PacketBlock& accZ, const Packet& pAlpha) { acc.packet[0] = pmul(accZ.packet[0], pAlpha); acc.packet[1] = pmul(accZ.packet[1], pAlpha); acc.packet[2] = pmul(accZ.packet[2], pAlpha); acc.packet[3] = pmul(accZ.packet[3], pAlpha); } template EIGEN_ALWAYS_INLINE void bscalec_common(PacketBlock& acc, PacketBlock& accZ, const Packet& pAlpha) { acc.packet[0] = pmul(accZ.packet[0], pAlpha); } // Complex version of PacketBlock scaling. 
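// For alpha = (bReal + i*bImag) and a decoupled accumulator (aReal, aImag) this computes, per lane
// (an illustrative scalar sketch):
//
//   cReal = aReal * bReal - aImag * bImag;
//   cImag = aImag * bReal + aReal * bImag;
//
// bscalec_common supplies the products by bReal and pger_common adds/subtracts the bImag terms.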
template EIGEN_ALWAYS_INLINE void bscalec(PacketBlock& aReal, PacketBlock& aImag, const Packet& bReal, const Packet& bImag, PacketBlock& cReal, PacketBlock& cImag) { bscalec_common(cReal, aReal, bReal); bscalec_common(cImag, aImag, bReal); pger_common(&cReal, bImag, aImag.packet); pger_common(&cImag, bImag, aReal.packet); } template EIGEN_ALWAYS_INLINE void band(PacketBlock& acc, const Packet& pMask) { acc.packet[0] = pand(acc.packet[0], pMask); acc.packet[1] = pand(acc.packet[1], pMask); acc.packet[2] = pand(acc.packet[2], pMask); acc.packet[3] = pand(acc.packet[3], pMask); } template EIGEN_ALWAYS_INLINE void bscalec(PacketBlock& aReal, PacketBlock& aImag, const Packet& bReal, const Packet& bImag, PacketBlock& cReal, PacketBlock& cImag, const Packet& pMask) { band(aReal, pMask); band(aImag, pMask); bscalec(aReal, aImag, bReal, bImag, cReal, cImag); } // Load a PacketBlock, the N parameters make tunning gemm easier so we can add more accumulators as needed. template EIGEN_ALWAYS_INLINE void bload(PacketBlock& acc, const DataMapper& res, Index row, Index col) { if (StorageOrder == RowMajor) { acc.packet[0] = res.template loadPacket(row + 0, col + N*accCols); acc.packet[1] = res.template loadPacket(row + 1, col + N*accCols); acc.packet[2] = res.template loadPacket(row + 2, col + N*accCols); acc.packet[3] = res.template loadPacket(row + 3, col + N*accCols); } else { acc.packet[0] = res.template loadPacket(row + N*accCols, col + 0); acc.packet[1] = res.template loadPacket(row + N*accCols, col + 1); acc.packet[2] = res.template loadPacket(row + N*accCols, col + 2); acc.packet[3] = res.template loadPacket(row + N*accCols, col + 3); } } // An overload of bload when you have a PacketBLock with 8 vectors. template EIGEN_ALWAYS_INLINE void bload(PacketBlock& acc, const DataMapper& res, Index row, Index col) { if (StorageOrder == RowMajor) { acc.packet[0] = res.template loadPacket(row + 0, col + N*accCols); acc.packet[1] = res.template loadPacket(row + 1, col + N*accCols); acc.packet[2] = res.template loadPacket(row + 2, col + N*accCols); acc.packet[3] = res.template loadPacket(row + 3, col + N*accCols); acc.packet[4] = res.template loadPacket(row + 0, col + (N+1)*accCols); acc.packet[5] = res.template loadPacket(row + 1, col + (N+1)*accCols); acc.packet[6] = res.template loadPacket(row + 2, col + (N+1)*accCols); acc.packet[7] = res.template loadPacket(row + 3, col + (N+1)*accCols); } else { acc.packet[0] = res.template loadPacket(row + N*accCols, col + 0); acc.packet[1] = res.template loadPacket(row + N*accCols, col + 1); acc.packet[2] = res.template loadPacket(row + N*accCols, col + 2); acc.packet[3] = res.template loadPacket(row + N*accCols, col + 3); acc.packet[4] = res.template loadPacket(row + (N+1)*accCols, col + 0); acc.packet[5] = res.template loadPacket(row + (N+1)*accCols, col + 1); acc.packet[6] = res.template loadPacket(row + (N+1)*accCols, col + 2); acc.packet[7] = res.template loadPacket(row + (N+1)*accCols, col + 3); } } template EIGEN_ALWAYS_INLINE void bload(PacketBlock& acc, const DataMapper& res, Index row, Index col) { acc.packet[0] = res.template loadPacket(row + N*accCols, col + 0); acc.packet[1] = res.template loadPacket(row + (N+1)*accCols, col + 0); } const static Packet4i mask41 = { -1, 0, 0, 0 }; const static Packet4i mask42 = { -1, -1, 0, 0 }; const static Packet4i mask43 = { -1, -1, -1, 0 }; const static Packet2l mask21 = { -1, 0 }; template EIGEN_ALWAYS_INLINE Packet bmask(const int remaining_rows) { if (remaining_rows == 0) { return pset1(float(0.0)); // Not used } 
else { switch (remaining_rows) { case 1: return Packet(mask41); case 2: return Packet(mask42); default: return Packet(mask43); } } } template<> EIGEN_ALWAYS_INLINE Packet2d bmask(const int remaining_rows) { if (remaining_rows == 0) { return pset1(double(0.0)); // Not used } else { return Packet2d(mask21); } } template EIGEN_ALWAYS_INLINE void bscale(PacketBlock& acc, PacketBlock& accZ, const Packet& pAlpha, const Packet& pMask) { band(accZ, pMask); bscale(acc, accZ, pAlpha); } template EIGEN_ALWAYS_INLINE void pbroadcast4_old(const __UNPACK_TYPE__(Packet)* a, Packet& a0, Packet& a1, Packet& a2, Packet& a3) { pbroadcast4(a, a0, a1, a2, a3); } template<> EIGEN_ALWAYS_INLINE void pbroadcast4_old(const double* a, Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3) { a1 = pload(a); a3 = pload(a + 2); a0 = vec_splat(a1, 0); a1 = vec_splat(a1, 1); a2 = vec_splat(a3, 0); a3 = vec_splat(a3, 1); } // PEEL loop factor. #define PEEL 7 template EIGEN_ALWAYS_INLINE void MICRO_EXTRA_COL( const Scalar* &lhs_ptr, const Scalar* &rhs_ptr, PacketBlock &accZero, Index remaining_rows, Index remaining_cols) { Packet rhsV[1]; rhsV[0] = pset1(rhs_ptr[0]); pger<1,Scalar, Packet, false>(&accZero, lhs_ptr, rhsV); lhs_ptr += remaining_rows; rhs_ptr += remaining_cols; } template EIGEN_STRONG_INLINE void gemm_extra_col( const DataMapper& res, const Scalar* lhs_base, const Scalar* rhs_base, Index depth, Index strideA, Index offsetA, Index row, Index col, Index remaining_rows, Index remaining_cols, const Packet& pAlpha) { const Scalar* rhs_ptr = rhs_base; const Scalar* lhs_ptr = lhs_base + row*strideA + remaining_rows*offsetA; PacketBlock accZero; bsetzero(accZero); Index remaining_depth = (depth & -accRows); Index k = 0; for(; k + PEEL <= remaining_depth; k+= PEEL) { EIGEN_POWER_PREFETCH(rhs_ptr); EIGEN_POWER_PREFETCH(lhs_ptr); for (int l = 0; l < PEEL; l++) { MICRO_EXTRA_COL(lhs_ptr, rhs_ptr, accZero, remaining_rows, remaining_cols); } } for(; k < remaining_depth; k++) { MICRO_EXTRA_COL(lhs_ptr, rhs_ptr, accZero, remaining_rows, remaining_cols); } for(; k < depth; k++) { Packet rhsV[1]; rhsV[0] = pset1(rhs_ptr[0]); pger<1, Scalar, Packet, Index, false>(&accZero, lhs_ptr, rhsV, remaining_rows); lhs_ptr += remaining_rows; rhs_ptr += remaining_cols; } accZero.packet[0] = vec_mul(pAlpha, accZero.packet[0]); for(Index i = 0; i < remaining_rows; i++) { res(row + i, col) += accZero.packet[0][i]; } } template EIGEN_ALWAYS_INLINE void MICRO_EXTRA_ROW( const Scalar* &lhs_ptr, const Scalar* &rhs_ptr, PacketBlock &accZero, Index remaining_rows) { Packet rhsV[4]; pbroadcast4(rhs_ptr, rhsV[0], rhsV[1], rhsV[2], rhsV[3]); pger<4, Scalar, Packet, false>(&accZero, lhs_ptr, rhsV); lhs_ptr += remaining_rows; rhs_ptr += accRows; } template EIGEN_STRONG_INLINE void gemm_extra_row( const DataMapper& res, const Scalar* lhs_base, const Scalar* rhs_base, Index depth, Index strideA, Index offsetA, Index row, Index col, Index rows, Index cols, Index remaining_rows, const Packet& pAlpha, const Packet& pMask) { const Scalar* rhs_ptr = rhs_base; const Scalar* lhs_ptr = lhs_base + row*strideA + remaining_rows*offsetA; PacketBlock accZero, acc; bsetzero(accZero); Index remaining_depth = (col + accRows < cols) ? 
depth : (depth & -accRows); Index k = 0; for(; k + PEEL <= remaining_depth; k+= PEEL) { EIGEN_POWER_PREFETCH(rhs_ptr); EIGEN_POWER_PREFETCH(lhs_ptr); for (int l = 0; l < PEEL; l++) { MICRO_EXTRA_ROW(lhs_ptr, rhs_ptr, accZero, remaining_rows); } } for(; k < remaining_depth; k++) { MICRO_EXTRA_ROW(lhs_ptr, rhs_ptr, accZero, remaining_rows); } if ((remaining_depth == depth) && (rows >= accCols)) { for(Index j = 0; j < 4; j++) { acc.packet[j] = res.template loadPacket(row, col + j); } bscale(acc, accZero, pAlpha, pMask); res.template storePacketBlock(row, col, acc); } else { for(; k < depth; k++) { Packet rhsV[4]; pbroadcast4(rhs_ptr, rhsV[0], rhsV[1], rhsV[2], rhsV[3]); pger<4, Scalar, Packet, Index, false>(&accZero, lhs_ptr, rhsV, remaining_rows); lhs_ptr += remaining_rows; rhs_ptr += accRows; } for(Index j = 0; j < 4; j++) { accZero.packet[j] = vec_mul(pAlpha, accZero.packet[j]); } for(Index j = 0; j < 4; j++) { for(Index i = 0; i < remaining_rows; i++) { res(row + i, col + j) += accZero.packet[j][i]; } } } } #define MICRO_UNROLL(func) \ func(0) func(1) func(2) func(3) func(4) func(5) func(6) func(7) #define MICRO_UNROLL_WORK(func, func2, peel) \ MICRO_UNROLL(func2); \ func(0,peel) func(1,peel) func(2,peel) func(3,peel) \ func(4,peel) func(5,peel) func(6,peel) func(7,peel) #define MICRO_LOAD_ONE(iter) \ if (unroll_factor > iter) { \ lhsV##iter = ploadLhs(lhs_ptr##iter); \ lhs_ptr##iter += accCols; \ } else { \ EIGEN_UNUSED_VARIABLE(lhsV##iter); \ } #define MICRO_WORK_ONE(iter, peel) \ if (unroll_factor > iter) { \ pger_common(&accZero##iter, lhsV##iter, rhsV##peel); \ } #define MICRO_TYPE_PEEL4(func, func2, peel) \ if (PEEL > peel) { \ Packet lhsV0, lhsV1, lhsV2, lhsV3, lhsV4, lhsV5, lhsV6, lhsV7; \ pbroadcast4(rhs_ptr + (accRows * peel), rhsV##peel[0], rhsV##peel[1], rhsV##peel[2], rhsV##peel[3]); \ MICRO_UNROLL_WORK(func, func2, peel) \ } else { \ EIGEN_UNUSED_VARIABLE(rhsV##peel); \ } #define MICRO_TYPE_PEEL1(func, func2, peel) \ if (PEEL > peel) { \ Packet lhsV0, lhsV1, lhsV2, lhsV3, lhsV4, lhsV5, lhsV6, lhsV7; \ rhsV##peel[0] = pset1(rhs_ptr[remaining_cols * peel]); \ MICRO_UNROLL_WORK(func, func2, peel) \ } else { \ EIGEN_UNUSED_VARIABLE(rhsV##peel); \ } #define MICRO_UNROLL_TYPE_PEEL(M, func, func1, func2) \ Packet rhsV0[M], rhsV1[M], rhsV2[M], rhsV3[M], rhsV4[M], rhsV5[M], rhsV6[M], rhsV7[M], rhsV8[M], rhsV9[M]; \ func(func1,func2,0); func(func1,func2,1); \ func(func1,func2,2); func(func1,func2,3); \ func(func1,func2,4); func(func1,func2,5); \ func(func1,func2,6); func(func1,func2,7); \ func(func1,func2,8); func(func1,func2,9); #define MICRO_UNROLL_TYPE_ONE(M, func, func1, func2) \ Packet rhsV0[M]; \ func(func1,func2,0); #define MICRO_ONE_PEEL4 \ MICRO_UNROLL_TYPE_PEEL(4, MICRO_TYPE_PEEL4, MICRO_WORK_ONE, MICRO_LOAD_ONE); \ rhs_ptr += (accRows * PEEL); #define MICRO_ONE4 \ MICRO_UNROLL_TYPE_ONE(4, MICRO_TYPE_PEEL4, MICRO_WORK_ONE, MICRO_LOAD_ONE); \ rhs_ptr += accRows; #define MICRO_ONE_PEEL1 \ MICRO_UNROLL_TYPE_PEEL(1, MICRO_TYPE_PEEL1, MICRO_WORK_ONE, MICRO_LOAD_ONE); \ rhs_ptr += (remaining_cols * PEEL); #define MICRO_ONE1 \ MICRO_UNROLL_TYPE_ONE(1, MICRO_TYPE_PEEL1, MICRO_WORK_ONE, MICRO_LOAD_ONE); \ rhs_ptr += remaining_cols; #define MICRO_DST_PTR_ONE(iter) \ if (unroll_factor > iter) { \ bsetzero(accZero##iter); \ } else { \ EIGEN_UNUSED_VARIABLE(accZero##iter); \ } #define MICRO_DST_PTR MICRO_UNROLL(MICRO_DST_PTR_ONE) #define MICRO_SRC_PTR_ONE(iter) \ if (unroll_factor > iter) { \ lhs_ptr##iter = lhs_base + ( (row/accCols) + iter )*strideA*accCols + accCols*offsetA; \ } else 
{ \ EIGEN_UNUSED_VARIABLE(lhs_ptr##iter); \ } #define MICRO_SRC_PTR MICRO_UNROLL(MICRO_SRC_PTR_ONE) #define MICRO_PREFETCH_ONE(iter) \ if (unroll_factor > iter) { \ EIGEN_POWER_PREFETCH(lhs_ptr##iter); \ } #define MICRO_PREFETCH MICRO_UNROLL(MICRO_PREFETCH_ONE) #define MICRO_STORE_ONE(iter) \ if (unroll_factor > iter) { \ acc.packet[0] = res.template loadPacket(row + iter*accCols, col + 0); \ acc.packet[1] = res.template loadPacket(row + iter*accCols, col + 1); \ acc.packet[2] = res.template loadPacket(row + iter*accCols, col + 2); \ acc.packet[3] = res.template loadPacket(row + iter*accCols, col + 3); \ bscale(acc, accZero##iter, pAlpha); \ res.template storePacketBlock(row + iter*accCols, col, acc); \ } #define MICRO_STORE MICRO_UNROLL(MICRO_STORE_ONE) #define MICRO_COL_STORE_ONE(iter) \ if (unroll_factor > iter) { \ acc.packet[0] = res.template loadPacket(row + iter*accCols, col + 0); \ bscale(acc, accZero##iter, pAlpha); \ res.template storePacketBlock(row + iter*accCols, col, acc); \ } #define MICRO_COL_STORE MICRO_UNROLL(MICRO_COL_STORE_ONE) template EIGEN_STRONG_INLINE void gemm_unrolled_iteration( const DataMapper& res, const Scalar* lhs_base, const Scalar* rhs_base, Index depth, Index strideA, Index offsetA, Index& row, Index col, const Packet& pAlpha) { const Scalar* rhs_ptr = rhs_base; const Scalar* lhs_ptr0, * lhs_ptr1, * lhs_ptr2, * lhs_ptr3, * lhs_ptr4, * lhs_ptr5, * lhs_ptr6, * lhs_ptr7; PacketBlock accZero0, accZero1, accZero2, accZero3, accZero4, accZero5, accZero6, accZero7; PacketBlock acc; MICRO_SRC_PTR MICRO_DST_PTR Index k = 0; for(; k + PEEL <= depth; k+= PEEL) { EIGEN_POWER_PREFETCH(rhs_ptr); MICRO_PREFETCH MICRO_ONE_PEEL4 } for(; k < depth; k++) { MICRO_ONE4 } MICRO_STORE row += unroll_factor*accCols; } template EIGEN_STRONG_INLINE void gemm_unrolled_col_iteration( const DataMapper& res, const Scalar* lhs_base, const Scalar* rhs_base, Index depth, Index strideA, Index offsetA, Index& row, Index col, Index remaining_cols, const Packet& pAlpha) { const Scalar* rhs_ptr = rhs_base; const Scalar* lhs_ptr0, * lhs_ptr1, * lhs_ptr2, * lhs_ptr3, * lhs_ptr4, * lhs_ptr5, * lhs_ptr6, *lhs_ptr7; PacketBlock accZero0, accZero1, accZero2, accZero3, accZero4, accZero5, accZero6, accZero7; PacketBlock acc; MICRO_SRC_PTR MICRO_DST_PTR Index k = 0; for(; k + PEEL <= depth; k+= PEEL) { EIGEN_POWER_PREFETCH(rhs_ptr); MICRO_PREFETCH MICRO_ONE_PEEL1 } for(; k < depth; k++) { MICRO_ONE1 } MICRO_COL_STORE row += unroll_factor*accCols; } template EIGEN_STRONG_INLINE void gemm_unrolled_col( const DataMapper& res, const Scalar* lhs_base, const Scalar* rhs_base, Index depth, Index strideA, Index offsetA, Index& row, Index rows, Index col, Index remaining_cols, const Packet& pAlpha) { #define MAX_UNROLL 6 while(row + MAX_UNROLL*accCols <= rows) { gemm_unrolled_col_iteration(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, remaining_cols, pAlpha); } switch( (rows-row)/accCols ) { #if MAX_UNROLL > 7 case 7: gemm_unrolled_col_iteration<7, Scalar, Packet, DataMapper, Index, accCols>(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, remaining_cols, pAlpha); break; #endif #if MAX_UNROLL > 6 case 6: gemm_unrolled_col_iteration<6, Scalar, Packet, DataMapper, Index, accCols>(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, remaining_cols, pAlpha); break; #endif #if MAX_UNROLL > 5 case 5: gemm_unrolled_col_iteration<5, Scalar, Packet, DataMapper, Index, accCols>(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, remaining_cols, pAlpha); break; #endif #if 
MAX_UNROLL > 4 case 4: gemm_unrolled_col_iteration<4, Scalar, Packet, DataMapper, Index, accCols>(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, remaining_cols, pAlpha); break; #endif #if MAX_UNROLL > 3 case 3: gemm_unrolled_col_iteration<3, Scalar, Packet, DataMapper, Index, accCols>(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, remaining_cols, pAlpha); break; #endif #if MAX_UNROLL > 2 case 2: gemm_unrolled_col_iteration<2, Scalar, Packet, DataMapper, Index, accCols>(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, remaining_cols, pAlpha); break; #endif #if MAX_UNROLL > 1 case 1: gemm_unrolled_col_iteration<1, Scalar, Packet, DataMapper, Index, accCols>(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, remaining_cols, pAlpha); break; #endif default: break; } #undef MAX_UNROLL } /**************** * GEMM kernels * * **************/ template EIGEN_STRONG_INLINE void gemm(const DataMapper& res, const Scalar* blockA, const Scalar* blockB, Index rows, Index depth, Index cols, Scalar alpha, Index strideA, Index strideB, Index offsetA, Index offsetB) { const Index remaining_rows = rows % accCols; const Index remaining_cols = cols % accRows; if( strideA == -1 ) strideA = depth; if( strideB == -1 ) strideB = depth; const Packet pAlpha = pset1(alpha); const Packet pMask = bmask((const int)(remaining_rows)); Index col = 0; for(; col + accRows <= cols; col += accRows) { const Scalar* rhs_base = blockB + col*strideB + accRows*offsetB; const Scalar* lhs_base = blockA; Index row = 0; #define MAX_UNROLL 6 while(row + MAX_UNROLL*accCols <= rows) { gemm_unrolled_iteration(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, pAlpha); } switch( (rows-row)/accCols ) { #if MAX_UNROLL > 7 case 7: gemm_unrolled_iteration<7, Scalar, Packet, DataMapper, Index, accRows, accCols>(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, pAlpha); break; #endif #if MAX_UNROLL > 6 case 6: gemm_unrolled_iteration<6, Scalar, Packet, DataMapper, Index, accRows, accCols>(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, pAlpha); break; #endif #if MAX_UNROLL > 5 case 5: gemm_unrolled_iteration<5, Scalar, Packet, DataMapper, Index, accRows, accCols>(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, pAlpha); break; #endif #if MAX_UNROLL > 4 case 4: gemm_unrolled_iteration<4, Scalar, Packet, DataMapper, Index, accRows, accCols>(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, pAlpha); break; #endif #if MAX_UNROLL > 3 case 3: gemm_unrolled_iteration<3, Scalar, Packet, DataMapper, Index, accRows, accCols>(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, pAlpha); break; #endif #if MAX_UNROLL > 2 case 2: gemm_unrolled_iteration<2, Scalar, Packet, DataMapper, Index, accRows, accCols>(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, pAlpha); break; #endif #if MAX_UNROLL > 1 case 1: gemm_unrolled_iteration<1, Scalar, Packet, DataMapper, Index, accRows, accCols>(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, pAlpha); break; #endif default: break; } #undef MAX_UNROLL if(remaining_rows > 0) { gemm_extra_row(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, rows, cols, remaining_rows, pAlpha, pMask); } } if(remaining_cols > 0) { const Scalar* rhs_base = blockB + col*strideB + remaining_cols*offsetB; const Scalar* lhs_base = blockA; for(; col < cols; col++) { Index row = 0; gemm_unrolled_col(res, lhs_base, rhs_base, depth, strideA, offsetA, row, rows, col, remaining_cols, pAlpha); if (remaining_rows > 0) { 
gemm_extra_col(res, lhs_base, rhs_base, depth, strideA, offsetA, row, col, remaining_rows, remaining_cols, pAlpha); } rhs_base++; } } } #define accColsC (accCols / 2) #define advanceRows ((LhsIsReal) ? 1 : 2) #define advanceCols ((RhsIsReal) ? 1 : 2) // PEEL_COMPLEX loop factor. #define PEEL_COMPLEX 3 template EIGEN_ALWAYS_INLINE void MICRO_COMPLEX_EXTRA_COL( const Scalar* &lhs_ptr_real, const Scalar* &lhs_ptr_imag, const Scalar* &rhs_ptr_real, const Scalar* &rhs_ptr_imag, PacketBlock &accReal, PacketBlock &accImag, Index remaining_rows, Index remaining_cols) { Packet rhsV[1], rhsVi[1]; rhsV[0] = pset1(rhs_ptr_real[0]); if(!RhsIsReal) rhsVi[0] = pset1(rhs_ptr_imag[0]); pgerc<1, Scalar, Packet, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(&accReal, &accImag, lhs_ptr_real, lhs_ptr_imag, rhsV, rhsVi); lhs_ptr_real += remaining_rows; if(!LhsIsReal) lhs_ptr_imag += remaining_rows; else EIGEN_UNUSED_VARIABLE(lhs_ptr_imag); rhs_ptr_real += remaining_cols; if(!RhsIsReal) rhs_ptr_imag += remaining_cols; else EIGEN_UNUSED_VARIABLE(rhs_ptr_imag); } template EIGEN_STRONG_INLINE void gemm_complex_extra_col( const DataMapper& res, const Scalar* lhs_base, const Scalar* rhs_base, Index depth, Index strideA, Index offsetA, Index strideB, Index row, Index col, Index remaining_rows, Index remaining_cols, const Packet& pAlphaReal, const Packet& pAlphaImag) { const Scalar* rhs_ptr_real = rhs_base; const Scalar* rhs_ptr_imag; if(!RhsIsReal) rhs_ptr_imag = rhs_base + remaining_cols*strideB; else EIGEN_UNUSED_VARIABLE(rhs_ptr_imag); const Scalar* lhs_ptr_real = lhs_base + advanceRows*row*strideA + remaining_rows*offsetA; const Scalar* lhs_ptr_imag; if(!LhsIsReal) lhs_ptr_imag = lhs_ptr_real + remaining_rows*strideA; else EIGEN_UNUSED_VARIABLE(lhs_ptr_imag); PacketBlock accReal, accImag; PacketBlock taccReal, taccImag; PacketBlock acc0, acc1; bsetzero(accReal); bsetzero(accImag); Index remaining_depth = (depth & -accRows); Index k = 0; for(; k + PEEL_COMPLEX <= remaining_depth; k+= PEEL_COMPLEX) { EIGEN_POWER_PREFETCH(rhs_ptr_real); if(!RhsIsReal) { EIGEN_POWER_PREFETCH(rhs_ptr_imag); } EIGEN_POWER_PREFETCH(lhs_ptr_real); if(!LhsIsReal) { EIGEN_POWER_PREFETCH(lhs_ptr_imag); } for (int l = 0; l < PEEL_COMPLEX; l++) { MICRO_COMPLEX_EXTRA_COL(lhs_ptr_real, lhs_ptr_imag, rhs_ptr_real, rhs_ptr_imag, accReal, accImag, remaining_rows, remaining_cols); } } for(; k < remaining_depth; k++) { MICRO_COMPLEX_EXTRA_COL(lhs_ptr_real, lhs_ptr_imag, rhs_ptr_real, rhs_ptr_imag, accReal, accImag, remaining_rows, remaining_cols); } for(; k < depth; k++) { Packet rhsV[1], rhsVi[1]; rhsV[0] = pset1(rhs_ptr_real[0]); if(!RhsIsReal) rhsVi[0] = pset1(rhs_ptr_imag[0]); pgerc<1, Scalar, Packet, Index, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(&accReal, &accImag, lhs_ptr_real, lhs_ptr_imag, rhsV, rhsVi, remaining_rows); lhs_ptr_real += remaining_rows; if(!LhsIsReal) lhs_ptr_imag += remaining_rows; rhs_ptr_real += remaining_cols; if(!RhsIsReal) rhs_ptr_imag += remaining_cols; } bscalec(accReal, accImag, pAlphaReal, pAlphaImag, taccReal, taccImag); bcouple_common(taccReal, taccImag, acc0, acc1); if ((sizeof(Scalar) == sizeof(float)) && (remaining_rows == 1)) { res(row + 0, col + 0) += pfirst(acc0.packet[0]); } else { acc0.packet[0] += res.template loadPacket(row + 0, col + 0); res.template storePacketBlock(row + 0, col + 0, acc0); if(remaining_rows > accColsC) { res(row + accColsC, col + 0) += pfirst(acc1.packet[0]); } } } template EIGEN_ALWAYS_INLINE void MICRO_COMPLEX_EXTRA_ROW( const Scalar* &lhs_ptr_real, const Scalar* 
&lhs_ptr_imag, const Scalar* &rhs_ptr_real, const Scalar* &rhs_ptr_imag, PacketBlock &accReal, PacketBlock &accImag, Index remaining_rows) { Packet rhsV[4], rhsVi[4]; pbroadcast4_old(rhs_ptr_real, rhsV[0], rhsV[1], rhsV[2], rhsV[3]); if(!RhsIsReal) pbroadcast4_old(rhs_ptr_imag, rhsVi[0], rhsVi[1], rhsVi[2], rhsVi[3]); pgerc<4, Scalar, Packet, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(&accReal, &accImag, lhs_ptr_real, lhs_ptr_imag, rhsV, rhsVi); lhs_ptr_real += remaining_rows; if(!LhsIsReal) lhs_ptr_imag += remaining_rows; else EIGEN_UNUSED_VARIABLE(lhs_ptr_imag); rhs_ptr_real += accRows; if(!RhsIsReal) rhs_ptr_imag += accRows; else EIGEN_UNUSED_VARIABLE(rhs_ptr_imag); } template EIGEN_STRONG_INLINE void gemm_complex_extra_row( const DataMapper& res, const Scalar* lhs_base, const Scalar* rhs_base, Index depth, Index strideA, Index offsetA, Index strideB, Index row, Index col, Index rows, Index cols, Index remaining_rows, const Packet& pAlphaReal, const Packet& pAlphaImag, const Packet& pMask) { const Scalar* rhs_ptr_real = rhs_base; const Scalar* rhs_ptr_imag; if(!RhsIsReal) rhs_ptr_imag = rhs_base + accRows*strideB; else EIGEN_UNUSED_VARIABLE(rhs_ptr_imag); const Scalar* lhs_ptr_real = lhs_base + advanceRows*row*strideA + remaining_rows*offsetA; const Scalar* lhs_ptr_imag; if(!LhsIsReal) lhs_ptr_imag = lhs_ptr_real + remaining_rows*strideA; else EIGEN_UNUSED_VARIABLE(lhs_ptr_imag); PacketBlock accReal, accImag; PacketBlock taccReal, taccImag; PacketBlock acc0, acc1; PacketBlock tRes; bsetzero(accReal); bsetzero(accImag); Index remaining_depth = (col + accRows < cols) ? depth : (depth & -accRows); Index k = 0; for(; k + PEEL_COMPLEX <= remaining_depth; k+= PEEL_COMPLEX) { EIGEN_POWER_PREFETCH(rhs_ptr_real); if(!RhsIsReal) { EIGEN_POWER_PREFETCH(rhs_ptr_imag); } EIGEN_POWER_PREFETCH(lhs_ptr_real); if(!LhsIsReal) { EIGEN_POWER_PREFETCH(lhs_ptr_imag); } for (int l = 0; l < PEEL_COMPLEX; l++) { MICRO_COMPLEX_EXTRA_ROW(lhs_ptr_real, lhs_ptr_imag, rhs_ptr_real, rhs_ptr_imag, accReal, accImag, remaining_rows); } } for(; k < remaining_depth; k++) { MICRO_COMPLEX_EXTRA_ROW(lhs_ptr_real, lhs_ptr_imag, rhs_ptr_real, rhs_ptr_imag, accReal, accImag, remaining_rows); } if ((remaining_depth == depth) && (rows >= accCols)) { bload(tRes, res, row, col); bscalec(accReal, accImag, pAlphaReal, pAlphaImag, taccReal, taccImag, pMask); bcouple(taccReal, taccImag, tRes, acc0, acc1); res.template storePacketBlock(row + 0, col, acc0); res.template storePacketBlock(row + accColsC, col, acc1); } else { for(; k < depth; k++) { Packet rhsV[4], rhsVi[4]; pbroadcast4_old(rhs_ptr_real, rhsV[0], rhsV[1], rhsV[2], rhsV[3]); if(!RhsIsReal) pbroadcast4_old(rhs_ptr_imag, rhsVi[0], rhsVi[1], rhsVi[2], rhsVi[3]); pgerc<4, Scalar, Packet, Index, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(&accReal, &accImag, lhs_ptr_real, lhs_ptr_imag, rhsV, rhsVi, remaining_rows); lhs_ptr_real += remaining_rows; if(!LhsIsReal) lhs_ptr_imag += remaining_rows; rhs_ptr_real += accRows; if(!RhsIsReal) rhs_ptr_imag += accRows; } bscalec(accReal, accImag, pAlphaReal, pAlphaImag, taccReal, taccImag); bcouple_common(taccReal, taccImag, acc0, acc1); if ((sizeof(Scalar) == sizeof(float)) && (remaining_rows == 1)) { for(Index j = 0; j < 4; j++) { res(row + 0, col + j) += pfirst(acc0.packet[j]); } } else { for(Index j = 0; j < 4; j++) { PacketBlock acc2; acc2.packet[0] = res.template loadPacket(row + 0, col + j) + acc0.packet[j]; res.template storePacketBlock(row + 0, col + j, acc2); if(remaining_rows > accColsC) { res(row + accColsC, col + j) 
+= pfirst(acc1.packet[j]); } } } } } #define MICRO_COMPLEX_UNROLL(func) \ func(0) func(1) func(2) func(3) func(4) #define MICRO_COMPLEX_UNROLL_WORK(func, func2, peel) \ MICRO_COMPLEX_UNROLL(func2); \ func(0,peel) func(1,peel) func(2,peel) func(3,peel) func(4,peel) #define MICRO_COMPLEX_LOAD_ONE(iter) \ if (unroll_factor > iter) { \ lhsV##iter = ploadLhs(lhs_ptr_real##iter); \ lhs_ptr_real##iter += accCols; \ if(!LhsIsReal) { \ lhsVi##iter = ploadLhs(lhs_ptr_imag##iter); \ lhs_ptr_imag##iter += accCols; \ } else { \ EIGEN_UNUSED_VARIABLE(lhsVi##iter); \ } \ } else { \ EIGEN_UNUSED_VARIABLE(lhsV##iter); \ EIGEN_UNUSED_VARIABLE(lhsVi##iter); \ } #define MICRO_COMPLEX_WORK_ONE4(iter, peel) \ if (unroll_factor > iter) { \ pgerc_common<4, Packet, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(&accReal##iter, &accImag##iter, lhsV##iter, lhsVi##iter, rhsV##peel, rhsVi##peel); \ } #define MICRO_COMPLEX_WORK_ONE1(iter, peel) \ if (unroll_factor > iter) { \ pgerc_common<1, Packet, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(&accReal##iter, &accImag##iter, lhsV##iter, lhsVi##iter, rhsV##peel, rhsVi##peel); \ } #define MICRO_COMPLEX_TYPE_PEEL4(func, func2, peel) \ if (PEEL_COMPLEX > peel) { \ Packet lhsV0, lhsV1, lhsV2, lhsV3, lhsV4; \ Packet lhsVi0, lhsVi1, lhsVi2, lhsVi3, lhsVi4; \ pbroadcast4_old(rhs_ptr_real + (accRows * peel), rhsV##peel[0], rhsV##peel[1], rhsV##peel[2], rhsV##peel[3]); \ if(!RhsIsReal) { \ pbroadcast4_old(rhs_ptr_imag + (accRows * peel), rhsVi##peel[0], rhsVi##peel[1], rhsVi##peel[2], rhsVi##peel[3]); \ } else { \ EIGEN_UNUSED_VARIABLE(rhsVi##peel); \ } \ MICRO_COMPLEX_UNROLL_WORK(func, func2, peel) \ } else { \ EIGEN_UNUSED_VARIABLE(rhsV##peel); \ EIGEN_UNUSED_VARIABLE(rhsVi##peel); \ } #define MICRO_COMPLEX_TYPE_PEEL1(func, func2, peel) \ if (PEEL_COMPLEX > peel) { \ Packet lhsV0, lhsV1, lhsV2, lhsV3, lhsV4; \ Packet lhsVi0, lhsVi1, lhsVi2, lhsVi3, lhsVi4; \ rhsV##peel[0] = pset1(rhs_ptr_real[remaining_cols * peel]); \ if(!RhsIsReal) { \ rhsVi##peel[0] = pset1(rhs_ptr_imag[remaining_cols * peel]); \ } else { \ EIGEN_UNUSED_VARIABLE(rhsVi##peel); \ } \ MICRO_COMPLEX_UNROLL_WORK(func, func2, peel) \ } else { \ EIGEN_UNUSED_VARIABLE(rhsV##peel); \ EIGEN_UNUSED_VARIABLE(rhsVi##peel); \ } #define MICRO_COMPLEX_UNROLL_TYPE_PEEL(M, func, func1, func2) \ Packet rhsV0[M], rhsV1[M], rhsV2[M], rhsV3[M], rhsV4[M], rhsV5[M], rhsV6[M], rhsV7[M], rhsV8[M], rhsV9[M]; \ Packet rhsVi0[M], rhsVi1[M], rhsVi2[M], rhsVi3[M], rhsVi4[M], rhsVi5[M], rhsVi6[M], rhsVi7[M], rhsVi8[M], rhsVi9[M]; \ func(func1,func2,0); func(func1,func2,1); \ func(func1,func2,2); func(func1,func2,3); \ func(func1,func2,4); func(func1,func2,5); \ func(func1,func2,6); func(func1,func2,7); \ func(func1,func2,8); func(func1,func2,9); #define MICRO_COMPLEX_UNROLL_TYPE_ONE(M, func, func1, func2) \ Packet rhsV0[M], rhsVi0[M];\ func(func1,func2,0); #define MICRO_COMPLEX_ONE_PEEL4 \ MICRO_COMPLEX_UNROLL_TYPE_PEEL(4, MICRO_COMPLEX_TYPE_PEEL4, MICRO_COMPLEX_WORK_ONE4, MICRO_COMPLEX_LOAD_ONE); \ rhs_ptr_real += (accRows * PEEL_COMPLEX); \ if(!RhsIsReal) rhs_ptr_imag += (accRows * PEEL_COMPLEX); #define MICRO_COMPLEX_ONE4 \ MICRO_COMPLEX_UNROLL_TYPE_ONE(4, MICRO_COMPLEX_TYPE_PEEL4, MICRO_COMPLEX_WORK_ONE4, MICRO_COMPLEX_LOAD_ONE); \ rhs_ptr_real += accRows; \ if(!RhsIsReal) rhs_ptr_imag += accRows; #define MICRO_COMPLEX_ONE_PEEL1 \ MICRO_COMPLEX_UNROLL_TYPE_PEEL(1, MICRO_COMPLEX_TYPE_PEEL1, MICRO_COMPLEX_WORK_ONE1, MICRO_COMPLEX_LOAD_ONE); \ rhs_ptr_real += (remaining_cols * PEEL_COMPLEX); \ if(!RhsIsReal) rhs_ptr_imag += 
(remaining_cols * PEEL_COMPLEX); #define MICRO_COMPLEX_ONE1 \ MICRO_COMPLEX_UNROLL_TYPE_ONE(1, MICRO_COMPLEX_TYPE_PEEL1, MICRO_COMPLEX_WORK_ONE1, MICRO_COMPLEX_LOAD_ONE); \ rhs_ptr_real += remaining_cols; \ if(!RhsIsReal) rhs_ptr_imag += remaining_cols; #define MICRO_COMPLEX_DST_PTR_ONE(iter) \ if (unroll_factor > iter) { \ bsetzero(accReal##iter); \ bsetzero(accImag##iter); \ } else { \ EIGEN_UNUSED_VARIABLE(accReal##iter); \ EIGEN_UNUSED_VARIABLE(accImag##iter); \ } #define MICRO_COMPLEX_DST_PTR MICRO_COMPLEX_UNROLL(MICRO_COMPLEX_DST_PTR_ONE) #define MICRO_COMPLEX_SRC_PTR_ONE(iter) \ if (unroll_factor > iter) { \ lhs_ptr_real##iter = lhs_base + ( ((advanceRows*row)/accCols) + iter*advanceRows )*strideA*accCols + accCols*offsetA; \ if(!LhsIsReal) { \ lhs_ptr_imag##iter = lhs_ptr_real##iter + accCols*strideA; \ } else { \ EIGEN_UNUSED_VARIABLE(lhs_ptr_imag##iter); \ } \ } else { \ EIGEN_UNUSED_VARIABLE(lhs_ptr_real##iter); \ EIGEN_UNUSED_VARIABLE(lhs_ptr_imag##iter); \ } #define MICRO_COMPLEX_SRC_PTR MICRO_COMPLEX_UNROLL(MICRO_COMPLEX_SRC_PTR_ONE) #define MICRO_COMPLEX_PREFETCH_ONE(iter) \ if (unroll_factor > iter) { \ EIGEN_POWER_PREFETCH(lhs_ptr_real##iter); \ if(!LhsIsReal) { \ EIGEN_POWER_PREFETCH(lhs_ptr_imag##iter); \ } \ } #define MICRO_COMPLEX_PREFETCH MICRO_COMPLEX_UNROLL(MICRO_COMPLEX_PREFETCH_ONE) #define MICRO_COMPLEX_STORE_ONE(iter) \ if (unroll_factor > iter) { \ bload(tRes, res, row + iter*accCols, col); \ bscalec(accReal##iter, accImag##iter, pAlphaReal, pAlphaImag, taccReal, taccImag); \ bcouple(taccReal, taccImag, tRes, acc0, acc1); \ res.template storePacketBlock(row + iter*accCols + 0, col, acc0); \ res.template storePacketBlock(row + iter*accCols + accColsC, col, acc1); \ } #define MICRO_COMPLEX_STORE MICRO_COMPLEX_UNROLL(MICRO_COMPLEX_STORE_ONE) #define MICRO_COMPLEX_COL_STORE_ONE(iter) \ if (unroll_factor > iter) { \ bload(tRes, res, row + iter*accCols, col); \ bscalec(accReal##iter, accImag##iter, pAlphaReal, pAlphaImag, taccReal, taccImag); \ bcouple(taccReal, taccImag, tRes, acc0, acc1); \ res.template storePacketBlock(row + iter*accCols + 0, col, acc0); \ res.template storePacketBlock(row + iter*accCols + accColsC, col, acc1); \ } #define MICRO_COMPLEX_COL_STORE MICRO_COMPLEX_UNROLL(MICRO_COMPLEX_COL_STORE_ONE) template EIGEN_STRONG_INLINE void gemm_complex_unrolled_iteration( const DataMapper& res, const Scalar* lhs_base, const Scalar* rhs_base, Index depth, Index strideA, Index offsetA, Index strideB, Index& row, Index col, const Packet& pAlphaReal, const Packet& pAlphaImag) { const Scalar* rhs_ptr_real = rhs_base; const Scalar* rhs_ptr_imag; if(!RhsIsReal) { rhs_ptr_imag = rhs_base + accRows*strideB; } else { EIGEN_UNUSED_VARIABLE(rhs_ptr_imag); } const Scalar* lhs_ptr_real0, * lhs_ptr_imag0, * lhs_ptr_real1, * lhs_ptr_imag1; const Scalar* lhs_ptr_real2, * lhs_ptr_imag2, * lhs_ptr_real3, * lhs_ptr_imag3; const Scalar* lhs_ptr_real4, * lhs_ptr_imag4; PacketBlock accReal0, accImag0, accReal1, accImag1; PacketBlock accReal2, accImag2, accReal3, accImag3; PacketBlock accReal4, accImag4; PacketBlock taccReal, taccImag; PacketBlock acc0, acc1; PacketBlock tRes; MICRO_COMPLEX_SRC_PTR MICRO_COMPLEX_DST_PTR Index k = 0; for(; k + PEEL_COMPLEX <= depth; k+= PEEL_COMPLEX) { EIGEN_POWER_PREFETCH(rhs_ptr_real); if(!RhsIsReal) { EIGEN_POWER_PREFETCH(rhs_ptr_imag); } MICRO_COMPLEX_PREFETCH MICRO_COMPLEX_ONE_PEEL4 } for(; k < depth; k++) { MICRO_COMPLEX_ONE4 } MICRO_COMPLEX_STORE row += unroll_factor*accCols; } template EIGEN_STRONG_INLINE void 
gemm_complex_unrolled_col_iteration( const DataMapper& res, const Scalar* lhs_base, const Scalar* rhs_base, Index depth, Index strideA, Index offsetA, Index strideB, Index& row, Index col, Index remaining_cols, const Packet& pAlphaReal, const Packet& pAlphaImag) { const Scalar* rhs_ptr_real = rhs_base; const Scalar* rhs_ptr_imag; if(!RhsIsReal) { rhs_ptr_imag = rhs_base + remaining_cols*strideB; } else { EIGEN_UNUSED_VARIABLE(rhs_ptr_imag); } const Scalar* lhs_ptr_real0, * lhs_ptr_imag0, * lhs_ptr_real1, * lhs_ptr_imag1; const Scalar* lhs_ptr_real2, * lhs_ptr_imag2, * lhs_ptr_real3, * lhs_ptr_imag3; const Scalar* lhs_ptr_real4, * lhs_ptr_imag4; PacketBlock accReal0, accImag0, accReal1, accImag1; PacketBlock accReal2, accImag2, accReal3, accImag3; PacketBlock accReal4, accImag4; PacketBlock taccReal, taccImag; PacketBlock acc0, acc1; PacketBlock tRes; MICRO_COMPLEX_SRC_PTR MICRO_COMPLEX_DST_PTR Index k = 0; for(; k + PEEL_COMPLEX <= depth; k+= PEEL_COMPLEX) { EIGEN_POWER_PREFETCH(rhs_ptr_real); if(!RhsIsReal) { EIGEN_POWER_PREFETCH(rhs_ptr_imag); } MICRO_COMPLEX_PREFETCH MICRO_COMPLEX_ONE_PEEL1 } for(; k < depth; k++) { MICRO_COMPLEX_ONE1 } MICRO_COMPLEX_COL_STORE row += unroll_factor*accCols; } template EIGEN_STRONG_INLINE void gemm_complex_unrolled_col( const DataMapper& res, const Scalar* lhs_base, const Scalar* rhs_base, Index depth, Index strideA, Index offsetA, Index strideB, Index& row, Index rows, Index col, Index remaining_cols, const Packet& pAlphaReal, const Packet& pAlphaImag) { #define MAX_COMPLEX_UNROLL 3 while(row + MAX_COMPLEX_UNROLL*accCols <= rows) { gemm_complex_unrolled_col_iteration(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, remaining_cols, pAlphaReal, pAlphaImag); } switch( (rows-row)/accCols ) { #if MAX_COMPLEX_UNROLL > 4 case 4: gemm_complex_unrolled_col_iteration<4, Scalar, Packet, Packetc, DataMapper, Index, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, remaining_cols, pAlphaReal, pAlphaImag); break; #endif #if MAX_COMPLEX_UNROLL > 3 case 3: gemm_complex_unrolled_col_iteration<3, Scalar, Packet, Packetc, DataMapper, Index, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, remaining_cols, pAlphaReal, pAlphaImag); break; #endif #if MAX_COMPLEX_UNROLL > 2 case 2: gemm_complex_unrolled_col_iteration<2, Scalar, Packet, Packetc, DataMapper, Index, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, remaining_cols, pAlphaReal, pAlphaImag); break; #endif #if MAX_COMPLEX_UNROLL > 1 case 1: gemm_complex_unrolled_col_iteration<1, Scalar, Packet, Packetc, DataMapper, Index, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, remaining_cols, pAlphaReal, pAlphaImag); break; #endif default: break; } #undef MAX_COMPLEX_UNROLL } template EIGEN_STRONG_INLINE void gemm_complex(const DataMapper& res, const LhsScalar* blockAc, const RhsScalar* blockBc, Index rows, Index depth, Index cols, Scalarc alpha, Index strideA, Index strideB, Index offsetA, Index offsetB) { const Index remaining_rows = rows % accCols; const Index remaining_cols = cols % accRows; if( strideA == -1 ) strideA = depth; if( strideB == -1 ) strideB = depth; const Packet pAlphaReal = pset1(alpha.real()); const Packet pAlphaImag = pset1(alpha.imag()); const Packet pMask = bmask((const 
int)(remaining_rows)); const Scalar* blockA = (Scalar *) blockAc; const Scalar* blockB = (Scalar *) blockBc; Index col = 0; for(; col + accRows <= cols; col += accRows) { const Scalar* rhs_base = blockB + advanceCols*col*strideB + accRows*offsetB; const Scalar* lhs_base = blockA; Index row = 0; #define MAX_COMPLEX_UNROLL 3 while(row + MAX_COMPLEX_UNROLL*accCols <= rows) { gemm_complex_unrolled_iteration(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, pAlphaReal, pAlphaImag); } switch( (rows-row)/accCols ) { #if MAX_COMPLEX_UNROLL > 4 case 4: gemm_complex_unrolled_iteration<4, Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, pAlphaReal, pAlphaImag); break; #endif #if MAX_COMPLEX_UNROLL > 3 case 3: gemm_complex_unrolled_iteration<3, Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, pAlphaReal, pAlphaImag); break; #endif #if MAX_COMPLEX_UNROLL > 2 case 2: gemm_complex_unrolled_iteration<2, Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, pAlphaReal, pAlphaImag); break; #endif #if MAX_COMPLEX_UNROLL > 1 case 1: gemm_complex_unrolled_iteration<1, Scalar, Packet, Packetc, DataMapper, Index, accRows, accCols, ConjugateLhs, ConjugateRhs, LhsIsReal, RhsIsReal>(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, pAlphaReal, pAlphaImag); break; #endif default: break; } #undef MAX_COMPLEX_UNROLL if(remaining_rows > 0) { gemm_complex_extra_row(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, rows, cols, remaining_rows, pAlphaReal, pAlphaImag, pMask); } } if(remaining_cols > 0) { const Scalar* rhs_base = blockB + advanceCols*col*strideB + remaining_cols*offsetB; const Scalar* lhs_base = blockA; for(; col < cols; col++) { Index row = 0; gemm_complex_unrolled_col(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, rows, col, remaining_cols, pAlphaReal, pAlphaImag); if (remaining_rows > 0) { gemm_complex_extra_col(res, lhs_base, rhs_base, depth, strideA, offsetA, strideB, row, col, remaining_rows, remaining_cols, pAlphaReal, pAlphaImag); } rhs_base++; } } } #undef accColsC #undef advanceCols #undef advanceRows /************************************ * ppc64le template specializations * * **********************************/ template struct gemm_pack_lhs { void operator()(double* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); }; template void gemm_pack_lhs ::operator()(double* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) { dhs_pack pack; pack(blockA, lhs, depth, rows, stride, offset); } template struct gemm_pack_lhs { void operator()(double* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); }; template void gemm_pack_lhs ::operator()(double* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) { dhs_pack pack; pack(blockA, lhs, depth, rows, stride, offset); } template struct gemm_pack_rhs { void operator()(double* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); }; template void gemm_pack_rhs ::operator()(double* blockB, const DataMapper& rhs, Index depth, Index 
cols, Index stride, Index offset) { dhs_pack pack; pack(blockB, rhs, depth, cols, stride, offset); } template struct gemm_pack_rhs { void operator()(double* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); }; template void gemm_pack_rhs ::operator()(double* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) { dhs_pack pack; pack(blockB, rhs, depth, cols, stride, offset); } template struct gemm_pack_lhs { void operator()(float* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); }; template void gemm_pack_lhs ::operator()(float* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) { dhs_pack pack; pack(blockA, lhs, depth, rows, stride, offset); } template struct gemm_pack_lhs { void operator()(float* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); }; template void gemm_pack_lhs ::operator()(float* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) { dhs_pack pack; pack(blockA, lhs, depth, rows, stride, offset); } template struct gemm_pack_lhs, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode> { void operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); }; template void gemm_pack_lhs, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode> ::operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) { dhs_cpack pack; pack(blockA, lhs, depth, rows, stride, offset); } template struct gemm_pack_lhs, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode> { void operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); }; template void gemm_pack_lhs, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode> ::operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) { dhs_cpack pack; pack(blockA, lhs, depth, rows, stride, offset); } template struct gemm_pack_rhs { void operator()(float* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); }; template void gemm_pack_rhs ::operator()(float* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) { dhs_pack pack; pack(blockB, rhs, depth, cols, stride, offset); } template struct gemm_pack_rhs { void operator()(float* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); }; template void gemm_pack_rhs ::operator()(float* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) { dhs_pack pack; pack(blockB, rhs, depth, cols, stride, offset); } template struct gemm_pack_rhs, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode> { void operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); }; template void gemm_pack_rhs, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode> ::operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) { dhs_cpack pack; pack(blockB, rhs, depth, cols, stride, offset); } template struct gemm_pack_rhs, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode> { void operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); }; template void 
gemm_pack_rhs, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode> ::operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) { dhs_cpack pack; pack(blockB, rhs, depth, cols, stride, offset); } template struct gemm_pack_lhs, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode> { void operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); }; template void gemm_pack_lhs, Index, DataMapper, Pack1, Pack2, Packet, RowMajor, Conjugate, PanelMode> ::operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) { dhs_cpack pack; pack(blockA, lhs, depth, rows, stride, offset); } template struct gemm_pack_lhs, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode> { void operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride=0, Index offset=0); }; template void gemm_pack_lhs, Index, DataMapper, Pack1, Pack2, Packet, ColMajor, Conjugate, PanelMode> ::operator()(std::complex* blockA, const DataMapper& lhs, Index depth, Index rows, Index stride, Index offset) { dhs_cpack pack; pack(blockA, lhs, depth, rows, stride, offset); } template struct gemm_pack_rhs, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode> { void operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); }; template void gemm_pack_rhs, Index, DataMapper, nr, ColMajor, Conjugate, PanelMode> ::operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) { dhs_cpack pack; pack(blockB, rhs, depth, cols, stride, offset); } template struct gemm_pack_rhs, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode> { void operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride=0, Index offset=0); }; template void gemm_pack_rhs, Index, DataMapper, nr, RowMajor, Conjugate, PanelMode> ::operator()(std::complex* blockB, const DataMapper& rhs, Index depth, Index cols, Index stride, Index offset) { dhs_cpack pack; pack(blockB, rhs, depth, cols, stride, offset); } // ********* gebp specializations ********* template struct gebp_kernel { typedef typename quad_traits::vectortype Packet; typedef typename quad_traits::rhstype RhsPacket; void operator()(const DataMapper& res, const float* blockA, const float* blockB, Index rows, Index depth, Index cols, float alpha, Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); }; template void gebp_kernel ::operator()(const DataMapper& res, const float* blockA, const float* blockB, Index rows, Index depth, Index cols, float alpha, Index strideA, Index strideB, Index offsetA, Index offsetB) { const Index accRows = quad_traits::rows; const Index accCols = quad_traits::size; void (*gemm_function)(const DataMapper&, const float*, const float*, Index, Index, Index, float, Index, Index, Index, Index); #ifdef EIGEN_ALTIVEC_MMA_ONLY //generate with MMA only gemm_function = &Eigen::internal::gemmMMA; #elif defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA) if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){ gemm_function = &Eigen::internal::gemmMMA; } else{ gemm_function = &Eigen::internal::gemm; } #else gemm_function = &Eigen::internal::gemm; #endif gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB); } template struct gebp_kernel, std::complex, 
Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> { typedef Packet4f Packet; typedef Packet2cf Packetc; typedef Packet4f RhsPacket; void operator()(const DataMapper& res, const std::complex* blockA, const std::complex* blockB, Index rows, Index depth, Index cols, std::complex alpha, Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); }; template void gebp_kernel, std::complex, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> ::operator()(const DataMapper& res, const std::complex* blockA, const std::complex* blockB, Index rows, Index depth, Index cols, std::complex alpha, Index strideA, Index strideB, Index offsetA, Index offsetB) { const Index accRows = quad_traits::rows; const Index accCols = quad_traits::size; void (*gemm_function)(const DataMapper&, const std::complex*, const std::complex*, Index, Index, Index, std::complex, Index, Index, Index, Index); #ifdef EIGEN_ALTIVEC_MMA_ONLY //generate with MMA only gemm_function = &Eigen::internal::gemm_complexMMA, std::complex, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>; #elif defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA) if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){ gemm_function = &Eigen::internal::gemm_complexMMA, std::complex, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>; } else{ gemm_function = &Eigen::internal::gemm_complex, std::complex, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>; } #else gemm_function = &Eigen::internal::gemm_complex, std::complex, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>; #endif gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB); } template struct gebp_kernel, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> { typedef Packet4f Packet; typedef Packet2cf Packetc; typedef Packet4f RhsPacket; void operator()(const DataMapper& res, const float* blockA, const std::complex* blockB, Index rows, Index depth, Index cols, std::complex alpha, Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); }; template void gebp_kernel, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> ::operator()(const DataMapper& res, const float* blockA, const std::complex* blockB, Index rows, Index depth, Index cols, std::complex alpha, Index strideA, Index strideB, Index offsetA, Index offsetB) { const Index accRows = quad_traits::rows; const Index accCols = quad_traits::size; void (*gemm_function)(const DataMapper&, const float*, const std::complex*, Index, Index, Index, std::complex, Index, Index, Index, Index); #ifdef EIGEN_ALTIVEC_MMA_ONLY //generate with MMA only gemm_function = &Eigen::internal::gemm_complexMMA, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>; #elif defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA) if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){ gemm_function = &Eigen::internal::gemm_complexMMA, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>; } else{ gemm_function = &Eigen::internal::gemm_complex, std::complex, float, Index, Packet, Packetc, 
RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>; } #else gemm_function = &Eigen::internal::gemm_complex, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>; #endif gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB); } template struct gebp_kernel, float, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> { typedef Packet4f Packet; typedef Packet2cf Packetc; typedef Packet4f RhsPacket; void operator()(const DataMapper& res, const std::complex* blockA, const float* blockB, Index rows, Index depth, Index cols, std::complex alpha, Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); }; template void gebp_kernel, float, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> ::operator()(const DataMapper& res, const std::complex* blockA, const float* blockB, Index rows, Index depth, Index cols, std::complex alpha, Index strideA, Index strideB, Index offsetA, Index offsetB) { const Index accRows = quad_traits::rows; const Index accCols = quad_traits::size; void (*gemm_function)(const DataMapper&, const std::complex*, const float*, Index, Index, Index, std::complex, Index, Index, Index, Index); #ifdef EIGEN_ALTIVEC_MMA_ONLY //generate with MMA only gemm_function = &Eigen::internal::gemm_complexMMA, float, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>; #elif defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA) if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){ gemm_function = &Eigen::internal::gemm_complexMMA, float, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>; } else{ gemm_function = &Eigen::internal::gemm_complex, float, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>; } #else gemm_function = &Eigen::internal::gemm_complex, float, std::complex, float, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>; #endif gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB); } template struct gebp_kernel { typedef typename quad_traits::vectortype Packet; typedef typename quad_traits::rhstype RhsPacket; void operator()(const DataMapper& res, const double* blockA, const double* blockB, Index rows, Index depth, Index cols, double alpha, Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); }; template void gebp_kernel ::operator()(const DataMapper& res, const double* blockA, const double* blockB, Index rows, Index depth, Index cols, double alpha, Index strideA, Index strideB, Index offsetA, Index offsetB) { const Index accRows = quad_traits::rows; const Index accCols = quad_traits::size; void (*gemm_function)(const DataMapper&, const double*, const double*, Index, Index, Index, double, Index, Index, Index, Index); #ifdef EIGEN_ALTIVEC_MMA_ONLY //generate with MMA only gemm_function = &Eigen::internal::gemmMMA; #elif defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA) if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){ gemm_function = &Eigen::internal::gemmMMA; } else{ gemm_function = &Eigen::internal::gemm; } #else gemm_function = &Eigen::internal::gemm; #endif gemm_function(res, blockA, blockB, rows, depth, 
cols, alpha, strideA, strideB, offsetA, offsetB); } template struct gebp_kernel, std::complex, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> { typedef quad_traits::vectortype Packet; typedef Packet1cd Packetc; typedef quad_traits::rhstype RhsPacket; void operator()(const DataMapper& res, const std::complex* blockA, const std::complex* blockB, Index rows, Index depth, Index cols, std::complex alpha, Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); }; template void gebp_kernel, std::complex, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> ::operator()(const DataMapper& res, const std::complex* blockA, const std::complex* blockB, Index rows, Index depth, Index cols, std::complex alpha, Index strideA, Index strideB, Index offsetA, Index offsetB) { const Index accRows = quad_traits::rows; const Index accCols = quad_traits::size; void (*gemm_function)(const DataMapper&, const std::complex*, const std::complex*, Index, Index, Index, std::complex, Index, Index, Index, Index); #ifdef EIGEN_ALTIVEC_MMA_ONLY //generate with MMA only gemm_function = &Eigen::internal::gemm_complexMMA, std::complex, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>; #elif defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA) if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){ gemm_function = &Eigen::internal::gemm_complexMMA, std::complex, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>; } else{ gemm_function = &Eigen::internal::gemm_complex, std::complex, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>; } #else gemm_function = &Eigen::internal::gemm_complex, std::complex, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, false>; #endif gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB); } template struct gebp_kernel, double, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> { typedef quad_traits::vectortype Packet; typedef Packet1cd Packetc; typedef quad_traits::rhstype RhsPacket; void operator()(const DataMapper& res, const std::complex* blockA, const double* blockB, Index rows, Index depth, Index cols, std::complex alpha, Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); }; template void gebp_kernel, double, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> ::operator()(const DataMapper& res, const std::complex* blockA, const double* blockB, Index rows, Index depth, Index cols, std::complex alpha, Index strideA, Index strideB, Index offsetA, Index offsetB) { const Index accRows = quad_traits::rows; const Index accCols = quad_traits::size; void (*gemm_function)(const DataMapper&, const std::complex*, const double*, Index, Index, Index, std::complex, Index, Index, Index, Index); #ifdef EIGEN_ALTIVEC_MMA_ONLY //generate with MMA only gemm_function = &Eigen::internal::gemm_complexMMA, double, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>; #elif defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA) if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){ gemm_function = &Eigen::internal::gemm_complexMMA, double, std::complex, double, Index, Packet, 
Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>; } else{ gemm_function = &Eigen::internal::gemm_complex, double, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>; } #else gemm_function = &Eigen::internal::gemm_complex, double, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, false, true>; #endif gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB); } template struct gebp_kernel, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> { typedef quad_traits::vectortype Packet; typedef Packet1cd Packetc; typedef quad_traits::rhstype RhsPacket; void operator()(const DataMapper& res, const double* blockA, const std::complex* blockB, Index rows, Index depth, Index cols, std::complex alpha, Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0); }; template void gebp_kernel, Index, DataMapper, mr, nr, ConjugateLhs, ConjugateRhs> ::operator()(const DataMapper& res, const double* blockA, const std::complex* blockB, Index rows, Index depth, Index cols, std::complex alpha, Index strideA, Index strideB, Index offsetA, Index offsetB) { const Index accRows = quad_traits::rows; const Index accCols = quad_traits::size; void (*gemm_function)(const DataMapper&, const double*, const std::complex*, Index, Index, Index, std::complex, Index, Index, Index, Index); #ifdef EIGEN_ALTIVEC_MMA_ONLY //generate with MMA only gemm_function = &Eigen::internal::gemm_complexMMA, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>; #elif defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA) if (__builtin_cpu_supports ("arch_3_1") && __builtin_cpu_supports ("mma")){ gemm_function = &Eigen::internal::gemm_complexMMA, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>; } else{ gemm_function = &Eigen::internal::gemm_complex, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>; } #else gemm_function = &Eigen::internal::gemm_complex, std::complex, double, Index, Packet, Packetc, RhsPacket, DataMapper, accRows, accCols, ConjugateLhs, ConjugateRhs, true, false>; #endif gemm_function(res, blockA, blockB, rows, depth, cols, alpha, strideA, strideB, offsetA, offsetB); } } // end namespace internal } // end namespace Eigen #endif // EIGEN_MATRIX_PRODUCT_ALTIVEC_H
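// All of the gebp_kernel specializations above select their GEMM routine with
// the same pattern: force the MMA path when EIGEN_ALTIVEC_MMA_ONLY is defined,
// otherwise query the CPU at runtime and fall back to the VSX kernel. A minimal
// stand-alone sketch of that selection idiom (kernel_ptr and select_gemm_kernel
// are illustrative names, not part of Eigen):
//
//   typedef void (*kernel_ptr)();  // stands in for the gemm function-pointer types above
//
//   inline kernel_ptr select_gemm_kernel(kernel_ptr mma_kernel, kernel_ptr vsx_kernel)
//   {
//   #if defined(EIGEN_ALTIVEC_MMA_ONLY)
//     return mma_kernel;                // build configured for MMA only
//   #elif defined(ALTIVEC_MMA_SUPPORT) && !defined(EIGEN_ALTIVEC_DISABLE_MMA)
//     if (__builtin_cpu_supports("arch_3_1") && __builtin_cpu_supports("mma"))
//       return mma_kernel;              // Power10 (ISA 3.1) with MMA available
//     return vsx_kernel;
//   #else
//     return vsx_kernel;                // MMA disabled or not supported by the compiler
//   #endif
//   }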