From 7402fea0a8e63e3ea248257047c584afee8f8bde Mon Sep 17 00:00:00 2001
From: Benoit Steiner
Date: Fri, 16 May 2014 15:08:05 -0700
Subject: Vectorized the evaluation of tensor expression (using SSE, AVX, NEON, ...)

Added the ability to parallelize the evaluation of a tensor expression over
multiple cpu cores.
Added the ability to offload the evaluation of a tensor expression to a GPU.
---
 unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h | 145 +++++++++++++++++++++-
 1 file changed, 143 insertions(+), 2 deletions(-)

(limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h')

diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
index f1df827f9..e69ff6188 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
@@ -10,6 +10,9 @@
 #ifndef EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
 #define EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
 
+#ifdef EIGEN_USE_THREADS
+#include <future>
+#endif
 
 namespace Eigen {
 
@@ -28,7 +31,8 @@ namespace Eigen {
  */
 namespace internal {
 
-template<typename Derived1, typename Derived2>
+// Default strategy: the expressions are evaluated with a single cpu thread.
+template<typename Derived1, typename Derived2, bool Vectorizable = TensorEvaluator<Derived1>::PacketAccess & TensorEvaluator<Derived2>::PacketAccess>
 struct TensorAssign
 {
   typedef typename Derived1::Index Index;
@@ -38,13 +42,150 @@ struct TensorAssign
     TensorEvaluator<Derived1> evalDst(dst);
     TensorEvaluator<Derived2> evalSrc(src);
     const Index size = dst.size();
-    for(Index i = 0; i < size; ++i) {
+    for (Index i = 0; i < size; ++i) {
+      evalDst.coeffRef(i) = evalSrc.coeff(i);
+    }
+  }
+};
+
+
+template<typename Derived1, typename Derived2>
+struct TensorAssign<Derived1, Derived2, true>
+{
+  typedef typename Derived1::Index Index;
+  EIGEN_DEVICE_FUNC
+  static inline void run(Derived1& dst, const Derived2& src)
+  {
+    TensorEvaluator<Derived1> evalDst(dst);
+    TensorEvaluator<Derived2> evalSrc(src);
+    const Index size = dst.size();
+
+    static const int LhsStoreMode = TensorEvaluator<Derived1>::IsAligned ? Aligned : Unaligned;
+    static const int RhsLoadMode = TensorEvaluator<Derived2>::IsAligned ? Aligned : Unaligned;
+    static const int PacketSize = unpacket_traits<typename TensorEvaluator<Derived1>::PacketReturnType>::size;
+    static const int VectorizedSize = (size / PacketSize) * PacketSize;
+
+    for (Index i = 0; i < VectorizedSize; i += PacketSize) {
+      evalDst.template writePacket<LhsStoreMode>(i, evalSrc.template packet<RhsLoadMode>(i));
+    }
+    for (Index i = VectorizedSize; i < size; ++i) {
       evalDst.coeffRef(i) = evalSrc.coeff(i);
     }
   }
 };
 
+
+// Multicore strategy: the index space is partitioned and each core is assigned to a partition
+#ifdef EIGEN_USE_THREADS
+template <typename LhsEval, typename RhsEval, typename Index, bool Vectorizable = LhsEval::PacketAccess & RhsEval::PacketAccess>
+struct EvalRange {
+  static void run(LhsEval& dst, const RhsEval& src, const Index first, const Index last) {
+    eigen_assert(last > first);
+    for (Index i = first; i < last; ++i) {
+      dst.coeffRef(i) = src.coeff(i);
+    }
+  }
+};
+
+template <typename LhsEval, typename RhsEval, typename Index>
+struct EvalRange<LhsEval, RhsEval, Index, true> {
+  static void run(LhsEval& dst, const RhsEval& src, const Index first, const Index last) {
+    eigen_assert(last > first);
+
+    Index i = first;
+    static const int PacketSize = unpacket_traits<typename LhsEval::PacketReturnType>::size;
+    if (last - first > PacketSize) {
+      static const int LhsStoreMode = LhsEval::IsAligned ? Aligned : Unaligned;
+      static const int RhsLoadMode = RhsEval::IsAligned ? Aligned : Unaligned;
+      eigen_assert(first % PacketSize == 0);
+      Index lastPacket = last - (last % PacketSize);
+      for (; i < lastPacket; i += PacketSize) {
+        dst.template writePacket<LhsStoreMode>(i, src.template packet<RhsLoadMode>(i));
+      }
+    }
+
+    for (; i < last; ++i) {
+      dst.coeffRef(i) = src.coeff(i);
+    }
+  }
+};
+
+template<typename Derived1, typename Derived2>
+struct TensorAssignMultiThreaded
+{
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1& dst, const Derived2& src, const ThreadPoolDevice& device)
+  {
+    TensorEvaluator<Derived1> evalDst(dst);
+    TensorEvaluator<Derived2> evalSrc(src);
+    const Index size = dst.size();
+
+    static const bool Vectorizable = TensorEvaluator<Derived1>::PacketAccess & TensorEvaluator<Derived2>::PacketAccess;
+    static const int PacketSize = Vectorizable ? unpacket_traits<typename TensorEvaluator<Derived1>::PacketReturnType>::size : 1;
+
+    int blocksz = static_cast<int>(ceil(static_cast<float>(size)/device.numThreads()) + PacketSize - 1);
+    const Index blocksize = std::max(PacketSize, (blocksz - (blocksz % PacketSize)));
+    const Index numblocks = size / blocksize;
+
+    Index i = 0;
+    std::vector<std::future<void> > results;
+    results.reserve(numblocks);
+    for (int i = 0; i < numblocks; ++i) {
+      results.push_back(std::async(std::launch::async, &EvalRange<TensorEvaluator<Derived1>, TensorEvaluator<Derived2>, Index>::run, evalDst, evalSrc, i*blocksize, (i+1)*blocksize));
+    }
+
+    for (int i = 0; i < numblocks; ++i) {
+      results[i].get();
+    }
+
+    if (numblocks * blocksize < size) {
+      EvalRange<TensorEvaluator<Derived1>, TensorEvaluator<Derived2>, Index>::run(evalDst, evalSrc, numblocks * blocksize, size);
+    }
+  }
+};
+#endif
+
+
+// GPU: the evaluation of the expressions is offloaded to a GPU.
+#ifdef EIGEN_USE_GPU
+template <typename LhsEvaluator, typename RhsEvaluator>
+__global__ void EigenMetaKernelNoCheck(LhsEvaluator evalDst, const RhsEvaluator evalSrc) {
+  const int index = blockIdx.x * blockDim.x + threadIdx.x;
+  evalDst.coeffRef(index) = evalSrc.coeff(index);
+}
+template <typename LhsEvaluator, typename RhsEvaluator>
+__global__ void EigenMetaKernelPeel(LhsEvaluator evalDst, const RhsEvaluator evalSrc, int peel_start_offset, int size) {
+  const int index = peel_start_offset + blockIdx.x * blockDim.x + threadIdx.x;
+  if (index < size) {
+    evalDst.coeffRef(index) = evalSrc.coeff(index);
+  }
+}
+
+template<typename Derived1, typename Derived2>
+struct TensorAssignGpu
+{
+  typedef typename Derived1::Index Index;
+  static inline void run(Derived1& dst, const Derived2& src, const GpuDevice& device)
+  {
+    TensorEvaluator<Derived1> evalDst(dst);
+    TensorEvaluator<Derived2> evalSrc(src);
+    const Index size = dst.size();
+    const int block_size = std::min<Index>(size, 32*32);
+    const int num_blocks = size / block_size;
+    EigenMetaKernelNoCheck<TensorEvaluator<Derived1>, TensorEvaluator<Derived2> > <<<num_blocks, block_size, 0, device.stream()>>>(evalDst, evalSrc);
+
+    const int remaining_items = size % block_size;
+    if (remaining_items > 0) {
+      const int peel_start_offset = num_blocks * block_size;
+      const int peel_block_size = std::min<Index>(size, 32);
+      const int peel_num_blocks = (remaining_items + peel_block_size - 1) / peel_block_size;
+      EigenMetaKernelPeel<TensorEvaluator<Derived1>, TensorEvaluator<Derived2> > <<<peel_num_blocks, peel_block_size, 0, device.stream()>>>(evalDst, evalSrc, peel_start_offset, size);
+    }
+  }
+};
+#endif
+
 } // end namespace internal
 
 } // end namespace Eigen
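
For readers who want to see the multicore strategy in isolation: the patch rounds the per-thread share of the index space up to a multiple of the packet size, launches one std::async task per block, and evaluates the leftover tail on the calling thread. The sketch below reproduces that partitioning arithmetic outside of Eigen under stated assumptions: evaluate_range and parallel_assign are hypothetical stand-ins for EvalRange::run and TensorAssignMultiThreaded::run, a raw float array replaces the tensor evaluators, and the thread and packet counts are illustrative, not Eigen defaults.

    #include <algorithm>
    #include <cassert>
    #include <cmath>
    #include <cstddef>
    #include <future>
    #include <vector>

    // Illustrative stand-in for the evaluator pair: copy src into dst over [first, last).
    // A real TensorEvaluator would compute coefficients on the fly; a raw array keeps
    // the sketch self-contained.
    static void evaluate_range(float* dst, const float* src,
                               std::ptrdiff_t first, std::ptrdiff_t last) {
      assert(last >= first);
      for (std::ptrdiff_t i = first; i < last; ++i) {
        dst[i] = src[i];
      }
    }

    // Same partitioning scheme as TensorAssignMultiThreaded::run above: round the
    // per-thread share up to a multiple of packet_size, launch one std::async task
    // per block, wait for all blocks, then handle the tail on the calling thread.
    static void parallel_assign(float* dst, const float* src, std::ptrdiff_t size,
                                int num_threads, int packet_size) {
      const int blocksz = static_cast<int>(
          std::ceil(static_cast<float>(size) / num_threads) + packet_size - 1);
      const std::ptrdiff_t blocksize =
          std::max<std::ptrdiff_t>(packet_size, blocksz - (blocksz % packet_size));
      const std::ptrdiff_t numblocks = size / blocksize;

      std::vector<std::future<void> > results;
      results.reserve(numblocks);
      for (std::ptrdiff_t i = 0; i < numblocks; ++i) {
        results.push_back(std::async(std::launch::async, &evaluate_range,
                                     dst, src, i * blocksize, (i + 1) * blocksize));
      }
      for (std::size_t i = 0; i < results.size(); ++i) {
        results[i].get();  // wait for every block before returning
      }
      if (numblocks * blocksize < size) {
        evaluate_range(dst, src, numblocks * blocksize, size);  // leftover tail
      }
    }

    int main() {
      const std::ptrdiff_t n = 1000;
      std::vector<float> src(n, 1.0f), dst(n, 0.0f);
      parallel_assign(&dst[0], &src[0], n, /*num_threads=*/4, /*packet_size=*/8);
      return dst[n - 1] == 1.0f ? 0 : 1;
    }

Rounding the block size up to a multiple of the packet size is what guarantees that every block except the tail starts on a packet boundary, which is the precondition (eigen_assert(first % PacketSize == 0)) that the vectorized EvalRange specialization relies on.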