path: root/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
author     Benoit Steiner <benoit.steiner.goog@gmail.com>  2014-06-13 09:56:51 -0700
committer  Benoit Steiner <benoit.steiner.goog@gmail.com>  2014-06-13 09:56:51 -0700
commit     38ab7e6ed0491bd5a0c639f218d5ea4728bf1e81 (patch)
tree       9f74f100b406a629c29676000d9ef46b5f2e7536 /unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
parent     aa664eabb912a1b96e417e9a8d9c98f423b7fc23 (diff)
Reworked the expression evaluation mechanism in order to make it possible to efficiently compute convolutions and contractions in the future:
* The scheduling of computation is moved out of the assignment code and into a new TensorExecutor class
* The assignment itself is now a regular node on the expression tree
* The expression evaluators start by recursively evaluating all their subexpressions if needed
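
For orientation, here is a minimal sketch of the pattern this commit introduces. The types below (BufferEval, AssignNode, SingleThreadExecutor) are illustrative stand-ins, not Eigen's classes: the assignment becomes a plain node that pairs two evaluators, and the loop that walks the index space lives in an executor that can be swapped for a multithreaded or GPU variant.

#include <cstddef>
#include <iostream>
#include <vector>

// Minimal evaluator over a flat buffer (stand-in for TensorEvaluator).
struct BufferEval {
  std::vector<float>& data;
  float& coeffRef(std::size_t i) { return data[i]; }
  float coeff(std::size_t i) const { return data[i]; }
};

// The assignment is just a node pairing a destination and a source evaluator.
template <typename LhsEval, typename RhsEval>
struct AssignNode {
  LhsEval& lhs;        // exposes coeffRef(i)
  const RhsEval& rhs;  // exposes coeff(i)
};

struct SingleThreadExecutor {
  // The executor, not the assignment node, decides how work is scheduled;
  // another executor could partition the index space or vectorize the loop.
  template <typename LhsEval, typename RhsEval>
  static void run(AssignNode<LhsEval, RhsEval> node, std::size_t size) {
    for (std::size_t i = 0; i < size; ++i)
      node.lhs.coeffRef(i) = node.rhs.coeff(i);  // scalar path only
  }
};

int main() {
  std::vector<float> dst(4, 0.f);
  std::vector<float> src{1.f, 2.f, 3.f, 4.f};
  BufferEval lhs{dst}, rhs{src};
  SingleThreadExecutor::run(AssignNode<BufferEval, BufferEval>{lhs, rhs}, dst.size());
  std::cout << dst[2] << std::endl;  // prints 3
}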
Diffstat (limited to 'unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h')
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h  250
1 file changed, 104 insertions, 146 deletions
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
index 633a7a31b..a2a925775 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
@@ -10,10 +10,6 @@
#ifndef EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
#define EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
-#ifdef EIGEN_USE_THREADS
-#include <future>
-#endif
-
namespace Eigen {
/** \class TensorAssign
@@ -21,172 +17,134 @@ namespace Eigen {
*
* \brief The tensor assignment class.
*
- * This class is responsible for triggering the evaluation of the expressions
- * used on the lhs and rhs of an assignment operator and copy the result of
- * the evaluation of the rhs expression at the address computed during the
- * evaluation lhs expression.
- *
- * TODO: vectorization. For now the code only uses scalars
- * TODO: parallelisation using multithreading on cpu, or kernels on gpu.
+ * This class represents the assignment of the values resulting from the evaluation of
+ * the rhs expression to the memory locations denoted by the lhs expression.
*/
namespace internal {
-
-// Default strategy: the expressions are evaluated with a single cpu thread.
-template<typename Derived1, typename Derived2, typename Device = DefaultDevice, bool Vectorizable = TensorEvaluator<Derived1, Device>::PacketAccess & TensorEvaluator<Derived2, Device>::PacketAccess>
-struct TensorAssign
+template<typename LhsXprType, typename RhsXprType>
+struct traits<TensorAssignOp<LhsXprType, RhsXprType> >
{
- typedef typename Derived1::Index Index;
- EIGEN_DEVICE_FUNC
- static inline void run(Derived1& dst, const Derived2& src, const Device& device = Device())
- {
- TensorEvaluator<Derived1, Device> evalDst(dst, device);
- TensorEvaluator<Derived2, Device> evalSrc(src, device);
- const Index size = dst.size();
- for (Index i = 0; i < size; ++i) {
- evalDst.coeffRef(i) = evalSrc.coeff(i);
- }
- }
+ typedef typename LhsXprType::Scalar Scalar;
+ typedef typename internal::packet_traits<Scalar>::type Packet;
+ typedef typename traits<LhsXprType>::StorageKind StorageKind;
+ typedef typename promote_index_type<typename traits<LhsXprType>::Index,
+ typename traits<RhsXprType>::Index>::type Index;
+ typedef typename LhsXprType::Nested LhsNested;
+ typedef typename RhsXprType::Nested RhsNested;
+ typedef typename remove_reference<LhsNested>::type _LhsNested;
+ typedef typename remove_reference<RhsNested>::type _RhsNested;
+
+ enum {
+ Flags = 0,
+ };
};
+template<typename LhsXprType, typename RhsXprType>
+struct eval<TensorAssignOp<LhsXprType, RhsXprType>, Eigen::Dense>
+{
+ typedef const TensorAssignOp<LhsXprType, RhsXprType>& type;
+};
-template<typename Derived1, typename Derived2, typename Device>
-struct TensorAssign<Derived1, Derived2, Device, true>
+template<typename LhsXprType, typename RhsXprType>
+struct nested<TensorAssignOp<LhsXprType, RhsXprType>, 1, typename eval<TensorAssignOp<LhsXprType, RhsXprType> >::type>
{
- typedef typename Derived1::Index Index;
- static inline void run(Derived1& dst, const Derived2& src, const Device& device = Device())
- {
- TensorEvaluator<Derived1, Device> evalDst(dst, device);
- TensorEvaluator<Derived2, Device> evalSrc(src, device);
- const Index size = dst.size();
-
- static const int LhsStoreMode = TensorEvaluator<Derived1, Device>::IsAligned ? Aligned : Unaligned;
- static const int RhsLoadMode = TensorEvaluator<Derived2, Device>::IsAligned ? Aligned : Unaligned;
- static const int PacketSize = unpacket_traits<typename TensorEvaluator<Derived1, Device>::PacketReturnType>::size;
- const int VectorizedSize = (size / PacketSize) * PacketSize;
-
- for (Index i = 0; i < VectorizedSize; i += PacketSize) {
- evalDst.template writePacket<LhsStoreMode>(i, evalSrc.template packet<RhsLoadMode>(i));
- }
- for (Index i = VectorizedSize; i < size; ++i) {
- evalDst.coeffRef(i) = evalSrc.coeff(i);
- }
- }
+ typedef TensorAssignOp<LhsXprType, RhsXprType> type;
};
+} // end namespace internal
-// Multicore strategy: the index space is partitioned and each core is assigned to a partition
-#ifdef EIGEN_USE_THREADS
-template <typename LhsEval, typename RhsEval, typename Index, bool Vectorizable = LhsEval::PacketAccess & RhsEval::PacketAccess>
-struct EvalRange {
- static void run(LhsEval& dst, const RhsEval& src, const Index first, const Index last) {
- eigen_assert(last > first);
- for (Index i = first; i < last; ++i) {
- dst.coeffRef(i) = src.coeff(i);
- }
- }
-};
-template <typename LhsEval, typename RhsEval, typename Index>
-struct EvalRange<LhsEval, RhsEval, Index, true> {
- static void run(LhsEval& dst, const RhsEval& src, const Index first, const Index last) {
- eigen_assert(last > first);
-
- Index i = first;
- static const int PacketSize = unpacket_traits<typename LhsEval::PacketReturnType>::size;
- if (last - first > PacketSize) {
- static const int LhsStoreMode = LhsEval::IsAligned ? Aligned : Unaligned;
- static const int RhsLoadMode = RhsEval::IsAligned ? Aligned : Unaligned;
- eigen_assert(first % PacketSize == 0);
- Index lastPacket = last - (last % PacketSize);
- for (; i < lastPacket; i += PacketSize) {
- dst.template writePacket<LhsStoreMode>(i, src.template packet<RhsLoadMode>(i));
- }
- }
-
- for (; i < last; ++i) {
- dst.coeffRef(i) = src.coeff(i);
- }
- }
+template<typename LhsXprType, typename RhsXprType>
+class TensorAssignOp : public TensorBase<TensorAssignOp<LhsXprType, RhsXprType> >
+{
+ public:
+ typedef typename Eigen::internal::traits<TensorAssignOp>::Scalar Scalar;
+ typedef typename Eigen::internal::traits<TensorAssignOp>::Packet Packet;
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
+ typedef typename LhsXprType::CoeffReturnType CoeffReturnType;
+ typedef typename LhsXprType::PacketReturnType PacketReturnType;
+ typedef typename Eigen::internal::nested<TensorAssignOp>::type Nested;
+ typedef typename Eigen::internal::traits<TensorAssignOp>::StorageKind StorageKind;
+ typedef typename Eigen::internal::traits<TensorAssignOp>::Index Index;
+
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorAssignOp(LhsXprType& lhs, const RhsXprType& rhs)
+ : m_lhs_xpr(lhs), m_rhs_xpr(rhs) {}
+
+ /** \returns the nested expressions */
+ EIGEN_DEVICE_FUNC
+ typename internal::remove_all<typename LhsXprType::Nested>::type&
+ lhsExpression() const { return *((typename internal::remove_all<typename LhsXprType::Nested>::type*)&m_lhs_xpr); }
+
+ EIGEN_DEVICE_FUNC
+ const typename internal::remove_all<typename RhsXprType::Nested>::type&
+ rhsExpression() const { return m_rhs_xpr; }
+
+ protected:
+ typename internal::remove_all<typename LhsXprType::Nested>::type& m_lhs_xpr;
+ const typename internal::remove_all<typename RhsXprType::Nested>::type& m_rhs_xpr;
};
-template<typename Derived1, typename Derived2>
-struct TensorAssignMultiThreaded
+
+template<typename LeftArgType, typename RightArgType, typename Device>
+struct TensorEvaluator<const TensorAssignOp<LeftArgType, RightArgType>, Device>
{
- typedef typename Derived1::Index Index;
- static inline void run(Derived1& dst, const Derived2& src, const ThreadPoolDevice& device)
+ typedef TensorAssignOp<LeftArgType, RightArgType> XprType;
+
+ enum {
+ IsAligned = TensorEvaluator<LeftArgType, Device>::IsAligned & TensorEvaluator<RightArgType, Device>::IsAligned,
+ PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess,
+ };
+
+ EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
+ m_leftImpl(op.lhsExpression(), device),
+ m_rightImpl(op.rhsExpression(), device)
+ { }
+
+ typedef typename XprType::Index Index;
+ typedef typename XprType::CoeffReturnType CoeffReturnType;
+ typedef typename XprType::PacketReturnType PacketReturnType;
+ typedef typename TensorEvaluator<RightArgType, Device>::Dimensions Dimensions;
+
+ EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
{
- TensorEvaluator<Derived1, DefaultDevice> evalDst(dst, DefaultDevice());
- TensorEvaluator<Derived2, DefaultDevice> evalSrc(src, Defaultevice());
- const Index size = dst.size();
-
- static const bool Vectorizable = TensorEvaluator<Derived1, DefaultDevice>::PacketAccess & TensorEvaluator<Derived2, DefaultDevice>::PacketAccess;
- static const int PacketSize = Vectorizable ? unpacket_traits<typename TensorEvaluator<Derived1, DefaultDevice>::PacketReturnType>::size : 1;
-
- int blocksz = static_cast<int>(ceil(static_cast<float>(size)/device.numThreads()) + PacketSize - 1);
- const Index blocksize = std::max<Index>(PacketSize, (blocksz - (blocksz % PacketSize)));
- const Index numblocks = size / blocksize;
-
- Index i = 0;
- vector<std::future<void> > results;
- results.reserve(numblocks);
- for (int i = 0; i < numblocks; ++i) {
- results.push_back(std::async(std::launch::async, &EvalRange<TensorEvaluator<Derived1, DefaultDevice>, TensorEvaluator<Derived2, DefaultDevice>, Index>::run, evalDst, evalSrc, i*blocksize, (i+1)*blocksize));
- }
-
- for (int i = 0; i < numblocks; ++i) {
- results[i].get();
- }
-
- if (numblocks * blocksize < size) {
- EvalRange<TensorEvaluator<Derived1>, TensorEvaluator<Derived2>, Index>::run(evalDst, evalSrc, numblocks * blocksize, size);
- }
+ // TODO: use left impl instead if right impl dimensions are known at compile time.
+ return m_rightImpl.dimensions();
}
-};
-#endif
-
-// GPU: the evaluation of the expressions is offloaded to a GPU.
-#if defined(EIGEN_USE_GPU) && defined(__CUDACC__)
-template <typename LhsEvaluator, typename RhsEvaluator>
-__global__ void EigenMetaKernelNoCheck(LhsEvaluator evalDst, const RhsEvaluator evalSrc) {
- const int index = blockIdx.x * blockDim.x + threadIdx.x;
- evalDst.coeffRef(index) = evalSrc.coeff(index);
-}
-template <typename LhsEvaluator, typename RhsEvaluator>
-__global__ void EigenMetaKernelPeel(LhsEvaluator evalDst, const RhsEvaluator evalSrc, int peel_start_offset, int size) {
- const int index = peel_start_offset + blockIdx.x * blockDim.x + threadIdx.x;
- if (index < size) {
- evalDst.coeffRef(index) = evalSrc.coeff(index);
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalSubExprsIfNeeded() {
+ m_leftImpl.evalSubExprsIfNeeded();
+ m_rightImpl.evalSubExprsIfNeeded();
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
+ m_leftImpl.cleanup();
+ m_rightImpl.cleanup();
}
-}
-template<typename Derived1, typename Derived2>
-struct TensorAssignGpu
-{
- typedef typename Derived1::Index Index;
- static inline void run(Derived1& dst, const Derived2& src, const GpuDevice& device)
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalScalar(Index i) {
+ m_leftImpl.coeffRef(i) = m_rightImpl.coeff(i);
+ }
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalPacket(Index i) {
+ static const int LhsStoreMode = TensorEvaluator<LeftArgType, Device>::IsAligned ? Aligned : Unaligned;
+ static const int RhsLoadMode = TensorEvaluator<RightArgType, Device>::IsAligned ? Aligned : Unaligned;
+ m_leftImpl.template writePacket<LhsStoreMode>(i, m_rightImpl.template packet<RhsLoadMode>(i));
+ }
+ EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
{
- TensorEvaluator<Derived1, GpuDevice> evalDst(dst, device);
- TensorEvaluator<Derived2, GpuDevice> evalSrc(src, device);
- const Index size = dst.size();
- const int block_size = std::min<int>(size, 32*32);
- const int num_blocks = size / block_size;
- EigenMetaKernelNoCheck<TensorEvaluator<Derived1, GpuDevice>, TensorEvaluator<Derived2, GpuDevice> > <<<num_blocks, block_size, 0, device.stream()>>>(evalDst, evalSrc);
-
- const int remaining_items = size % block_size;
- if (remaining_items > 0) {
- const int peel_start_offset = num_blocks * block_size;
- const int peel_block_size = std::min<int>(size, 32);
- const int peel_num_blocks = (remaining_items + peel_block_size - 1) / peel_block_size;
- EigenMetaKernelPeel<TensorEvaluator<Derived1, GpuDevice>, TensorEvaluator<Derived2, GpuDevice> > <<<peel_num_blocks, peel_block_size, 0, device.stream()>>>(evalDst, evalSrc, peel_start_offset, size);
- }
+ return m_leftImpl.coeff(index);
}
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const
+ {
+ return m_leftImpl.template packet<LoadMode>(index);
+ }
+
+ private:
+ TensorEvaluator<LeftArgType, Device> m_leftImpl;
+ TensorEvaluator<RightArgType, Device> m_rightImpl;
};
-#endif
-} // end namespace internal
+}
-} // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H
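
For context, the sketch below shows how the evaluator interface added in this patch (evalSubExprsIfNeeded(), evalPacket(), evalScalar(), cleanup()) could be driven. It is a hedged illustration, not Eigen's actual TensorExecutor; the function name run_assignment_sketch and the PacketSize parameter are hypothetical, and the real scheduling lives in the separate TensorExecutor class mentioned in the commit message.

// Hypothetical driver loop over the evaluator interface defined above.
template <typename Evaluator, typename Index>
void run_assignment_sketch(Evaluator& eval, Index size, Index PacketSize) {
  eval.evalSubExprsIfNeeded();                    // recursively evaluate child expressions first
  const Index vectorized = (size / PacketSize) * PacketSize;
  for (Index i = 0; i < vectorized; i += PacketSize) {
    eval.evalPacket(i);                           // packet (SIMD) path over the bulk of the range
  }
  for (Index i = vectorized; i < size; ++i) {
    eval.evalScalar(i);                           // scalar tail
  }
  eval.cleanup();                                 // release any temporaries held by sub-evaluators
}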