// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H

namespace Eigen {

/** \class TensorEvaluator
  * \ingroup CXX11_Tensor_Module
  *
  * \brief The tensor evaluator classes.
  *
  * These classes are responsible for the evaluation of the tensor expression.
  *
  * TODO: add support for more types of expressions, in particular expressions
  * leading to lvalues (slicing, reshaping, etc...)
  */
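// NOTE: a rough, hypothetical sketch of how executor-style code (see
// TensorExecutor.h) typically drives this interface; `Expr`, `expr`, `device`
// and `process` are placeholders, not part of the actual API:
//
//   TensorEvaluator<const Expr, Device> eval(expr, device);
//   if (eval.evalSubExprsIfNeeded(nullptr)) {
//     const Index total_size = internal::array_prod(eval.dimensions());
//     for (Index i = 0; i < total_size; ++i) {
//       process(eval.coeff(i));                   // scalar path
//       // or eval.template packet<Unaligned>(i)  // vectorized path
//     }
//   }
//   eval.cleanup();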
// Generic evaluator
template<typename Derived, typename Device>
struct TensorEvaluator
{
  typedef typename Derived::Index Index;
  typedef typename Derived::Scalar Scalar;
  typedef typename Derived::Scalar CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef typename Derived::Dimensions Dimensions;
  typedef Derived XprType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef typename internal::traits<Derived>::template MakePointer<Scalar>::Type TensorPointerType;
  typedef StorageMemory<Scalar, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  // NumDimensions is -1 for variable dim tensors
  static const int NumCoords = internal::traits<Derived>::NumDimensions > 0 ?
                               internal::traits<Derived>::NumDimensions : 0;

  enum {
    IsAligned          = Derived::IsAligned,
    PacketAccess       = (PacketType<CoeffReturnType, Device>::size > 1),
    BlockAccess        = internal::is_arithmetic<typename internal::remove_const<Scalar>::type>::value,
    PreferBlockAccess  = false,
    Layout             = Derived::Layout,
    CoordAccess        = NumCoords > 0,
    RawAccess          = true
  };

  typedef typename internal::remove_const<Scalar>::type ScalarNoConst;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumCoords, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename internal::TensorMaterializedBlock<ScalarNoConst, NumCoords,
                                                     Layout, Index>
      TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_STRONG_INLINE TensorEvaluator(const Derived& m, const Device& device)
      : m_data(device.get((const_cast<TensorPointerType>(m.data())))),
        m_dims(m.dimensions()),
        m_device(device)
  { }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dims; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType dest) {
    if (!NumTraits<typename internal::remove_const<Scalar>::type>::RequireInitialization && dest) {
      m_device.memcpy((void*)(m_device.get(dest)), m_device.get(m_data),
                      m_dims.TotalSize() * sizeof(Scalar));
      return false;
    }
    return true;
  }

#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType dest, EvalSubExprsCallback done) {
    // TODO(ezhulenev): ThreadPoolDevice memcpy is a blocking operation.
    done(evalSubExprsIfNeeded(dest));
  }
#endif  // EIGEN_USE_THREADS

  EIGEN_STRONG_INLINE void cleanup() {}

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    eigen_assert(m_data != NULL);
    return m_data[index];
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) {
    eigen_assert(m_data != NULL);
    return m_data[index];
  }

  template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  PacketReturnType packet(Index index) const
  {
    return internal::ploadt<PacketReturnType, LoadMode>(m_data + index);
  }

  // Return a packet starting at `index` where `umask` specifies which elements
  // have to be loaded. Type/size of mask depends on PacketReturnType, e.g. for
  // Packet16f, `umask` is of type uint16_t and if a bit is 1, corresponding
  // float element will be loaded, otherwise 0 will be loaded.
  // Function has been templatized to enable Sfinae.
  template <typename PacketReturnTypeT> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  typename internal::enable_if<internal::unpacket_traits<PacketReturnTypeT>::masked_load_available, PacketReturnTypeT>::type
  partialPacket(Index index, typename internal::unpacket_traits<PacketReturnTypeT>::mask_t umask) const
  {
    return internal::ploadu<PacketReturnTypeT>(m_data + index, umask);
  }

  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x)
  {
    return internal::pstoret<Scalar, PacketReturnType, StoreMode>(m_data + index, x);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array<DenseIndex, NumCoords>& coords) const {
    eigen_assert(m_data != NULL);
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      return m_data[m_dims.IndexOfColMajor(coords)];
    } else {
      return m_data[m_dims.IndexOfRowMajor(coords)];
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType&
  coeffRef(const array<DenseIndex, NumCoords>& coords) {
    eigen_assert(m_data != NULL);
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      return m_data[m_dims.IndexOfColMajor(coords)];
    } else {
      return m_data[m_dims.IndexOfRowMajor(coords)];
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized,
                        PacketType<CoeffReturnType, Device>::size);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  internal::TensorBlockResourceRequirements getResourceRequirements() const {
    return internal::TensorBlockResourceRequirements::any();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
        bool /*root_of_expr_ast*/ = false) const {
    assert(m_data != NULL);
    return TensorBlock::materialize(m_data, m_dims, desc, scratch);
  }

  template<typename TensorBlock>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock(
      const TensorBlockDesc& desc, const TensorBlock& block) {
    assert(m_data != NULL);

    typedef typename TensorBlock::XprType TensorBlockExpr;
    typedef internal::TensorBlockAssignment<Scalar, NumCoords, TensorBlockExpr,
                                            Index>
        TensorBlockAssign;

    TensorBlockAssign::Run(
        TensorBlockAssign::target(desc.dimensions(),
                                  internal::strides<Layout>(m_dims), m_data,
                                  desc.offset()),
        block.expr());
  }

  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_data; }

#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_data.bind(cgh);
  }
#endif

 protected:
  EvaluatorPointerType m_data;
  Dimensions m_dims;
  const Device EIGEN_DEVICE_REF m_device;
};

namespace {
template <typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
T loadConstant(const T* address) {
  return *address;
}
// Use the texture cache on CUDA devices whenever possible
#if defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350
template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
float loadConstant(const float* address) {
  return __ldg(address);
}
template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
double loadConstant(const double* address) {
  return __ldg(address);
}
template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
Eigen::half loadConstant(const Eigen::half* address) {
  return Eigen::half(half_impl::raw_uint16_to_half(__ldg(&address->x)));
}
#endif
#ifdef EIGEN_USE_SYCL
// overload of load constant should be implemented here based on range access
template <cl::sycl::access::mode AcMd, typename T>
T &loadConstant(const Eigen::TensorSycl::internal::RangeAccess<AcMd, T> &address) {
  return *address;
}
#endif
}
// Default evaluator for rvalues
template<typename Derived, typename Device>
struct TensorEvaluator<const Derived, Device>
{
  typedef typename Derived::Index Index;
  typedef typename Derived::Scalar Scalar;
  typedef typename Derived::Scalar CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef typename Derived::Dimensions Dimensions;
  typedef const Derived XprType;
  typedef typename internal::traits<Derived>::template MakePointer<const Scalar>::Type TensorPointerType;
  typedef StorageMemory<const Scalar, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  typedef typename internal::remove_const<Scalar>::type ScalarNoConst;

  // NumDimensions is -1 for variable dim tensors
  static const int NumCoords = internal::traits<Derived>::NumDimensions > 0 ?
                               internal::traits<Derived>::NumDimensions : 0;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;

  enum {
    IsAligned         = Derived::IsAligned,
    PacketAccess      = (PacketType<CoeffReturnType, Device>::size > 1),
    BlockAccess       = internal::is_arithmetic<ScalarNoConst>::value,
    PreferBlockAccess = false,
    Layout            = Derived::Layout,
    CoordAccess       = NumCoords > 0,
    RawAccess         = true
  };

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumCoords, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename internal::TensorMaterializedBlock<ScalarNoConst, NumCoords,
                                                     Layout, Index>
      TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_STRONG_INLINE TensorEvaluator(const Derived& m, const Device& device)
      : m_data(device.get(m.data())), m_dims(m.dimensions()), m_device(device)
  { }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dims; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
    if (!NumTraits<typename internal::remove_const<Scalar>::type>::RequireInitialization && data) {
      m_device.memcpy((void*)(m_device.get(data)), m_device.get(m_data),
                      m_dims.TotalSize() * sizeof(Scalar));
      return false;
    }
    return true;
  }

#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType dest, EvalSubExprsCallback done) {
    // TODO(ezhulenev): ThreadPoolDevice memcpy is a blocking operation.
    done(evalSubExprsIfNeeded(dest));
  }
#endif  // EIGEN_USE_THREADS

  EIGEN_STRONG_INLINE void cleanup() { }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    eigen_assert(m_data != NULL);
    return loadConstant(m_data + index);
  }

  template<int LoadMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  PacketReturnType packet(Index index) const
  {
    return internal::ploadt_ro<PacketReturnType, LoadMode>(m_data + index);
  }

  // Return a packet starting at `index` where `umask` specifies which elements
  // have to be loaded. Type/size of mask depends on PacketReturnType, e.g. for
  // Packet16f, `umask` is of type uint16_t and if a bit is 1, corresponding
  // float element will be loaded, otherwise 0 will be loaded.
  // Function has been templatized to enable Sfinae.
  template <typename PacketReturnTypeT> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  typename internal::enable_if<internal::unpacket_traits<PacketReturnTypeT>::masked_load_available, PacketReturnTypeT>::type
  partialPacket(Index index, typename internal::unpacket_traits<PacketReturnTypeT>::mask_t umask) const
  {
    return internal::ploadu<PacketReturnTypeT>(m_data + index, umask);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array<DenseIndex, NumCoords>& coords) const {
    eigen_assert(m_data != NULL);
    const Index index = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? m_dims.IndexOfColMajor(coords)
                        : m_dims.IndexOfRowMajor(coords);
    return loadConstant(m_data + index);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized,
                        PacketType<CoeffReturnType, Device>::size);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  internal::TensorBlockResourceRequirements getResourceRequirements() const {
    return internal::TensorBlockResourceRequirements::any();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
        bool /*root_of_expr_ast*/ = false) const {
    assert(m_data != NULL);
    return TensorBlock::materialize(m_data, m_dims, desc, scratch);
  }

  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_data; }

#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_data.bind(cgh);
  }
#endif

 protected:
  EvaluatorPointerType m_data;
  Dimensions m_dims;
  const Device EIGEN_DEVICE_REF m_device;
};


// -------------------- CwiseNullaryOp --------------------

template<typename NullaryOp, typename ArgType, typename Device>
struct TensorEvaluator<const TensorCwiseNullaryOp<NullaryOp, ArgType>, Device>
{
  typedef TensorCwiseNullaryOp<NullaryOp, ArgType> XprType;

  TensorEvaluator(const XprType& op, const Device& device)
      : m_functor(op.functor()), m_argImpl(op.nestedExpression(), device), m_wrapper()
  { }

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  enum {
    IsAligned = true,
    PacketAccess = internal::functor_traits<NullaryOp>::PacketAccess
#ifdef EIGEN_USE_SYCL
                   && (PacketType<CoeffReturnType, Device>::size > 1)
#endif
    ,
    BlockAccess = false,
    PreferBlockAccess = false,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,  // to be implemented
    RawAccess = false
  };

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { return true; }

#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType, EvalSubExprsCallback done) {
    done(true);
  }
#endif  // EIGEN_USE_THREADS

  EIGEN_STRONG_INLINE void cleanup() { }

  EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
  {
    return m_wrapper(m_functor, index);
  }

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    return m_wrapper.template packetOp<PacketReturnType, Index>(m_functor, index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool vectorized) const {
    return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized,
                        PacketType<CoeffReturnType, Device>::size);
  }

  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }

#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_argImpl.bind(cgh);
  }
#endif

 private:
  const NullaryOp m_functor;
  TensorEvaluator<ArgType, Device> m_argImpl;
  const internal::nullary_wrapper<CoeffReturnType, NullaryOp> m_wrapper;
};
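// For reference, a nullary expression reads no input data: something like
// `t.constant(1.0f)` or `t.random()` produces a TensorCwiseNullaryOp whose
// evaluator simply forwards the linear index to the functor. A hypothetical
// usage sketch (`t` is a placeholder tensor):
//
//   Eigen::Tensor<float, 2> t(3, 4);
//   t = t.constant(1.0f);   // scalar_constant_op evaluated via this evaluator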
// -------------------- CwiseUnaryOp --------------------

template<typename UnaryOp, typename ArgType, typename Device>
struct TensorEvaluator<const TensorCwiseUnaryOp<UnaryOp, ArgType>, Device>
{
  typedef TensorCwiseUnaryOp<UnaryOp, ArgType> XprType;

  enum {
    IsAligned          = TensorEvaluator<ArgType, Device>::IsAligned,
    PacketAccess       = int(TensorEvaluator<ArgType, Device>::PacketAccess) &
                         int(internal::functor_traits<UnaryOp>::PacketAccess),
    BlockAccess        = TensorEvaluator<ArgType, Device>::BlockAccess,
    PreferBlockAccess  = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    Layout             = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess        = false,  // to be implemented
    RawAccess          = false
  };

  TensorEvaluator(const XprType& op, const Device& device)
    : m_device(device),
      m_functor(op.functor()),
      m_argImpl(op.nestedExpression(), device)
  { }

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename internal::remove_const<Scalar>::type ScalarNoConst;
  typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;
  static const int NumDims = internal::array_size<Dimensions>::value;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename TensorEvaluator<ArgType, Device>::TensorBlock
      ArgTensorBlock;

  typedef internal::TensorCwiseUnaryBlock<UnaryOp, ArgTensorBlock>
      TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    m_argImpl.evalSubExprsIfNeeded(NULL);
    return true;
  }

#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType, EvalSubExprsCallback done) {
    m_argImpl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
  }
#endif  // EIGEN_USE_THREADS

  EIGEN_STRONG_INLINE void cleanup() {
    m_argImpl.cleanup();
  }

  EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
  {
    return m_functor(m_argImpl.coeff(index));
  }

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    return m_functor.packetOp(m_argImpl.template packet<LoadMode>(index));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    const double functor_cost = internal::functor_traits<UnaryOp>::Cost;
    return m_argImpl.costPerCoeff(vectorized) +
        TensorOpCost(0, 0, functor_cost, vectorized, PacketSize);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  internal::TensorBlockResourceRequirements getResourceRequirements() const {
    static const double functor_cost = internal::functor_traits<UnaryOp>::Cost;
    return m_argImpl.getResourceRequirements().addCostPerCoeff(
        {0, 0, functor_cost / PacketSize});
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
        bool /*root_of_expr_ast*/ = false) const {
    return TensorBlock(m_argImpl.block(desc, scratch), m_functor);
  }

  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }

#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_argImpl.bind(cgh);
  }
#endif

 private:
  const Device EIGEN_DEVICE_REF m_device;
  const UnaryOp m_functor;
  TensorEvaluator<ArgType, Device> m_argImpl;
};
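// For reference, a unary expression applies its functor elementwise to a single
// operand: `t.square()` or `t.unaryExpr(f)` builds a TensorCwiseUnaryOp, and the
// evaluator above computes coeff(i) as m_functor(m_argImpl.coeff(i)). A
// hypothetical sketch (`t` and `result` are placeholders):
//
//   Eigen::Tensor<float, 1> t(8), result(8);
//   result = t.square();   // vectorized when the argument and functor allow it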
// -------------------- CwiseBinaryOp --------------------

template<typename BinaryOp, typename LeftArgType, typename RightArgType, typename Device>
struct TensorEvaluator<const TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArgType>, Device>
{
  typedef TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArgType> XprType;

  enum {
    IsAligned         = int(TensorEvaluator<LeftArgType, Device>::IsAligned) &
                        int(TensorEvaluator<RightArgType, Device>::IsAligned),
    PacketAccess      = int(TensorEvaluator<LeftArgType, Device>::PacketAccess) &
                        int(TensorEvaluator<RightArgType, Device>::PacketAccess) &
                        int(internal::functor_traits<BinaryOp>::PacketAccess),
    BlockAccess       = int(TensorEvaluator<LeftArgType, Device>::BlockAccess) &
                        int(TensorEvaluator<RightArgType, Device>::BlockAccess),
    PreferBlockAccess = int(TensorEvaluator<LeftArgType, Device>::PreferBlockAccess) |
                        int(TensorEvaluator<RightArgType, Device>::PreferBlockAccess),
    Layout            = TensorEvaluator<LeftArgType, Device>::Layout,
    CoordAccess       = false,  // to be implemented
    RawAccess         = false
  };

  TensorEvaluator(const XprType& op, const Device& device)
    : m_device(device),
      m_functor(op.functor()),
      m_leftImpl(op.lhsExpression(), device),
      m_rightImpl(op.rhsExpression(), device)
  {
    EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout) || internal::traits<XprType>::NumDimensions <= 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
    eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions()));
  }

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef typename TensorEvaluator<LeftArgType, Device>::Dimensions Dimensions;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  static const int NumDims = internal::array_size<
      typename TensorEvaluator<LeftArgType, Device>::Dimensions>::value;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename TensorEvaluator<LeftArgType, Device>::TensorBlock
      LeftTensorBlock;
  typedef typename TensorEvaluator<RightArgType, Device>::TensorBlock
      RightTensorBlock;

  typedef internal::TensorCwiseBinaryBlock<BinaryOp, LeftTensorBlock,
                                           RightTensorBlock>
      TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
  {
    // TODO: use right impl instead if right impl dimensions are known at compile time.
    return m_leftImpl.dimensions();
  }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    m_leftImpl.evalSubExprsIfNeeded(NULL);
    m_rightImpl.evalSubExprsIfNeeded(NULL);
    return true;
  }

#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType, EvalSubExprsCallback done) {
    // TODO(ezhulenev): Evaluate the two expressions in parallel?
    m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done](bool) {
      m_rightImpl.evalSubExprsIfNeededAsync(nullptr,
                                            [done](bool) { done(true); });
    });
  }
#endif  // EIGEN_USE_THREADS

  EIGEN_STRONG_INLINE void cleanup() {
    m_leftImpl.cleanup();
    m_rightImpl.cleanup();
  }

  EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
  {
    return m_functor(m_leftImpl.coeff(index), m_rightImpl.coeff(index));
  }

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    return m_functor.packetOp(m_leftImpl.template packet<LoadMode>(index),
                              m_rightImpl.template packet<LoadMode>(index));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool vectorized) const {
    const double functor_cost = internal::functor_traits<BinaryOp>::Cost;
    return m_leftImpl.costPerCoeff(vectorized) +
           m_rightImpl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, functor_cost, vectorized, PacketSize);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  internal::TensorBlockResourceRequirements getResourceRequirements() const {
    static const double functor_cost = internal::functor_traits<BinaryOp>::Cost;
    return internal::TensorBlockResourceRequirements::merge(
               m_leftImpl.getResourceRequirements(),
               m_rightImpl.getResourceRequirements())
        .addCostPerCoeff({0, 0, functor_cost / PacketSize});
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
        bool /*root_of_expr_ast*/ = false) const {
    desc.DropDestinationBuffer();
    return TensorBlock(m_leftImpl.block(desc, scratch),
                       m_rightImpl.block(desc, scratch), m_functor);
  }

  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }

#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_leftImpl.bind(cgh);
    m_rightImpl.bind(cgh);
  }
#endif

 private:
  const Device EIGEN_DEVICE_REF m_device;
  const BinaryOp m_functor;
  TensorEvaluator<LeftArgType, Device> m_leftImpl;
  TensorEvaluator<RightArgType, Device> m_rightImpl;
};
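// For reference, a binary expression such as `a + b` is a TensorCwiseBinaryOp;
// the packet path above is only taken when both operand evaluators and the
// functor (e.g. scalar_sum_op) advertise packet support, and both operands must
// share the same layout and dimensions. A hypothetical sketch (`a`, `b`, `c`
// are placeholders):
//
//   Eigen::Tensor<float, 1> a(8), b(8), c(8);
//   a.setRandom(); b.setRandom();
//   c = a + b;   // evaluated through the evaluator above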
// -------------------- CwiseTernaryOp --------------------

template<typename TernaryOp, typename Arg1Type, typename Arg2Type, typename Arg3Type, typename Device>
struct TensorEvaluator<const TensorCwiseTernaryOp<TernaryOp, Arg1Type, Arg2Type, Arg3Type>, Device>
{
  typedef TensorCwiseTernaryOp<TernaryOp, Arg1Type, Arg2Type, Arg3Type> XprType;

  enum {
    IsAligned         = TensorEvaluator<Arg1Type, Device>::IsAligned &
                        TensorEvaluator<Arg2Type, Device>::IsAligned &
                        TensorEvaluator<Arg3Type, Device>::IsAligned,
    PacketAccess      = TensorEvaluator<Arg1Type, Device>::PacketAccess &&
                        TensorEvaluator<Arg2Type, Device>::PacketAccess &&
                        TensorEvaluator<Arg3Type, Device>::PacketAccess &&
                        internal::functor_traits<TernaryOp>::PacketAccess,
    BlockAccess       = false,
    PreferBlockAccess = TensorEvaluator<Arg1Type, Device>::PreferBlockAccess ||
                        TensorEvaluator<Arg2Type, Device>::PreferBlockAccess ||
                        TensorEvaluator<Arg3Type, Device>::PreferBlockAccess,
    Layout            = TensorEvaluator<Arg1Type, Device>::Layout,
    CoordAccess       = false,  // to be implemented
    RawAccess         = false
  };

  TensorEvaluator(const XprType& op, const Device& device)
    : m_functor(op.functor()),
      m_arg1Impl(op.arg1Expression(), device),
      m_arg2Impl(op.arg2Expression(), device),
      m_arg3Impl(op.arg3Expression(), device)
  {
    EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<Arg1Type, Device>::Layout) == static_cast<int>(TensorEvaluator<Arg3Type, Device>::Layout) || internal::traits<XprType>::NumDimensions <= 1), YOU_MADE_A_PROGRAMMING_MISTAKE);

    EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::StorageKind,
                         typename internal::traits<Arg2Type>::StorageKind>::value),
                        STORAGE_KIND_MUST_MATCH)
    EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::StorageKind,
                         typename internal::traits<Arg3Type>::StorageKind>::value),
                        STORAGE_KIND_MUST_MATCH)
    EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::Index,
                         typename internal::traits<Arg2Type>::Index>::value),
                        STORAGE_INDEX_MUST_MATCH)
    EIGEN_STATIC_ASSERT((internal::is_same<typename internal::traits<Arg1Type>::Index,
                         typename internal::traits<Arg3Type>::Index>::value),
                        STORAGE_INDEX_MUST_MATCH)

    eigen_assert(dimensions_match(m_arg1Impl.dimensions(), m_arg2Impl.dimensions()) &&
                 dimensions_match(m_arg1Impl.dimensions(), m_arg3Impl.dimensions()));
  }

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef typename TensorEvaluator<Arg1Type, Device>::Dimensions Dimensions;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
  {
    // TODO: use arg2 or arg3 dimensions if they are known at compile time.
    return m_arg1Impl.dimensions();
  }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    m_arg1Impl.evalSubExprsIfNeeded(NULL);
    m_arg2Impl.evalSubExprsIfNeeded(NULL);
    m_arg3Impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
  EIGEN_STRONG_INLINE void cleanup() {
    m_arg1Impl.cleanup();
    m_arg2Impl.cleanup();
    m_arg3Impl.cleanup();
  }

  EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
  {
    return m_functor(m_arg1Impl.coeff(index), m_arg2Impl.coeff(index), m_arg3Impl.coeff(index));
  }
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    return m_functor.packetOp(m_arg1Impl.template packet<LoadMode>(index),
                              m_arg2Impl.template packet<LoadMode>(index),
                              m_arg3Impl.template packet<LoadMode>(index));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool vectorized) const {
    const double functor_cost = internal::functor_traits<TernaryOp>::Cost;
    return m_arg1Impl.costPerCoeff(vectorized) +
           m_arg2Impl.costPerCoeff(vectorized) +
           m_arg3Impl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, functor_cost, vectorized, PacketSize);
  }

  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }

#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_arg1Impl.bind(cgh);
    m_arg2Impl.bind(cgh);
    m_arg3Impl.bind(cgh);
  }
#endif

 private:
  const TernaryOp m_functor;
  TensorEvaluator<Arg1Type, Device> m_arg1Impl;
  TensorEvaluator<Arg2Type, Device> m_arg2Impl;
  TensorEvaluator<Arg3Type, Device> m_arg3Impl;
};


// -------------------- SelectOp --------------------

template<typename IfArgType, typename ThenArgType, typename ElseArgType, typename Device>
struct TensorEvaluator<const TensorSelectOp<IfArgType, ThenArgType, ElseArgType>, Device>
{
  typedef TensorSelectOp<IfArgType, ThenArgType, ElseArgType> XprType;
  typedef typename XprType::Scalar Scalar;

  enum {
    IsAligned         = TensorEvaluator<ThenArgType, Device>::IsAligned &
                        TensorEvaluator<ElseArgType, Device>::IsAligned,
    PacketAccess      = TensorEvaluator<ThenArgType, Device>::PacketAccess &
                        TensorEvaluator<ElseArgType, Device>::PacketAccess &
                        PacketType<Scalar, Device>::HasBlend,
    BlockAccess       = TensorEvaluator<IfArgType, Device>::BlockAccess &&
                        TensorEvaluator<ThenArgType, Device>::BlockAccess &&
                        TensorEvaluator<ElseArgType, Device>::BlockAccess,
    PreferBlockAccess = TensorEvaluator<IfArgType, Device>::PreferBlockAccess ||
                        TensorEvaluator<ThenArgType, Device>::PreferBlockAccess ||
                        TensorEvaluator<ElseArgType, Device>::PreferBlockAccess,
    Layout            = TensorEvaluator<IfArgType, Device>::Layout,
    CoordAccess       = false,  // to be implemented
    RawAccess         = false
  };

  TensorEvaluator(const XprType& op, const Device& device)
    : m_condImpl(op.ifExpression(), device),
      m_thenImpl(op.thenExpression(), device),
      m_elseImpl(op.elseExpression(), device)
  {
    EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<IfArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<ThenArgType, Device>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
    EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<IfArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<ElseArgType, Device>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
    eigen_assert(dimensions_match(m_condImpl.dimensions(), m_thenImpl.dimensions()));
    eigen_assert(dimensions_match(m_thenImpl.dimensions(), m_elseImpl.dimensions()));
  }

  typedef typename XprType::Index Index;
  typedef typename internal::traits<XprType>::Scalar CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef typename TensorEvaluator<IfArgType, Device>::Dimensions Dimensions;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  static const int NumDims = internal::array_size<Dimensions>::value;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename TensorEvaluator<IfArgType, Device>::TensorBlock
      IfArgTensorBlock;
  typedef typename TensorEvaluator<ThenArgType, Device>::TensorBlock
      ThenArgTensorBlock;
  typedef typename TensorEvaluator<ElseArgType, Device>::TensorBlock
      ElseArgTensorBlock;

  struct TensorSelectOpBlockFactory {
    template <typename IfArgXprType, typename ThenArgXprType, typename ElseArgXprType>
    struct XprType {
      typedef TensorSelectOp<const IfArgXprType, const ThenArgXprType, const ElseArgXprType> type;
    };

    template <typename IfArgXprType, typename ThenArgXprType, typename ElseArgXprType>
    typename XprType<IfArgXprType, ThenArgXprType, ElseArgXprType>::type expr(
        const IfArgXprType& if_expr, const ThenArgXprType& then_expr, const ElseArgXprType& else_expr) const {
      return typename XprType<IfArgXprType, ThenArgXprType, ElseArgXprType>::type(if_expr, then_expr, else_expr);
    }
  };

  typedef internal::TensorTernaryExprBlock<TensorSelectOpBlockFactory,
                                           IfArgTensorBlock, ThenArgTensorBlock,
                                           ElseArgTensorBlock>
      TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_DEVICE_FUNC const Dimensions& dimensions() const
  {
    // TODO: use then or else impl instead if they happen to be known at compile time.
    return m_condImpl.dimensions();
  }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    m_condImpl.evalSubExprsIfNeeded(NULL);
    m_thenImpl.evalSubExprsIfNeeded(NULL);
    m_elseImpl.evalSubExprsIfNeeded(NULL);
    return true;
  }

#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType, EvalSubExprsCallback done) {
    m_condImpl.evalSubExprsIfNeededAsync(nullptr, [this, done](bool) {
      m_thenImpl.evalSubExprsIfNeededAsync(nullptr, [this, done](bool) {
        m_elseImpl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
      });
    });
  }
#endif  // EIGEN_USE_THREADS

  EIGEN_STRONG_INLINE void cleanup() {
    m_condImpl.cleanup();
    m_thenImpl.cleanup();
    m_elseImpl.cleanup();
  }

  EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const
  {
    return m_condImpl.coeff(index) ? m_thenImpl.coeff(index) : m_elseImpl.coeff(index);
  }
  template<int LoadMode>
  EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const
  {
    internal::Selector<PacketSize> select;
    EIGEN_UNROLL_LOOP
    for (Index i = 0; i < PacketSize; ++i) {
      select.select[i] = m_condImpl.coeff(index+i);
    }
    return internal::pblend(select,
                            m_thenImpl.template packet<LoadMode>(index),
                            m_elseImpl.template packet<LoadMode>(index));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool vectorized) const {
    return m_condImpl.costPerCoeff(vectorized) +
           m_thenImpl.costPerCoeff(vectorized)
        .cwiseMax(m_elseImpl.costPerCoeff(vectorized));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  internal::TensorBlockResourceRequirements getResourceRequirements() const {
    auto then_req = m_thenImpl.getResourceRequirements();
    auto else_req = m_elseImpl.getResourceRequirements();

    auto merged_req =
        internal::TensorBlockResourceRequirements::merge(then_req, else_req);
    merged_req.cost_per_coeff =
        then_req.cost_per_coeff.cwiseMax(else_req.cost_per_coeff);

    return internal::TensorBlockResourceRequirements::merge(
        m_condImpl.getResourceRequirements(), merged_req);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
        bool /*root_of_expr_ast*/ = false) const {
    // It's unsafe to pass destination buffer to underlying expressions, because
    // output might be aliased with one of the inputs.
    desc.DropDestinationBuffer();

    return TensorBlock(
        m_condImpl.block(desc, scratch), m_thenImpl.block(desc, scratch),
        m_elseImpl.block(desc, scratch), TensorSelectOpBlockFactory());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return NULL; }

#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_condImpl.bind(cgh);
    m_thenImpl.bind(cgh);
    m_elseImpl.bind(cgh);
  }
#endif

 private:
  TensorEvaluator<IfArgType, Device> m_condImpl;
  TensorEvaluator<ThenArgType, Device> m_thenImpl;
  TensorEvaluator<ElseArgType, Device> m_elseImpl;
};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H