// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
#define EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H

namespace Eigen {

// The MakePointer class is a container for the address space of a pointer on
// the host and on the device. On the host side it yields the plain T* pointer;
// when EIGEN_USE_SYCL is defined, a specialisation (see the EIGEN_USE_SYCL
// block below) constructs a SYCL buffer with a map_allocator bound to the
// host-side T* m_data. MakePointer is instantiated on the device as well.
template<typename T> struct MakePointer {
  typedef T* Type;
  typedef const T* ConstType;
};

template <typename T>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T* constCast(const T* data) {
  return const_cast<T*>(data);
}

// The StorageMemory class is a container for the device-specific pointer used
// to refer to a Pointer in the TensorEvaluator class. While a TensorExpression
// is a device-agnostic type and needs the MakePointer class for type
// conversion, the TensorEvaluator class can be specialized for a device, so it
// is possible to construct different types of temporary storage memory in
// TensorEvaluator for different devices by specializing the following
// StorageMemory class.
template<typename T, typename device> struct StorageMemory : MakePointer<T> {};

namespace internal {
template<typename A, typename B> struct Pointer_type_promotion {
  static const bool val = false;
};
template<typename A> struct Pointer_type_promotion<A, A> {
  static const bool val = true;
};
template<typename A, typename B> struct TypeConversion {
  typedef A* type;
};
}
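// A minimal compile-time sketch of the two traits above. It assumes only that
// internal::is_same from Eigen/Core is visible here (the Tensor module
// includes Core before this header); HypotheticalDevice is a made-up tag used
// purely for illustration. On the host, MakePointer<T> resolves to plain
// pointers, and StorageMemory<T, Device> falls back to that mapping for any
// device that does not specialize it.
struct HypotheticalDevice;
static_assert(internal::is_same<MakePointer<float>::Type, float*>::value,
              "host-side MakePointer<T>::Type is T*");
static_assert(internal::is_same<StorageMemory<float, HypotheticalDevice>::Type, float*>::value,
              "StorageMemory<T, Device> defaults to the MakePointer mapping");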
template<typename PlainObjectType, int Options_ = Unaligned, template <class> class MakePointer_ = MakePointer> class TensorMap;
template<typename Scalar_, int NumIndices_, int Options_ = 0, typename IndexType = DenseIndex> class Tensor;
template<typename Scalar_, typename Dimensions, int Options_ = 0, typename IndexType = DenseIndex> class TensorFixedSize;
template<typename PlainObjectType> class TensorRef;
template<typename Derived, int AccessLevel> class TensorBase;

template<typename NullaryOp, typename PlainObjectType> class TensorCwiseNullaryOp;
template<typename UnaryOp, typename XprType> class TensorCwiseUnaryOp;
template<typename BinaryOp, typename LeftXprType, typename RightXprType> class TensorCwiseBinaryOp;
template<typename TernaryOp, typename Arg1XprType, typename Arg2XprType, typename Arg3XprType> class TensorCwiseTernaryOp;
template<typename IfXprType, typename ThenXprType, typename ElseXprType> class TensorSelectOp;
template<typename Op, typename Dims, typename XprType, template <class> class MakePointer_ = MakePointer> class TensorReductionOp;
template<typename XprType> class TensorIndexTupleOp;
template<typename ReduceOp, typename Dims, typename XprType> class TensorTupleReducerOp;
template<typename Axis, typename LeftXprType, typename RightXprType> class TensorConcatenationOp;
template<typename Dimensions, typename LeftXprType, typename RightXprType, typename OutputKernelType> class TensorContractionOp;
template<typename TargetType, typename XprType> class TensorConversionOp;
template<typename Dimensions, typename InputXprType, typename KernelXprType> class TensorConvolutionOp;
template<typename FFT, typename XprType, int FFTDataType, int FFTDirection> class TensorFFTOp;
template<typename PatchDim, typename XprType> class TensorPatchOp;
template<DenseIndex Rows, DenseIndex Cols, typename XprType> class TensorImagePatchOp;
template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType> class TensorVolumePatchOp;
template<typename Broadcast, typename XprType> class TensorBroadcastingOp;
template<DenseIndex DimId, typename XprType> class TensorChippingOp;
template<typename NewDimensions, typename XprType> class TensorReshapingOp;
template<typename XprType> class TensorLayoutSwapOp;
template<typename StartIndices, typename Sizes, typename XprType> class TensorSlicingOp;
template<typename ReverseDimensions, typename XprType> class TensorReverseOp;
template<typename PaddingDimensions, typename XprType> class TensorPaddingOp;
template<typename Shuffle, typename XprType> class TensorShufflingOp;
template<typename Strides, typename XprType> class TensorStridingOp;
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType> class TensorStridingSlicingOp;
template<typename Strides, typename XprType> class TensorInflationOp;
template<typename Generator, typename XprType> class TensorGeneratorOp;
template<typename LeftXprType, typename RightXprType> class TensorAssignOp;
template<typename Op, typename XprType> class TensorScanOp;
template<typename Dims, typename XprType> class TensorTraceOp;

template<typename CustomUnaryFunc, typename XprType> class TensorCustomUnaryOp;
template<typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType> class TensorCustomBinaryOp;

template<typename XprType, template <class> class MakePointer_ = MakePointer> class TensorEvalToOp;
template<typename XprType> class TensorForcedEvalOp;

template<typename ExpressionType, typename DeviceType> class TensorDevice;
template<typename ExpressionType, typename DeviceType, typename DoneCallback> class TensorAsyncDevice;
template<typename Derived, typename Device> struct TensorEvaluator;

struct NoOpOutputKernel;

struct DefaultDevice;
struct ThreadPoolDevice;
struct GpuDevice;
struct SyclDevice;

#ifdef EIGEN_USE_SYCL

template <typename T> struct MakeSYCLPointer {
  typedef Eigen::TensorSycl::internal::RangeAccess<cl::sycl::access::mode::read_write, T> Type;
};

template <typename T>
EIGEN_STRONG_INLINE const Eigen::TensorSycl::internal::RangeAccess<cl::sycl::access::mode::read_write, T>&
constCast(const Eigen::TensorSycl::internal::RangeAccess<cl::sycl::access::mode::read_write, T>& data) {
  return data;
}

template <typename T>
struct StorageMemory<T, SyclDevice> : MakeSYCLPointer<T> {};
template <typename T>
struct StorageMemory<T, const SyclDevice> : StorageMemory<T, SyclDevice> {};

namespace TensorSycl {
namespace internal {
template <typename CoeffReturnType, typename OP, typename OutputAccessor, typename InputAccessor, typename LocalAccessor> class GenericNondeterministicReducer;
}
}
#endif

enum FFTResultType {
  RealPart = 0,
  ImagPart = 1,
  BothParts = 2
};

enum FFTDirection {
  FFT_FORWARD = 0,
  FFT_REVERSE = 1
};

namespace internal {

template <typename Device, typename Expression>
struct IsVectorizable {
  static const bool value = TensorEvaluator<Expression, Device>::PacketAccess;
};

template <typename Expression>
struct IsVectorizable<GpuDevice, Expression> {
  static const bool value = TensorEvaluator<Expression, GpuDevice>::PacketAccess &&
                            TensorEvaluator<Expression, GpuDevice>::IsAligned;
};

// Tiled evaluation strategy.
enum TiledEvaluation {
  Off = 0,  // tiled evaluation is not supported
  On = 1,   // still work in progress (see TensorBlock.h)
};

template <typename Device, typename Expression>
struct IsTileable {
  // Check that block evaluation is supported and it's a preferred option (at
  // least one sub-expression has much faster block evaluation, e.g.
  // broadcasting).
  static const bool BlockAccess =
      TensorEvaluator<Expression, Device>::BlockAccess &&
      TensorEvaluator<Expression, Device>::PreferBlockAccess;

  static const TiledEvaluation value =
      BlockAccess ? TiledEvaluation::On : TiledEvaluation::Off;
};

template <typename Expression, typename Device,
          bool Vectorizable = IsVectorizable<Device, Expression>::value,
          TiledEvaluation Tiling = IsTileable<Device, Expression>::value>
class TensorExecutor;

template <typename Expression, typename Device, typename DoneCallback,
          bool Vectorizable = IsVectorizable<Device, Expression>::value,
          TiledEvaluation Tiling = IsTileable<Device, Expression>::value>
class TensorAsyncExecutor;

}  // end namespace internal

}  // end namespace Eigen

#endif  // EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
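// Illustrative usage sketch, assuming the run() interface defined in
// TensorExecutor.h (ExprType stands in for an arbitrary assignment
// expression, e.g. one produced by TensorAssignOp): thanks to the default
// template arguments on TensorExecutor, callers normally name only the
// expression and the device,
//
//   internal::TensorExecutor<const ExprType, DefaultDevice>::run(expr, device);
//
// and the Vectorizable / Tiling parameters are resolved at compile time from
// IsVectorizable<Device, Expression> and IsTileable<Device, Expression>.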