author     Rasmus Munk Larsen <rmlarsen@google.com>   2019-10-18 16:42:00 -0700
committer  Rasmus Munk Larsen <rmlarsen@google.com>   2019-10-18 16:42:00 -0700
commit     668ab3fc474e54c7919eda4fbaf11f3a99246494 (patch)
tree       3f2b80538739d85ebae042b0f9cafe163d8287e1 /unsupported/Eigen/CXX11/src/Tensor
parent     df0e8b81370f741c734e4f4187d029d6a8cb18f2 (diff)
Drop support for c++03 in Eigen tensor. Get rid of some code used to emulate c++11 functionality with older compilers.
Diffstat (limited to 'unsupported/Eigen/CXX11/src/Tensor')
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorBlockV2.h              27
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h            10
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h              5
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h                   7
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h  22
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h             2
6 files changed, 2 insertions, 71 deletions
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorBlockV2.h b/unsupported/Eigen/CXX11/src/Tensor/TensorBlockV2.h
index c85c4c6c8..e9086a7f1 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorBlockV2.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorBlockV2.h
@@ -51,13 +51,11 @@ EIGEN_ALWAYS_INLINE DSizes<IndexType, NumDims> strides(
return strides<Layout>(DSizes<IndexType, NumDims>(dimensions));
}
-#if EIGEN_HAS_CXX11
template <int Layout, std::ptrdiff_t... Indices>
EIGEN_STRONG_INLINE DSizes<std::ptrdiff_t, sizeof...(Indices)> strides(
const Sizes<Indices...>& sizes) {
return strides<Layout>(DSizes<std::ptrdiff_t, sizeof...(Indices)>(sizes));
}
-#endif
// -------------------------------------------------------------------------- //
// TensorBlockDescriptor specifies a block offset within a tensor and the block
@@ -185,7 +183,7 @@ class TensorBlockDescriptor {
Scalar* dst_base,
const DSizes<DstStridesIndexType, NumDims>& dst_strides) {
// DSizes constructor will do index type promotion if it's safe.
- AddDestinationBuffer<Layout>(*this, dst_base, Dimensions(dst_strides));
+ AddDestinationBuffer<Layout>(dst_base, Dimensions(dst_strides));
}
TensorBlockDescriptor& DropDestinationBuffer() {
@@ -285,11 +283,6 @@ class TensorBlockScratchAllocator {
// -------------------------------------------------------------------------- //
// TensorBlockKind represents all possible block kinds, that can be produced by
// TensorEvaluator::evalBlock function.
-#if !EIGEN_HAS_CXX11
-// To be able to use `TensorBlockKind::kExpr` in C++03 we need a namespace.
-// (Use of enumeration in a nested name specifier is a c++11 extension).
-namespace TensorBlockKind {
-#endif
enum TensorBlockKind {
// Tensor block that is a lazy expression that must be assigned to a
// destination using TensorBlockAssign.
@@ -313,9 +306,6 @@ enum TensorBlockKind {
// TensorBlockAssign or for constructing another block expression.
kMaterializedInOutput
};
-#if !EIGEN_HAS_CXX11
-} // namespace TensorBlockKind
-#endif
// -------------------------------------------------------------------------- //
// TensorBlockNotImplemented should be used to defined TensorBlock typedef in
@@ -361,9 +351,6 @@ struct XprScalar<void> {
template <typename Scalar, int NumDims, int Layout,
typename IndexType = Eigen::Index>
class TensorMaterializedBlock {
-#if !EIGEN_HAS_CXX11
- typedef internal::TensorBlockKind::TensorBlockKind TensorBlockKind;
-#endif
public:
typedef DSizes<IndexType, NumDims> Dimensions;
typedef TensorMap<const Tensor<Scalar, NumDims, Layout> > XprType;
@@ -543,9 +530,6 @@ class TensorMaterializedBlock {
template <typename UnaryOp, typename ArgTensorBlock>
class TensorCwiseUnaryBlock {
-#if !EIGEN_HAS_CXX11
- typedef internal::TensorBlockKind::TensorBlockKind TensorBlockKind;
-#endif
static const bool NoArgBlockAccess =
internal::is_void<typename ArgTensorBlock::XprType>::value;
@@ -578,9 +562,6 @@ class TensorCwiseUnaryBlock {
template <typename BinaryOp, typename LhsTensorBlock, typename RhsTensorBlock>
class TensorCwiseBinaryBlock {
-#if !EIGEN_HAS_CXX11
- typedef internal::TensorBlockKind::TensorBlockKind TensorBlockKind;
-#endif
static const bool NoArgBlockAccess =
internal::is_void<typename LhsTensorBlock::XprType>::value ||
@@ -628,9 +609,6 @@ class TensorCwiseBinaryBlock {
template <typename BlockFactory, typename ArgTensorBlock>
class TensorUnaryExprBlock {
-#if !EIGEN_HAS_CXX11
- typedef internal::TensorBlockKind::TensorBlockKind TensorBlockKind;
-#endif
typedef typename ArgTensorBlock::XprType ArgXprType;
static const bool NoArgBlockAccess = internal::is_void<ArgXprType>::value;
@@ -663,9 +641,6 @@ class TensorUnaryExprBlock {
template <typename BlockFactory, typename Arg1TensorBlock,
typename Arg2TensorBlock, typename Arg3TensorBlock>
class TensorTernaryExprBlock {
-#if !EIGEN_HAS_CXX11
- typedef internal::TensorBlockKind::TensorBlockKind TensorBlockKind;
-#endif
typedef typename Arg1TensorBlock::XprType Arg1XprType;
typedef typename Arg2TensorBlock::XprType Arg2XprType;
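For readers unfamiliar with the workaround deleted throughout TensorBlockV2.h: before C++11, using an unscoped enum's name as a nested-name-specifier (e.g. TensorBlockKind::kExpr) was an extension, so the enum was wrapped in a namespace of the same name. A minimal standalone sketch of both spellings follows; the names legacy, modern and BlockKind are invented for illustration and are not part of Eigen.

#include <iostream>

// Pre-C++11 pattern removed by this commit: wrap the enum in a namespace of
// the same name so callers can write BlockKind::kExpr even in C++03.
namespace legacy {
namespace BlockKind {
enum BlockKind { kExpr, kView, kMaterializedInScratch, kMaterializedInOutput };
}  // namespace BlockKind
}  // namespace legacy

// C++11 pattern kept by this commit: a plain unscoped enum whose enumerators
// may be qualified with the enum name directly.
namespace modern {
enum BlockKind { kExpr, kView, kMaterializedInScratch, kMaterializedInOutput };
}  // namespace modern

int main() {
  legacy::BlockKind::BlockKind a = legacy::BlockKind::kView;  // C++03-compatible spelling
  modern::BlockKind b = modern::BlockKind::kView;             // nested-name-specifier, C++11
  std::cout << a << " " << b << "\n";
  return 0;
}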
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h
index 5b1abdc40..7f3394438 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h
@@ -53,14 +53,8 @@ static void initializeDeviceProp() {
// compile with nvcc, so we resort to atomics and thread fences instead.
// Note that if the caller uses a compiler that doesn't support c++11 we
// can't ensure that the initialization is thread safe.
-#if __cplusplus >= 201103L
static std::atomic<bool> first(true);
if (first.exchange(false)) {
-#else
- static bool first = true;
- if (first) {
- first = false;
-#endif
// We're the first thread to reach this point.
int num_devices;
gpuError_t status = gpuGetDeviceCount(&num_devices);
@@ -83,16 +77,12 @@ static void initializeDeviceProp() {
}
}
-#if __cplusplus >= 201103L
std::atomic_thread_fence(std::memory_order_release);
-#endif
m_devicePropInitialized = true;
} else {
// Wait for the other thread to inititialize the properties.
while (!m_devicePropInitialized) {
-#if __cplusplus >= 201103L
std::atomic_thread_fence(std::memory_order_acquire);
-#endif
EIGEN_SLEEP(1000);
}
}
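The TensorDeviceGpu.h hunk above keeps only the C++11 branch of the one-time initialization: the first thread to flip an atomic flag does the work and publishes it with a release fence, while the others spin until the done flag is set and then issue an acquire fence. Below is a self-contained sketch of that pattern with invented names (initializeOnce, g_device_count, g_initialized) and std::this_thread::sleep_for standing in for EIGEN_SLEEP; Eigen's m_devicePropInitialized is a volatile bool, whereas the sketch uses std::atomic<bool> for the done flag.

#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>
#include <vector>

static int g_device_count = 0;                  // written once by the initializing thread
static std::atomic<bool> g_initialized(false);  // "done" flag

void initializeOnce() {
  // Exactly one thread wins the exchange and performs the initialization.
  static std::atomic<bool> first(true);
  if (first.exchange(false)) {
    g_device_count = 4;  // stand-in for the real gpuGetDeviceCount() query
    // Publish the write above before announcing completion.
    std::atomic_thread_fence(std::memory_order_release);
    g_initialized.store(true, std::memory_order_relaxed);
  } else {
    // Everyone else waits for the winner to finish.
    while (!g_initialized.load(std::memory_order_relaxed)) {
      std::this_thread::sleep_for(std::chrono::microseconds(1000));
    }
    // Pairs with the release fence so g_device_count is visible from here on.
    std::atomic_thread_fence(std::memory_order_acquire);
  }
}

int main() {
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) workers.emplace_back(initializeOnce);
  for (std::thread& t : workers) t.join();
  std::cout << "device count: " << g_device_count << "\n";
  return 0;
}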
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
index 97ac96db1..11cec3d1c 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h
@@ -76,13 +76,8 @@ struct ExpressionHasTensorBroadcastingOp<
* Default strategy: the expression is evaluated sequentially with a single cpu
* thread, without vectorization and block evaluation.
*/
-#if EIGEN_HAS_CXX11
template <typename Expression, typename Device, bool Vectorizable,
TiledEvaluation Tiling>
-#else
- template <typename Expression, typename Device, bool Vectorizable,
- TiledEvaluation::TiledEvaluation Tiling>
-#endif
class TensorExecutor {
public:
typedef typename Expression::Index StorageIndex;
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h b/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h
index 7be007d94..55c7d6831 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h
@@ -10,10 +10,6 @@
#ifndef EIGEN_CXX11_TENSOR_TENSOR_FFT_H
#define EIGEN_CXX11_TENSOR_TENSOR_FFT_H
-// This code requires the ability to initialize arrays of constant
-// values directly inside a class.
-#if __cplusplus >= 201103L || EIGEN_COMP_MSVC >= 1900
-
namespace Eigen {
/** \class TensorFFT
@@ -671,7 +667,4 @@ struct TensorEvaluator<const TensorFFTOp<FFT, ArgType, FFTResultType, FFTDir>, D
} // end namespace Eigen
-#endif // EIGEN_HAS_CONSTEXPR
-
-
#endif // EIGEN_CXX11_TENSOR_TENSOR_FFT_H
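The guard deleted from TensorFFT.h protected the use of in-class initializers for array members, a C++11 feature (also available in MSVC from version 1900, i.e. Visual Studio 2015). A tiny sketch of that ability; TwiddleSigns is an invented name, not a type from Eigen.

#include <iostream>

struct TwiddleSigns {
  // In-class brace initializer for an array member: valid since C++11
  // (and MSVC >= 1900), which is what the removed guard was checking for.
  double sign[2] = {1.0, -1.0};
};

int main() {
  TwiddleSigns t;
  std::cout << t.sign[0] << " " << t.sign[1] << "\n";
  return 0;
}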
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
index 802cf21d8..5549cbdb2 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h
@@ -155,19 +155,11 @@ struct IsVectorizable<GpuDevice, Expression> {
};
// Tiled evaluation strategy.
-#if !EIGEN_HAS_CXX11
-// To be able to use `TiledEvaluation::Off` in C++03 we need a namespace.
-// (Use of enumeration in a nested name specifier is a c++11 extension).
-namespace TiledEvaluation {
-#endif
enum TiledEvaluation {
Off = 0, // tiled evaluation is not supported
On = 1, // still work in progress (see TensorBlockV2.h)
Legacy = 2 // soon to be deprecated (see TensorBock.h)
};
-#if !EIGEN_HAS_CXX11
-} // namespace TiledEvaluation
-#endif
template <typename Device, typename Expression>
struct IsTileable {
@@ -182,30 +174,16 @@ struct IsTileable {
TensorEvaluator<Expression, Device>::BlockAccessV2 &&
TensorEvaluator<Expression, Device>::PreferBlockAccess;
-#if EIGEN_HAS_CXX11
static const TiledEvaluation value =
BlockAccessV2
? TiledEvaluation::On
: (BlockAccess ? TiledEvaluation::Legacy : TiledEvaluation::Off);
-#else
- static const TiledEvaluation::TiledEvaluation value =
- BlockAccessV2
- ? TiledEvaluation::On
- : (BlockAccess ? TiledEvaluation::Legacy : TiledEvaluation::Off);
-#endif
};
-#if EIGEN_HAS_CXX11
template <typename Expression, typename Device,
bool Vectorizable = IsVectorizable<Device, Expression>::value,
TiledEvaluation Tiling = IsTileable<Device, Expression>::value>
class TensorExecutor;
-#else
-template <typename Expression, typename Device,
- bool Vectorizable = IsVectorizable<Device, Expression>::value,
- TiledEvaluation::TiledEvaluation Tiling = IsTileable<Device, Expression>::value>
-class TensorExecutor;
-#endif
// TODO(ezhulenev): Add TiledEvaluation support to async executor.
template <typename Expression, typename Device, typename DoneCallback,
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
index cee7ae657..a5c293cf9 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h
@@ -445,7 +445,7 @@ __global__ void OuterReductionKernel(R, const S, I_, I_, typename S::CoeffReturn
*/
template <typename Op, typename CoeffReturnType>
struct ReductionReturnType {
-#if EIGEN_HAS_CXX11 && defined(EIGEN_USE_SYCL)
+#if defined(EIGEN_USE_SYCL)
typedef typename remove_const<decltype(std::declval<Op>().initialize())>::type type;
#else
typedef typename remove_const<CoeffReturnType>::type type;
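The SYCL branch kept in TensorReduction.h relies on decltype(std::declval<Op>().initialize()) to deduce the reducer's accumulator type without constructing an Op. A standalone sketch of the same trick, using std::remove_const in place of Eigen's internal trait and an invented SumReducer as the reducer type:

#include <type_traits>
#include <utility>

// Invented reducer whose initialize() returns the accumulator type.
struct SumReducer {
  float initialize() const { return 0.0f; }
};

// Sketch of the trait after this commit: with EIGEN_USE_SYCL the result type
// is deduced from Op::initialize(); otherwise const is stripped from the
// coefficient type.
template <typename Op, typename CoeffReturnType>
struct ReductionReturnTypeSketch {
#if defined(EIGEN_USE_SYCL)
  typedef typename std::remove_const<
      decltype(std::declval<Op>().initialize())>::type type;
#else
  typedef typename std::remove_const<CoeffReturnType>::type type;
#endif
};

static_assert(
    std::is_same<ReductionReturnTypeSketch<SumReducer, const float>::type,
                 float>::value,
    "the deduced reduction return type is a non-const float");

int main() { return 0; }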