-rw-r--r--  Eigen/src/Core/AssignEvaluator.h                              | 15
-rw-r--r--  Eigen/src/Core/CoreEvaluators.h                               |  4
-rw-r--r--  Eigen/src/Core/arch/CUDA/Half.h                               |  5
-rw-r--r--  Eigen/src/Core/util/Macros.h                                  |  2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h             |  4
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h        |  4
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h    | 58
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h  |  6
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h        |  2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h          | 10
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h         |  2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h             |  2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h               |  4
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h          | 92
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h         |  2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h         |  2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h             | 12
-rw-r--r--  unsupported/Eigen/CXX11/src/util/CXX11Meta.h                  |  2
-rw-r--r--  unsupported/Eigen/CXX11/src/util/MaxSizeVector.h              |  2
-rw-r--r--  unsupported/test/CMakeLists.txt                               | 43
-rw-r--r--  unsupported/test/cxx11_float16.cpp                            | 36
-rw-r--r--  unsupported/test/cxx11_tensor_argmax.cpp                      |  8
-rw-r--r--  unsupported/test/cxx11_tensor_dimension.cpp                   | 13
-rw-r--r--  unsupported/test/cxx11_tensor_empty.cpp                       |  8
-rw-r--r--  unsupported/test/cxx11_tensor_fixed_size.cpp                  |  6
-rw-r--r--  unsupported/test/cxx11_tensor_forced_eval.cpp                 |  9
-rw-r--r--  unsupported/test/cxx11_tensor_map.cpp                         | 36
-rw-r--r--  unsupported/test/cxx11_tensor_of_float16_cuda.cu              | 63
-rw-r--r--  unsupported/test/cxx11_tensor_simple.cpp                      |  5
29 files changed, 320 insertions(+), 137 deletions(-)
diff --git a/Eigen/src/Core/AssignEvaluator.h b/Eigen/src/Core/AssignEvaluator.h
index 9d4b315a0..b1193e421 100644
--- a/Eigen/src/Core/AssignEvaluator.h
+++ b/Eigen/src/Core/AssignEvaluator.h
@@ -256,12 +256,13 @@ struct copy_using_evaluator_innervec_CompleteUnrolling
enum {
outer = Index / DstXprType::InnerSizeAtCompileTime,
inner = Index % DstXprType::InnerSizeAtCompileTime,
- JointAlignment = Kernel::AssignmentTraits::JointAlignment
+ JointAlignment = Kernel::AssignmentTraits::JointAlignment,
+ DefaultAlignment = unpacket_traits<PacketType>::alignment
};
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
{
- kernel.template assignPacketByOuterInner<Aligned, JointAlignment, PacketType>(outer, inner);
+ kernel.template assignPacketByOuterInner<DefaultAlignment, JointAlignment, PacketType>(outer, inner);
enum { NextIndex = Index + unpacket_traits<PacketType>::size };
copy_using_evaluator_innervec_CompleteUnrolling<Kernel, NextIndex, Stop>::run(kernel);
}
@@ -277,9 +278,12 @@ template<typename Kernel, int Index_, int Stop>
struct copy_using_evaluator_innervec_InnerUnrolling
{
typedef typename Kernel::PacketType PacketType;
+ enum {
+ DefaultAlignment = unpacket_traits<PacketType>::alignment
+ };
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel, Index outer)
{
- kernel.template assignPacketByOuterInner<Aligned, Aligned, PacketType>(outer, Index_);
+ kernel.template assignPacketByOuterInner<DefaultAlignment, DefaultAlignment, PacketType>(outer, Index_);
enum { NextIndex = Index_ + unpacket_traits<PacketType>::size };
copy_using_evaluator_innervec_InnerUnrolling<Kernel, NextIndex, Stop>::run(kernel, outer);
}
@@ -433,6 +437,9 @@ template<typename Kernel>
struct dense_assignment_loop<Kernel, InnerVectorizedTraversal, NoUnrolling>
{
typedef typename Kernel::PacketType PacketType;
+ enum {
+ DefaultAlignment = unpacket_traits<PacketType>::alignment
+ };
EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE void run(Kernel &kernel)
{
const Index innerSize = kernel.innerSize();
@@ -440,7 +447,7 @@ struct dense_assignment_loop<Kernel, InnerVectorizedTraversal, NoUnrolling>
const Index packetSize = unpacket_traits<PacketType>::size;
for(Index outer = 0; outer < outerSize; ++outer)
for(Index inner = 0; inner < innerSize; inner+=packetSize)
- kernel.template assignPacketByOuterInner<Aligned, Aligned, PacketType>(outer, inner);
+ kernel.template assignPacketByOuterInner<DefaultAlignment, DefaultAlignment, PacketType>(outer, inner);
}
};
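
The three hunks above stop hardcoding Aligned as the store alignment and instead ask the packet type itself, via unpacket_traits<PacketType>::alignment, what it actually requires. That matters once PacketType can be narrower or wider than the default register width (e.g. half packets on CUDA). A minimal standalone sketch of the trait query, assuming an Eigen 3.3-era build (not part of this commit):

// sketch.cpp -- illustrative only.
#include <Eigen/Core>
#include <iostream>

int main() {
  using namespace Eigen;
  using namespace Eigen::internal;
  typedef packet_traits<float>::type PacketF;  // e.g. Packet4f under SSE
  // The alignment this packet actually needs; DefaultAlignment now forwards
  // this to assignPacketByOuterInner instead of a blanket Aligned.
  std::cout << "required: " << int(unpacket_traits<PacketF>::alignment)
            << ", blanket Aligned: " << int(Aligned) << std::endl;
  return 0;
}
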
diff --git a/Eigen/src/Core/CoreEvaluators.h b/Eigen/src/Core/CoreEvaluators.h
index 388805f0d..932178f53 100644
--- a/Eigen/src/Core/CoreEvaluators.h
+++ b/Eigen/src/Core/CoreEvaluators.h
@@ -850,14 +850,14 @@ struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBa
template<int StoreMode, typename PacketType>
EIGEN_STRONG_INLINE
void writePacket(Index row, Index col, const PacketType& x)
- {
+ {
return m_argImpl.template writePacket<StoreMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col, x);
}
template<int StoreMode, typename PacketType>
EIGEN_STRONG_INLINE
void writePacket(Index index, const PacketType& x)
- {
+ {
return writePacket<StoreMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
RowsAtCompileTime == 1 ? index : 0,
x);
diff --git a/Eigen/src/Core/arch/CUDA/Half.h b/Eigen/src/Core/arch/CUDA/Half.h
index 6387f2870..060c2c805 100644
--- a/Eigen/src/Core/arch/CUDA/Half.h
+++ b/Eigen/src/Core/arch/CUDA/Half.h
@@ -46,6 +46,8 @@
// Make our own __half definition that is similar to CUDA's.
struct __half {
+ __half() {}
+ explicit __half(unsigned short raw) : x(raw) {}
unsigned short x;
};
@@ -292,7 +294,8 @@ static EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC __half float_to_half_rtne(float ff)
const FP32 f16max = { (127 + 16) << 23 };
const FP32 denorm_magic = { ((127 - 15) + (23 - 10) + 1) << 23 };
unsigned int sign_mask = 0x80000000u;
- __half o = { 0 };
+ __half o;
+ o.x = static_cast<unsigned short>(0x0u);
unsigned int sign = f.u & sign_mask;
f.u ^= sign;
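
Context for the two hunks above: once __half gains user-declared constructors it is no longer an aggregate, so the old brace initialization `__half o = { 0 };` becomes ill-formed; the raw-bits constructor replaces the braced form throughout the tests below. A C++03-valid sketch of the rule (MyHalf is a hypothetical stand-in for the patched __half):

struct MyHalf {
  MyHalf() {}
  explicit MyHalf(unsigned short raw) : x(raw) {}
  unsigned short x;
};

int main() {
  // MyHalf o = { 0 };  // ill-formed: a type with constructors is not an aggregate
  MyHalf o;             // default-construct, then set the raw bits, as in float_to_half_rtne
  o.x = static_cast<unsigned short>(0x0u);
  MyHalf one(0x3c00);   // raw-bits constructor, as used by cxx11_float16.cpp below
  return (o.x == 0 && one.x == 0x3c00) ? 0 : 1;
}
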
diff --git a/Eigen/src/Core/util/Macros.h b/Eigen/src/Core/util/Macros.h
index e2bb147dc..acb936ebe 100644
--- a/Eigen/src/Core/util/Macros.h
+++ b/Eigen/src/Core/util/Macros.h
@@ -375,7 +375,7 @@
#define EIGEN_HAS_CONSTEXPR 1
#endif
#elif __has_feature(cxx_relaxed_constexpr) || (defined(__cplusplus) && __cplusplus >= 201402L) || \
- EIGEN_GNUC_AT_LEAST(4,8)
+ (EIGEN_GNUC_AT_LEAST(4,8) && (__cplusplus > 199711L))
#define EIGEN_HAS_CONSTEXPR 1
#endif
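
The extra (__cplusplus > 199711L) term keeps EIGEN_HAS_CONSTEXPR off when g++ >= 4.8 is invoked in C++03 mode, where the compiler-version check alone would wrongly enable it. The guard pattern in isolation, as a sketch:

// A version check alone is insufficient: g++ 4.8 also accepts -std=c++03.
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) \
    && (__cplusplus > 199711L)
constexpr int kAnswer = 42;  // only reached in C++11 (or later) mode
#else
const int kAnswer = 42;      // C++03 fallback
#endif

int main() { return kAnswer == 42 ? 0 : 1; }
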
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
index 5abff0800..cb615c75b 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
@@ -36,7 +36,7 @@ struct traits<TensorAssignOp<LhsXprType, RhsXprType> >
static const int Layout = internal::traits<LhsXprType>::Layout;
enum {
- Flags = 0,
+ Flags = 0
};
};
@@ -100,7 +100,7 @@ struct TensorEvaluator<const TensorAssignOp<LeftArgType, RightArgType>, Device>
IsAligned = TensorEvaluator<LeftArgType, Device>::IsAligned & TensorEvaluator<RightArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess,
Layout = TensorEvaluator<LeftArgType, Device>::Layout,
- RawAccess = TensorEvaluator<LeftArgType, Device>::RawAccess,
+ RawAccess = TensorEvaluator<LeftArgType, Device>::RawAccess
};
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
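
These hunks, and the matching one-line hunks in TensorContraction.h, TensorContractionMapper.h, TensorConvolution.h, TensorEvalTo.h, TensorExpr.h, TensorForcedEval.h, TensorLayoutSwap.h and TensorTraits.h below, all drop a trailing comma after the last enumerator: that comma is only standard from C++11 on, and strict C++03 builds reject it. A two-line sketch of the rule:

// g++ -std=c++03 -pedantic-errors accepts the first enum and rejects the second.
enum Portable { Flags = 0 };     // no trailing comma: valid C++03
// enum NotC03 { Flags2 = 0, };  // trailing comma: C++11 only
int main() { return Flags; }
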
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index 97182258d..6f113b903 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -41,7 +41,7 @@ struct traits<TensorContractionOp<Dimensions, LhsXprType, RhsXprType> >
static const int Layout = traits<LhsXprType>::Layout;
enum {
- Flags = 0,
+ Flags = 0
};
};
@@ -588,7 +588,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
enum {
- Layout = TensorEvaluator<LeftArgType, Device>::Layout,
+ Layout = TensorEvaluator<LeftArgType, Device>::Layout
};
// Most of the code is assuming that both input tensors are ColMajor. If the
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h
index dbff660a9..6a3ef14ef 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h
@@ -543,12 +543,12 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
#define prefetch_lhs(reg, row, col) \
if (!CHECK_LHS_BOUNDARY) { \
if (col < k_size) { \
- reg =lhs.loadPacket(row, col); \
+ reg =lhs.loadPacket<Unaligned>(row, col); \
} \
} else { \
if (col < k_size) { \
if (row + 3 < m_size) { \
- reg =lhs.loadPacket(row, col); \
+ reg =lhs.loadPacket<Unaligned>(row, col); \
} else if (row + 2 < m_size) { \
reg.x =lhs(row + 0, col); \
reg.y =lhs(row + 1, col); \
@@ -578,7 +578,7 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
if (!CHECK_RHS_BOUNDARY) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
- rhs_pf0 = rhs.loadPacket(rhs_vert, rhs_horiz0);
+ rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
@@ -593,7 +593,7 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
} else {
if (rhs_horiz0 < n_size) {
if ((rhs_vert + 3) < k_size) {
- rhs_pf0 = rhs.loadPacket(rhs_vert, rhs_horiz0);
+ rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
} else if ((rhs_vert + 2) < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
@@ -790,37 +790,37 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
if (!CHECK_LHS_BOUNDARY) {
if ((threadIdx.y/4+k+24) < k_size) {
- lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8));
- lhs_pf2 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+16));
- lhs_pf3 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+24));
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
+ lhs_pf3 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
} else if ((threadIdx.y/4+k+16) < k_size) {
- lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8));
- lhs_pf2 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+16));
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
} else if ((threadIdx.y/4+k+8) < k_size) {
- lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
} else if ((threadIdx.y/4+k) < k_size) {
- lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
}
} else {
// just CHECK_LHS_BOUNDARY
if (lhs_vert + 3 < m_size) {
if ((threadIdx.y/4+k+24) < k_size) {
- lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8));
- lhs_pf2 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+16));
- lhs_pf3 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+24));
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
+ lhs_pf3 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
} else if ((threadIdx.y/4+k+16) < k_size) {
- lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8));
- lhs_pf2 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+16));
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
} else if ((threadIdx.y/4+k+8) < k_size) {
- lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
} else if ((threadIdx.y/4+k) < k_size) {
- lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
}
} else if (lhs_vert + 2 < m_size) {
if ((threadIdx.y/4+k+24) < k_size) {
@@ -909,8 +909,8 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
if (!CHECK_RHS_BOUNDARY) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
- rhs_pf0 = rhs.loadPacket(rhs_vert, rhs_horiz0);
- rhs_pf1 = rhs.loadPacket(rhs_vert, rhs_horiz1);
+ rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
+ rhs_pf1 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz1);
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
@@ -932,8 +932,8 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
if (rhs_horiz1 < n_size) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
- rhs_pf0 = rhs.loadPacket(rhs_vert, rhs_horiz0);
- rhs_pf1 = rhs.loadPacket(rhs_vert, rhs_horiz1);
+ rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
+ rhs_pf1 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz1);
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
@@ -954,7 +954,7 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
} else if (rhs_horiz0 < n_size) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
- rhs_pf0 = rhs.loadPacket(rhs_vert, rhs_horiz0);
+ rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
} else if ((rhs_vert + 2) < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
index 392cb6e3d..b27e1a1b4 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
@@ -16,7 +16,7 @@ namespace internal {
enum {
Rhs = 0,
- Lhs = 1,
+ Lhs = 1
};
/*
@@ -233,7 +233,7 @@ class BaseTensorContractionMapper : public SimpleTensorContractionMapper<Scalar,
typedef typename Tensor::PacketReturnType Packet;
typedef typename unpacket_traits<Packet>::half HalfPacket;
- template <int AlignmentType = Alignment>
+ template <int AlignmentType>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Packet loadPacket(Index i, Index j) const {
// whole method makes column major assumption
@@ -276,7 +276,7 @@ class BaseTensorContractionMapper : public SimpleTensorContractionMapper<Scalar,
return pload<Packet>(data);
}
- template <int AlignmentType = Alignment>
+ template <int AlignmentType>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE HalfPacket loadHalfPacket(Index i, Index j) const {
// whole method makes column major assumption
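
These two hunks are the root cause of the loadPacket<Unaligned> churn in TensorContractionCuda.h above: a default template argument on a function template (AlignmentType = Alignment) is a C++11 feature, so the default is removed and every call site must now name the alignment explicitly. The rule in isolation, as a sketch (the Mapper type is hypothetical):

// Default template arguments on function templates are C++11-only;
// class templates could always have them. Hence the explicit <Unaligned>.
struct Mapper {
  template <int AlignmentType>  // no "= Alignment" default: C++03-safe
  int loadPacket(int i) const { return i + AlignmentType; }
};

int main() {
  Mapper m;
  return m.loadPacket<0>(1) == 1 ? 0 : 1;  // caller supplies the argument
}
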
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h b/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h
index ff3c5662d..091007ab7 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h
@@ -233,7 +233,7 @@ struct traits<TensorConvolutionOp<Dimensions, InputXprType, KernelXprType> >
static const int Layout = traits<InputXprType>::Layout;
enum {
- Flags = 0,
+ Flags = 0
};
};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h b/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
index 4e8f86674..0f6dcedaa 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
@@ -34,25 +34,25 @@ class TensorOpCost {
template <typename ArgType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static int MulCost() {
return internal::functor_traits<
- internal::scalar_product_op<ArgType, ArgType>>::Cost;
+ internal::scalar_product_op<ArgType, ArgType> >::Cost;
}
template <typename ArgType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static int AddCost() {
- return internal::functor_traits<internal::scalar_sum_op<ArgType>>::Cost;
+ return internal::functor_traits<internal::scalar_sum_op<ArgType> >::Cost;
}
template <typename ArgType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static int DivCost() {
return internal::functor_traits<
- internal::scalar_quotient_op<ArgType, ArgType>>::Cost;
+ internal::scalar_quotient_op<ArgType, ArgType> >::Cost;
}
template <typename ArgType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static int ModCost() {
- return internal::functor_traits<internal::scalar_mod_op<ArgType>>::Cost;
+ return internal::functor_traits<internal::scalar_mod_op<ArgType> >::Cost;
}
template <typename SrcType, typename TargetType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static int CastCost() {
return internal::functor_traits<
- internal::scalar_cast_op<SrcType, TargetType>>::Cost;
+ internal::scalar_cast_op<SrcType, TargetType> >::Cost;
}
TensorOpCost() : bytes_loaded_(0), bytes_stored_(0), compute_cycles_(0) {}
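
The TensorCostModel.h hunks (and the identical edits in the tests below) insert a space between the closing angle brackets because C++03 lexes >> as the right-shift operator even inside nested template argument lists; C++11 relaxed this. For example:

// Builds with g++ -std=c++03; remove the space and it only builds as C++11.
#include <vector>

int main() {
  std::vector<std::vector<int> > rows;  // "> >" required in C++03
  rows.push_back(std::vector<int>(3, 7));
  return rows[0][2] == 7 ? 0 : 1;
}
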
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
index 977dcafb0..f0b8ac958 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
@@ -275,7 +275,7 @@ struct DSizes : array<DenseIndex, NumDims> {
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex TotalSize() const {
- return internal::array_prod(*static_cast<const Base*>(this));
+ return (NumDims == 0) ? 1 : internal::array_prod(*static_cast<const Base*>(this));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DSizes() {
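
The guard makes a rank-0 DSizes report one element: a rank-0 tensor is a scalar, and the product over an empty dimension list is the empty product, 1. This is exactly what the new test_rank_zero below checks; a minimal usage sketch, assuming this revision's unsupported Tensor module:

#include <unsupported/Eigen/CXX11/Tensor>
#include <cassert>
#include <cstddef>

int main() {
  Eigen::DSizes<std::ptrdiff_t, 0> scalar_dims;  // rank-0: no dimensions at all
  assert(scalar_dims.TotalSize() == 1);          // empty product, per the new branch
  return 0;
}
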
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h b/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h
index 5c6748a43..c556fec0f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h
@@ -34,7 +34,7 @@ struct traits<TensorEvalToOp<XprType> >
static const int Layout = XprTraits::Layout;
enum {
- Flags = 0,
+ Flags = 0
};
};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h
index 49d849e23..8491c4ca2 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h
@@ -40,7 +40,7 @@ struct traits<TensorCwiseNullaryOp<NullaryOp, XprType> >
static const int Layout = XprTraits::Layout;
enum {
- Flags = 0,
+ Flags = 0
};
};
@@ -163,7 +163,7 @@ struct traits<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType> >
static const int Layout = XprTraits::Layout;
enum {
- Flags = 0,
+ Flags = 0
};
};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h b/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h
index 9c0ed43b7..b27ee0084 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h
@@ -128,7 +128,6 @@ class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_,
return m_storage.data()[0];
}
-
#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) const
@@ -137,8 +136,54 @@ class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_,
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return this->operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
}
+#else
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
+ {
+ if (Options&RowMajor) {
+ const Index index = i1 + i0 * m_storage.dimensions()[1];
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + i1 * m_storage.dimensions()[0];
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
+ {
+ if (Options&RowMajor) {
+ const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
+ {
+ if (Options&RowMajor) {
+ const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
+ {
+ if (Options&RowMajor) {
+ const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
+ return m_storage.data()[index];
+ }
+ }
#endif
+
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
{
@@ -176,6 +221,51 @@ class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_,
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
}
+#else
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
+ {
+ if (Options&RowMajor) {
+ const Index index = i1 + i0 * m_storage.dimensions()[1];
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + i1 * m_storage.dimensions()[0];
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
+ {
+ if (Options&RowMajor) {
+ const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
+ {
+ if (Options&RowMajor) {
+ const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
+ {
+ if (Options&RowMajor) {
+ const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
+ return m_storage.data()[index];
+ }
+ }
#endif
EIGEN_DEVICE_FUNC
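
The new non-variadic operator() overloads (ranks 2 through 5, const and non-const) keep TensorFixedSize usable when EIGEN_HAS_VARIADIC_TEMPLATES is unavailable; each one linearizes the indices with the usual strided formula, e.g. i0 + d0*(i1 + d1*i2) in column-major order. A quick standalone check (not part of the commit) that this formula enumerates a 2x3x7 tensor's coefficients in storage order:

#include <cassert>

int main() {
  const int dims[3] = {2, 3, 7};
  int expected = 0;
  // Column-major: the first index varies fastest, matching the !RowMajor branch.
  for (int i2 = 0; i2 < dims[2]; ++i2)
    for (int i1 = 0; i1 < dims[1]; ++i1)
      for (int i0 = 0; i0 < dims[0]; ++i0) {
        const int index = i0 + dims[0] * (i1 + dims[1] * i2);
        assert(index == expected++);
      }
  return 0;
}
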
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
index 1ce53ad69..7ec757519 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
@@ -34,7 +34,7 @@ struct traits<TensorForcedEvalOp<XprType> >
static const int Layout = XprTraits::Layout;
enum {
- Flags = 0,
+ Flags = 0
};
};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h b/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h
index 63a8476ef..cd0109ef4 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h
@@ -181,7 +181,7 @@ template<typename ArgType, typename Device>
IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
Layout = (static_cast<int>(TensorEvaluator<ArgType, Device>::Layout) == static_cast<int>(ColMajor)) ? RowMajor : ColMajor,
- CoordAccess = false, // to be implemented
+ CoordAccess = false // to be implemented
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h b/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
index 2f06f8442..b7597b3a5 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
@@ -40,7 +40,7 @@ class compute_tensor_flags
};
public:
- enum { ret = packet_access_bit};
+ enum { ret = packet_access_bit };
};
@@ -54,7 +54,7 @@ struct traits<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
static const int Layout = Options_ & RowMajor ? RowMajor : ColMajor;
enum {
Options = Options_,
- Flags = compute_tensor_flags<Scalar_, Options_>::ret | (is_const<Scalar_>::value ? 0 : LvalueBit),
+ Flags = compute_tensor_flags<Scalar_, Options_>::ret | (is_const<Scalar_>::value ? 0 : LvalueBit)
};
};
@@ -69,7 +69,7 @@ struct traits<TensorFixedSize<Scalar_, Dimensions, Options_, IndexType_> >
static const int Layout = Options_ & RowMajor ? RowMajor : ColMajor;
enum {
Options = Options_,
- Flags = compute_tensor_flags<Scalar_, Options_>::ret | (is_const<Scalar_>::value ? 0: LvalueBit),
+ Flags = compute_tensor_flags<Scalar_, Options_>::ret | (is_const<Scalar_>::value ? 0: LvalueBit)
};
};
@@ -86,7 +86,7 @@ struct traits<TensorMap<PlainObjectType, Options_> >
static const int Layout = BaseTraits::Layout;
enum {
Options = Options_,
- Flags = BaseTraits::Flags,
+ Flags = BaseTraits::Flags
};
};
@@ -102,7 +102,7 @@ struct traits<TensorRef<PlainObjectType> >
static const int Layout = BaseTraits::Layout;
enum {
Options = BaseTraits::Options,
- Flags = BaseTraits::Flags,
+ Flags = BaseTraits::Flags
};
};
@@ -253,7 +253,7 @@ struct nested<const TensorRef<PlainObjectType> >
// Pc=0.
typedef enum {
PADDING_VALID = 1,
- PADDING_SAME = 2,
+ PADDING_SAME = 2
} PaddingType;
} // end namespace Eigen
diff --git a/unsupported/Eigen/CXX11/src/util/CXX11Meta.h b/unsupported/Eigen/CXX11/src/util/CXX11Meta.h
index f479590b9..ec27eddb8 100644
--- a/unsupported/Eigen/CXX11/src/util/CXX11Meta.h
+++ b/unsupported/Eigen/CXX11/src/util/CXX11Meta.h
@@ -535,7 +535,7 @@ InstType instantiate_by_c_array(ArrType* arr)
#else // Non C++11, fallback to emulation mode
-#include "src/Core/util/EmulateCXX11Meta.h"
+#include "EmulateCXX11Meta.h"
#endif
diff --git a/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h b/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h
index 551124bae..961456f10 100644
--- a/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h
+++ b/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h
@@ -41,7 +41,7 @@ class MaxSizeVector {
// Construct a new MaxSizeVector, reserve and resize to n.
// Copy the init value to all elements.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- explicit MaxSizeVector(size_t n, const T& init)
+ MaxSizeVector(size_t n, const T& init)
: reserve_(n), size_(n),
data_(static_cast<T*>(internal::aligned_malloc(n * sizeof(T)))) {
for (size_t i = 0; i < n; ++i) { new (&data_[i]) T(init); }
diff --git a/unsupported/test/CMakeLists.txt b/unsupported/test/CMakeLists.txt
index eed9f079e..373ad627d 100644
--- a/unsupported/test/CMakeLists.txt
+++ b/unsupported/test/CMakeLists.txt
@@ -110,35 +110,48 @@ ei_add_test(minres)
ei_add_test(levenberg_marquardt)
ei_add_test(kronecker_product)
+# TODO: The following test names are prefixed with the cxx11 string because historically
+# the tests depended on C++11. This is no longer the case, so we ought to rename them.
+ei_add_test(cxx11_float16)
+ei_add_test(cxx11_tensor_dimension)
+ei_add_test(cxx11_tensor_map)
+ei_add_test(cxx11_tensor_assign)
+ei_add_test(cxx11_tensor_comparisons)
+ei_add_test(cxx11_tensor_forced_eval)
+ei_add_test(cxx11_tensor_math)
+ei_add_test(cxx11_tensor_const)
+ei_add_test(cxx11_tensor_intdiv)
+ei_add_test(cxx11_tensor_casts)
+ei_add_test(cxx11_tensor_empty)
+ei_add_test(cxx11_tensor_sugar)
+ei_add_test(cxx11_tensor_roundings)
+ei_add_test(cxx11_tensor_layout_swap)
+ei_add_test(cxx11_tensor_io)
+if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
+ # This test requires __uint128_t which is only available on 64bit systems
+ ei_add_test(cxx11_tensor_uint128)
+endif()
+
if(EIGEN_TEST_CXX11)
  # It should be safe to always run these tests as there is some fallback code for
  # older compilers that don't support C++11.
set(CMAKE_CXX_STANDARD 11)
- ei_add_test(cxx11_float16)
ei_add_test(cxx11_eventcount "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_runqueue "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_meta)
ei_add_test(cxx11_tensor_simple)
# ei_add_test(cxx11_tensor_symmetry)
- ei_add_test(cxx11_tensor_assign)
- ei_add_test(cxx11_tensor_dimension)
ei_add_test(cxx11_tensor_index_list)
ei_add_test(cxx11_tensor_mixed_indices)
- ei_add_test(cxx11_tensor_comparisons)
ei_add_test(cxx11_tensor_contraction)
ei_add_test(cxx11_tensor_convolution)
ei_add_test(cxx11_tensor_expr)
- ei_add_test(cxx11_tensor_math)
- ei_add_test(cxx11_tensor_forced_eval)
ei_add_test(cxx11_tensor_fixed_size)
- ei_add_test(cxx11_tensor_const)
ei_add_test(cxx11_tensor_of_const_values)
ei_add_test(cxx11_tensor_of_complex)
ei_add_test(cxx11_tensor_of_strings)
- ei_add_test(cxx11_tensor_intdiv)
ei_add_test(cxx11_tensor_lvalue)
- ei_add_test(cxx11_tensor_map)
ei_add_test(cxx11_tensor_broadcasting)
ei_add_test(cxx11_tensor_chipping)
ei_add_test(cxx11_tensor_concatenation)
@@ -156,23 +169,11 @@ if(EIGEN_TEST_CXX11)
ei_add_test(cxx11_tensor_thread_pool "-pthread" "${CMAKE_THREAD_LIBS_INIT}")
ei_add_test(cxx11_tensor_ref)
ei_add_test(cxx11_tensor_random)
- ei_add_test(cxx11_tensor_casts)
- ei_add_test(cxx11_tensor_roundings)
- ei_add_test(cxx11_tensor_reverse)
- ei_add_test(cxx11_tensor_layout_swap)
- ei_add_test(cxx11_tensor_io)
ei_add_test(cxx11_tensor_generator)
ei_add_test(cxx11_tensor_custom_op)
ei_add_test(cxx11_tensor_custom_index)
- ei_add_test(cxx11_tensor_sugar)
ei_add_test(cxx11_tensor_fft)
ei_add_test(cxx11_tensor_ifft)
- ei_add_test(cxx11_tensor_empty)
-
- if("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
- # This test requires __uint128_t which is only available on 64bit systems
- ei_add_test(cxx11_tensor_uint128)
- endif()
endif()
diff --git a/unsupported/test/cxx11_float16.cpp b/unsupported/test/cxx11_float16.cpp
index 273dcbc11..9a813653c 100644
--- a/unsupported/test/cxx11_float16.cpp
+++ b/unsupported/test/cxx11_float16.cpp
@@ -31,9 +31,9 @@ void test_conversion()
VERIFY_IS_EQUAL(half(1.19209e-07f).x, 0x0002);
// Verify round-to-nearest-even behavior.
- float val1 = float(half(__half{0x3c00}));
- float val2 = float(half(__half{0x3c01}));
- float val3 = float(half(__half{0x3c02}));
+ float val1 = float(half(__half(0x3c00)));
+ float val2 = float(half(__half(0x3c01)));
+ float val3 = float(half(__half(0x3c02)));
VERIFY_IS_EQUAL(half(0.5 * (val1 + val2)).x, 0x3c00);
VERIFY_IS_EQUAL(half(0.5 * (val2 + val3)).x, 0x3c02);
@@ -49,21 +49,21 @@ void test_conversion()
VERIFY_IS_EQUAL(half(true).x, 0x3c00);
// Conversion to float.
- VERIFY_IS_EQUAL(float(half(__half{0x0000})), 0.0f);
- VERIFY_IS_EQUAL(float(half(__half{0x3c00})), 1.0f);
+ VERIFY_IS_EQUAL(float(half(__half(0x0000))), 0.0f);
+ VERIFY_IS_EQUAL(float(half(__half(0x3c00))), 1.0f);
// Denormals.
- VERIFY_IS_APPROX(float(half(__half{0x8001})), -5.96046e-08f);
- VERIFY_IS_APPROX(float(half(__half{0x0001})), 5.96046e-08f);
- VERIFY_IS_APPROX(float(half(__half{0x0002})), 1.19209e-07f);
+ VERIFY_IS_APPROX(float(half(__half(0x8001))), -5.96046e-08f);
+ VERIFY_IS_APPROX(float(half(__half(0x0001))), 5.96046e-08f);
+ VERIFY_IS_APPROX(float(half(__half(0x0002))), 1.19209e-07f);
// NaNs and infinities.
VERIFY(!(numext::isinf)(float(half(65504.0f)))); // Largest finite number.
VERIFY(!(numext::isnan)(float(half(0.0f))));
- VERIFY((numext::isinf)(float(half(__half{0xfc00}))));
- VERIFY((numext::isnan)(float(half(__half{0xfc01}))));
- VERIFY((numext::isinf)(float(half(__half{0x7c00}))));
- VERIFY((numext::isnan)(float(half(__half{0x7c01}))));
+ VERIFY((numext::isinf)(float(half(__half(0xfc00)))));
+ VERIFY((numext::isnan)(float(half(__half(0xfc01)))));
+ VERIFY((numext::isinf)(float(half(__half(0x7c00)))));
+ VERIFY((numext::isnan)(float(half(__half(0x7c01)))));
#if !EIGEN_COMP_MSVC
// Visual Studio errors out on divisions by 0
@@ -73,12 +73,12 @@ void test_conversion()
#endif
// Exactly same checks as above, just directly on the half representation.
- VERIFY(!(numext::isinf)(half(__half{0x7bff})));
- VERIFY(!(numext::isnan)(half(__half{0x0000})));
- VERIFY((numext::isinf)(half(__half{0xfc00})));
- VERIFY((numext::isnan)(half(__half{0xfc01})));
- VERIFY((numext::isinf)(half(__half{0x7c00})));
- VERIFY((numext::isnan)(half(__half{0x7c01})));
+ VERIFY(!(numext::isinf)(half(__half(0x7bff))));
+ VERIFY(!(numext::isnan)(half(__half(0x0000))));
+ VERIFY((numext::isinf)(half(__half(0xfc00))));
+ VERIFY((numext::isnan)(half(__half(0xfc01))));
+ VERIFY((numext::isinf)(half(__half(0x7c00))));
+ VERIFY((numext::isnan)(half(__half(0x7c01))));
#if !EIGEN_COMP_MSVC
// Visual Studio errors out on divisions by 0
diff --git a/unsupported/test/cxx11_tensor_argmax.cpp b/unsupported/test/cxx11_tensor_argmax.cpp
index 482dfa7de..037767270 100644
--- a/unsupported/test/cxx11_tensor_argmax.cpp
+++ b/unsupported/test/cxx11_tensor_argmax.cpp
@@ -64,7 +64,7 @@ static void test_argmax_tuple_reducer()
Tensor<Tuple<DenseIndex, float>, 0, DataLayout> reduced;
DimensionList<DenseIndex, 4> dims;
reduced = index_tuples.reduce(
- dims, internal::ArgMaxTupleReducer<Tuple<DenseIndex, float>>());
+ dims, internal::ArgMaxTupleReducer<Tuple<DenseIndex, float> >());
Tensor<float, 0, DataLayout> maxi = tensor.maximum();
@@ -74,7 +74,7 @@ static void test_argmax_tuple_reducer()
for (int d = 0; d < 3; ++d) reduce_dims[d] = d;
Tensor<Tuple<DenseIndex, float>, 1, DataLayout> reduced_by_dims(7);
reduced_by_dims = index_tuples.reduce(
- reduce_dims, internal::ArgMaxTupleReducer<Tuple<DenseIndex, float>>());
+ reduce_dims, internal::ArgMaxTupleReducer<Tuple<DenseIndex, float> >());
Tensor<float, 1, DataLayout> max_by_dims = tensor.maximum(reduce_dims);
@@ -96,7 +96,7 @@ static void test_argmin_tuple_reducer()
Tensor<Tuple<DenseIndex, float>, 0, DataLayout> reduced;
DimensionList<DenseIndex, 4> dims;
reduced = index_tuples.reduce(
- dims, internal::ArgMinTupleReducer<Tuple<DenseIndex, float>>());
+ dims, internal::ArgMinTupleReducer<Tuple<DenseIndex, float> >());
Tensor<float, 0, DataLayout> mini = tensor.minimum();
@@ -106,7 +106,7 @@ static void test_argmin_tuple_reducer()
for (int d = 0; d < 3; ++d) reduce_dims[d] = d;
Tensor<Tuple<DenseIndex, float>, 1, DataLayout> reduced_by_dims(7);
reduced_by_dims = index_tuples.reduce(
- reduce_dims, internal::ArgMinTupleReducer<Tuple<DenseIndex, float>>());
+ reduce_dims, internal::ArgMinTupleReducer<Tuple<DenseIndex, float> >());
Tensor<float, 1, DataLayout> min_by_dims = tensor.minimum(reduce_dims);
diff --git a/unsupported/test/cxx11_tensor_dimension.cpp b/unsupported/test/cxx11_tensor_dimension.cpp
index ce78efe52..421e73693 100644
--- a/unsupported/test/cxx11_tensor_dimension.cpp
+++ b/unsupported/test/cxx11_tensor_dimension.cpp
@@ -37,7 +37,6 @@ static void test_fixed_size()
VERIFY_IS_EQUAL(dimensions.TotalSize(), 2*3*7);
}
-
static void test_match()
{
Eigen::DSizes<int, 3> dyn(2,3,7);
@@ -49,10 +48,22 @@ static void test_match()
VERIFY_IS_EQUAL(Eigen::dimensions_match(dyn1, dyn2), false);
}
+static void test_rank_zero()
+{
+ Eigen::Sizes<> scalar;
+ VERIFY_IS_EQUAL(scalar.TotalSize(), 1);
+ VERIFY_IS_EQUAL(scalar.rank(), 0);
+ VERIFY_IS_EQUAL(internal::array_prod(scalar), 1);
+
+ Eigen::DSizes<ptrdiff_t, 0> dscalar;
+ VERIFY_IS_EQUAL(dscalar.TotalSize(), 1);
+ VERIFY_IS_EQUAL(dscalar.rank(), 0);
+}
void test_cxx11_tensor_dimension()
{
CALL_SUBTEST(test_dynamic_size());
CALL_SUBTEST(test_fixed_size());
CALL_SUBTEST(test_match());
+ CALL_SUBTEST(test_rank_zero());
}
diff --git a/unsupported/test/cxx11_tensor_empty.cpp b/unsupported/test/cxx11_tensor_empty.cpp
index 9130fff35..d7eea42d7 100644
--- a/unsupported/test/cxx11_tensor_empty.cpp
+++ b/unsupported/test/cxx11_tensor_empty.cpp
@@ -24,10 +24,10 @@ static void test_empty_tensor()
static void test_empty_fixed_size_tensor()
{
- TensorFixedSize<float, Sizes<0>> source;
- TensorFixedSize<float, Sizes<0>> tgt1 = source;
- TensorFixedSize<float, Sizes<0>> tgt2(source);
- TensorFixedSize<float, Sizes<0>> tgt3;
+ TensorFixedSize<float, Sizes<0> > source;
+ TensorFixedSize<float, Sizes<0> > tgt1 = source;
+ TensorFixedSize<float, Sizes<0> > tgt2(source);
+ TensorFixedSize<float, Sizes<0> > tgt3;
tgt3 = tgt1;
tgt3 = tgt2;
}
diff --git a/unsupported/test/cxx11_tensor_fixed_size.cpp b/unsupported/test/cxx11_tensor_fixed_size.cpp
index 5fe164859..46d741b05 100644
--- a/unsupported/test/cxx11_tensor_fixed_size.cpp
+++ b/unsupported/test/cxx11_tensor_fixed_size.cpp
@@ -130,9 +130,9 @@ static void test_tensor_map()
static void test_2d()
{
float data1[6];
- TensorMap<TensorFixedSize<float, Sizes<2, 3> >> mat1(data1,2,3);
+ TensorMap<TensorFixedSize<float, Sizes<2, 3> > > mat1(data1,2,3);
float data2[6];
- TensorMap<TensorFixedSize<float, Sizes<2, 3>, RowMajor>> mat2(data2,2,3);
+ TensorMap<TensorFixedSize<float, Sizes<2, 3>, RowMajor> > mat2(data2,2,3);
VERIFY_IS_EQUAL((mat1.size()), 2*3);
VERIFY_IS_EQUAL(mat1.rank(), 2);
@@ -153,7 +153,7 @@ static void test_2d()
mat2(1,1) = -4.0;
mat2(1,2) = -5.0;
- TensorFixedSize<float, Sizes<2, 3>> mat3;
+ TensorFixedSize<float, Sizes<2, 3> > mat3;
TensorFixedSize<float, Sizes<2, 3>, RowMajor> mat4;
mat3 = mat1.abs();
mat4 = mat2.abs();
diff --git a/unsupported/test/cxx11_tensor_forced_eval.cpp b/unsupported/test/cxx11_tensor_forced_eval.cpp
index ad9de867d..45d7345e9 100644
--- a/unsupported/test/cxx11_tensor_forced_eval.cpp
+++ b/unsupported/test/cxx11_tensor_forced_eval.cpp
@@ -22,14 +22,15 @@ static void test_simple()
m1.setRandom();
m2.setRandom();
- TensorMap<Tensor<float, 2>> mat1(m1.data(), 3,3);
- TensorMap<Tensor<float, 2>> mat2(m2.data(), 3,3);
+ TensorMap<Tensor<float, 2> > mat1(m1.data(), 3,3);
+ TensorMap<Tensor<float, 2> > mat2(m2.data(), 3,3);
Tensor<float, 2> mat3(3,3);
mat3 = mat1;
typedef Tensor<float, 1>::DimensionPair DimPair;
- Eigen::array<DimPair, 1> dims({{DimPair(1, 0)}});
+ Eigen::array<DimPair, 1> dims;
+ dims[0] = DimPair(1, 0);
mat3 = mat3.contract(mat2, dims).eval();
@@ -60,7 +61,7 @@ static void test_const()
Eigen::array<int, 2> bcast;
bcast[0] = 3;
bcast[1] = 1;
- const TensorMap<Tensor<const float, 2>> input_tensor(input.data(), 3, 3);
+ const TensorMap<Tensor<const float, 2> > input_tensor(input.data(), 3, 3);
Tensor<float, 2> output_tensor= (input_tensor - input_tensor.maximum(depth_dim).eval().reshape(dims2d).broadcast(bcast));
for (int i = 0; i < 3; ++i) {
diff --git a/unsupported/test/cxx11_tensor_map.cpp b/unsupported/test/cxx11_tensor_map.cpp
index a8a095e38..3db0ee7c0 100644
--- a/unsupported/test/cxx11_tensor_map.cpp
+++ b/unsupported/test/cxx11_tensor_map.cpp
@@ -19,8 +19,8 @@ static void test_0d()
Tensor<int, 0> scalar1;
Tensor<int, 0, RowMajor> scalar2;
- TensorMap<Tensor<const int, 0>> scalar3(scalar1.data());
- TensorMap<Tensor<const int, 0, RowMajor>> scalar4(scalar2.data());
+ TensorMap<Tensor<const int, 0> > scalar3(scalar1.data());
+ TensorMap<Tensor<const int, 0, RowMajor> > scalar4(scalar2.data());
scalar1() = 7;
scalar2() = 13;
@@ -37,8 +37,8 @@ static void test_1d()
Tensor<int, 1> vec1(6);
Tensor<int, 1, RowMajor> vec2(6);
- TensorMap<Tensor<const int, 1>> vec3(vec1.data(), 6);
- TensorMap<Tensor<const int, 1, RowMajor>> vec4(vec2.data(), 6);
+ TensorMap<Tensor<const int, 1> > vec3(vec1.data(), 6);
+ TensorMap<Tensor<const int, 1, RowMajor> > vec4(vec2.data(), 6);
vec1(0) = 4; vec2(0) = 0;
vec1(1) = 8; vec2(1) = 1;
@@ -85,8 +85,8 @@ static void test_2d()
mat2(1,1) = 4;
mat2(1,2) = 5;
- TensorMap<Tensor<const int, 2>> mat3(mat1.data(), 2, 3);
- TensorMap<Tensor<const int, 2, RowMajor>> mat4(mat2.data(), 2, 3);
+ TensorMap<Tensor<const int, 2> > mat3(mat1.data(), 2, 3);
+ TensorMap<Tensor<const int, 2, RowMajor> > mat4(mat2.data(), 2, 3);
VERIFY_IS_EQUAL(mat3.rank(), 2);
VERIFY_IS_EQUAL(mat3.size(), 6);
@@ -129,8 +129,8 @@ static void test_3d()
}
}
- TensorMap<Tensor<const int, 3>> mat3(mat1.data(), 2, 3, 7);
- TensorMap<Tensor<const int, 3, RowMajor>> mat4(mat2.data(), array<DenseIndex, 3>{{2, 3, 7}});
+ TensorMap<Tensor<const int, 3> > mat3(mat1.data(), 2, 3, 7);
+ TensorMap<Tensor<const int, 3, RowMajor> > mat4(mat2.data(), 2, 3, 7);
VERIFY_IS_EQUAL(mat3.rank(), 3);
VERIFY_IS_EQUAL(mat3.size(), 2*3*7);
@@ -173,8 +173,8 @@ static void test_from_tensor()
}
}
- TensorMap<Tensor<int, 3>> mat3(mat1);
- TensorMap<Tensor<int, 3, RowMajor>> mat4(mat2);
+ TensorMap<Tensor<int, 3> > mat3(mat1);
+ TensorMap<Tensor<int, 3, RowMajor> > mat4(mat2);
VERIFY_IS_EQUAL(mat3.rank(), 3);
VERIFY_IS_EQUAL(mat3.size(), 2*3*7);
@@ -199,19 +199,23 @@ static void test_from_tensor()
}
}
- TensorFixedSize<int, Sizes<2,3,7>> mat5;
+ TensorFixedSize<int, Sizes<2,3,7> > mat5;
val = 0;
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 7; ++k) {
- mat5(i,j,k) = val;
+ array<ptrdiff_t, 3> coords;
+ coords[0] = i;
+ coords[1] = j;
+ coords[2] = k;
+ mat5(coords) = val;
val++;
}
}
}
- TensorMap<TensorFixedSize<int, Sizes<2,3,7>>> mat6(mat5);
+ TensorMap<TensorFixedSize<int, Sizes<2,3,7> > > mat6(mat5);
VERIFY_IS_EQUAL(mat6.rank(), 3);
VERIFY_IS_EQUAL(mat6.size(), 2*3*7);
@@ -233,8 +237,8 @@ static void test_from_tensor()
static int f(const TensorMap<Tensor<int, 3> >& tensor) {
// Size<0> empty;
- EIGEN_STATIC_ASSERT((internal::array_size<Sizes<>>::value == 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
- EIGEN_STATIC_ASSERT((internal::array_size<DSizes<int, 0>>::value == 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::array_size<Sizes<> >::value == 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
+ EIGEN_STATIC_ASSERT((internal::array_size<DSizes<int, 0> >::value == 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
Tensor<int, 0> result = tensor.sum();
return result();
}
@@ -253,7 +257,7 @@ static void test_casting()
}
}
- TensorMap<Tensor<int, 3>> map(tensor);
+ TensorMap<Tensor<int, 3> > map(tensor);
int sum1 = f(map);
int sum2 = f(tensor);
diff --git a/unsupported/test/cxx11_tensor_of_float16_cuda.cu b/unsupported/test/cxx11_tensor_of_float16_cuda.cu
index 154a72d5c..37fe3e9a4 100644
--- a/unsupported/test/cxx11_tensor_of_float16_cuda.cu
+++ b/unsupported/test/cxx11_tensor_of_float16_cuda.cu
@@ -134,6 +134,68 @@ void test_cuda_elementwise() {
gpu_device.deallocate(d_res_float);
}
+void test_cuda_trancendental() {
+ Eigen::CudaStreamDevice stream;
+ Eigen::GpuDevice gpu_device(&stream);
+ int num_elem = 101;
+
+ float* d_float1 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_float2 = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_res1_half = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_res1_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_res2_half = (float*)gpu_device.allocate(num_elem * sizeof(float));
+ float* d_res2_float = (float*)gpu_device.allocate(num_elem * sizeof(float));
+
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float1(
+ d_float1, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_float2(
+ d_float2, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res1_half(
+ d_res1_half, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res1_float(
+ d_res1_float, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res2_half(
+ d_res2_half, num_elem);
+ Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_res2_float(
+ d_res2_float, num_elem);
+
+ gpu_float1.device(gpu_device) = gpu_float1.random();
+ gpu_float2.device(gpu_device) = gpu_float2.random();
+ gpu_res1_float.device(gpu_device) = gpu_float1.exp();
+ gpu_res2_float.device(gpu_device) = gpu_float2.log();
+ gpu_res1_half.device(gpu_device) = gpu_float1.cast<Eigen::half>().exp().cast<float>();
+ gpu_res2_half.device(gpu_device) = gpu_float2.cast<Eigen::half>().log().cast<float>();
+
+ Tensor<float, 1> input1(num_elem);
+ Tensor<float, 1> half_prec1(num_elem);
+ Tensor<float, 1> full_prec1(num_elem);
+ Tensor<float, 1> input2(num_elem);
+ Tensor<float, 1> half_prec2(num_elem);
+ Tensor<float, 1> full_prec2(num_elem);
+ gpu_device.memcpyDeviceToHost(input1.data(), d_float1, num_elem*sizeof(float));
+ gpu_device.memcpyDeviceToHost(input2.data(), d_float2, num_elem*sizeof(float));
+ gpu_device.memcpyDeviceToHost(half_prec1.data(), d_res1_half, num_elem*sizeof(float));
+ gpu_device.memcpyDeviceToHost(full_prec1.data(), d_res1_float, num_elem*sizeof(float));
+ gpu_device.memcpyDeviceToHost(half_prec2.data(), d_res2_half, num_elem*sizeof(float));
+ gpu_device.memcpyDeviceToHost(full_prec2.data(), d_res2_float, num_elem*sizeof(float));
+ gpu_device.synchronize();
+
+ for (int i = 0; i < num_elem; ++i) {
+ std::cout << "Checking elemwise exp " << i << " input = " << input1(i) << " full = " << full_prec1(i) << " half = " << half_prec1(i) << std::endl;
+ VERIFY_IS_APPROX(full_prec1(i), half_prec1(i));
+ }
+ for (int i = 0; i < num_elem; ++i) {
+ std::cout << "Checking elemwise log " << i << " input = " << input2(i) << " full = " << full_prec2(i) << " half = " << half_prec2(i) << std::endl;
+ VERIFY_IS_APPROX(full_prec2(i), half_prec2(i));
+ }
+ gpu_device.deallocate(d_float1);
+ gpu_device.deallocate(d_float2);
+ gpu_device.deallocate(d_res1_half);
+ gpu_device.deallocate(d_res1_float);
+ gpu_device.deallocate(d_res2_half);
+ gpu_device.deallocate(d_res2_float);
+}
+
void test_cuda_contractions() {
Eigen::CudaStreamDevice stream;
@@ -280,6 +342,7 @@ void test_cxx11_tensor_of_float16_cuda()
CALL_SUBTEST_1(test_cuda_conversion());
CALL_SUBTEST_1(test_cuda_unary());
CALL_SUBTEST_1(test_cuda_elementwise());
+ CALL_SUBTEST_1(test_cuda_trancendental());
CALL_SUBTEST_2(test_cuda_contractions());
CALL_SUBTEST_3(test_cuda_reductions());
CALL_SUBTEST_4(test_cuda_forced_evals());
diff --git a/unsupported/test/cxx11_tensor_simple.cpp b/unsupported/test/cxx11_tensor_simple.cpp
index 47d4d8636..fe860c970 100644
--- a/unsupported/test/cxx11_tensor_simple.cpp
+++ b/unsupported/test/cxx11_tensor_simple.cpp
@@ -195,7 +195,10 @@ static void test_3d()
VERIFY_IS_EQUAL((epsilon(0,2,1)), -1);
VERIFY_IS_EQUAL((epsilon(1,0,2)), -1);
- array<Eigen::DenseIndex, 3> dims{{2,3,4}};
+ array<Eigen::DenseIndex, 3> dims;
+ dims[0] = 2;
+ dims[1] = 3;
+ dims[2] = 4;
Tensor<int, 3> t1(dims);
Tensor<int, 3, RowMajor> t2(dims);