path: root/unsupported/Eigen/CXX11/src
author     Benoit Steiner <benoit.steiner.goog@gmail.com>  2016-05-03 13:15:00 -0700
committer  Benoit Steiner <benoit.steiner.goog@gmail.com>  2016-05-03 13:15:00 -0700
commit     4c05fb03a3c9728b2db9da1ca986ef5103fcdec8 (patch)
tree       75c73864384cdbf6e26cdacd7ffcf71b395be263 /unsupported/Eigen/CXX11/src
parent     577a07a86e521e8d1a3d63a98621649f73c44c42 (diff)
parent     2c5568a757e75b1e8dd6b8754ea3d13a95be96ce (diff)
Merged eigen/eigen into default
Diffstat (limited to 'unsupported/Eigen/CXX11/src')
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h              4
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h         4
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h    58
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h   6
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h         2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h          10
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h          2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h              2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h                4
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h          92
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h          2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h          2
-rw-r--r--  unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h             12
-rw-r--r--  unsupported/Eigen/CXX11/src/util/CXX11Meta.h                   2
-rw-r--r--  unsupported/Eigen/CXX11/src/util/MaxSizeVector.h               2
15 files changed, 147 insertions, 57 deletions
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
index 5abff0800..cb615c75b 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h
@@ -36,7 +36,7 @@ struct traits<TensorAssignOp<LhsXprType, RhsXprType> >
static const int Layout = internal::traits<LhsXprType>::Layout;
enum {
- Flags = 0,
+ Flags = 0
};
};
@@ -100,7 +100,7 @@ struct TensorEvaluator<const TensorAssignOp<LeftArgType, RightArgType>, Device>
IsAligned = TensorEvaluator<LeftArgType, Device>::IsAligned & TensorEvaluator<RightArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess,
Layout = TensorEvaluator<LeftArgType, Device>::Layout,
- RawAccess = TensorEvaluator<LeftArgType, Device>::RawAccess,
+ RawAccess = TensorEvaluator<LeftArgType, Device>::RawAccess
};
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
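
Note on the recurring Flags edits: a trailing comma after the last enumerator, as in "Flags = 0,", only became part of the grammar in C++11; strict C++03 compilers reject it under -pedantic-errors, which is why the comma is dropped throughout this merge. A minimal standalone sketch of the rule (illustrative, not Eigen code):

  // g++ -std=c++03 -pedantic-errors accepts this form; writing
  //   enum E { Flags = 0, };
  // instead is flagged as a C++11 extension.
  enum E { Flags = 0 };

  int main() { return Flags; }
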
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
index 97182258d..6f113b903 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h
@@ -41,7 +41,7 @@ struct traits<TensorContractionOp<Dimensions, LhsXprType, RhsXprType> >
static const int Layout = traits<LhsXprType>::Layout;
enum {
- Flags = 0,
+ Flags = 0
};
};
@@ -588,7 +588,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
enum {
- Layout = TensorEvaluator<LeftArgType, Device>::Layout,
+ Layout = TensorEvaluator<LeftArgType, Device>::Layout
};
// Most of the code is assuming that both input tensors are ColMajor. If the
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h
index dbff660a9..6a3ef14ef 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h
@@ -543,12 +543,12 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
#define prefetch_lhs(reg, row, col) \
if (!CHECK_LHS_BOUNDARY) { \
if (col < k_size) { \
- reg =lhs.loadPacket(row, col); \
+ reg =lhs.loadPacket<Unaligned>(row, col); \
} \
} else { \
if (col < k_size) { \
if (row + 3 < m_size) { \
- reg =lhs.loadPacket(row, col); \
+ reg =lhs.loadPacket<Unaligned>(row, col); \
} else if (row + 2 < m_size) { \
reg.x =lhs(row + 0, col); \
reg.y =lhs(row + 1, col); \
@@ -578,7 +578,7 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
if (!CHECK_RHS_BOUNDARY) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
- rhs_pf0 = rhs.loadPacket(rhs_vert, rhs_horiz0);
+ rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
@@ -593,7 +593,7 @@ EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rh
} else {
if (rhs_horiz0 < n_size) {
if ((rhs_vert + 3) < k_size) {
- rhs_pf0 = rhs.loadPacket(rhs_vert, rhs_horiz0);
+ rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
} else if ((rhs_vert + 2) < k_size) {
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0);
@@ -790,37 +790,37 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
if (!CHECK_LHS_BOUNDARY) {
if ((threadIdx.y/4+k+24) < k_size) {
- lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8));
- lhs_pf2 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+16));
- lhs_pf3 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+24));
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
+ lhs_pf3 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
} else if ((threadIdx.y/4+k+16) < k_size) {
- lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8));
- lhs_pf2 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+16));
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
} else if ((threadIdx.y/4+k+8) < k_size) {
- lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
} else if ((threadIdx.y/4+k) < k_size) {
- lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
}
} else {
// just CHECK_LHS_BOUNDARY
if (lhs_vert + 3 < m_size) {
if ((threadIdx.y/4+k+24) < k_size) {
- lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8));
- lhs_pf2 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+16));
- lhs_pf3 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+24));
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
+ lhs_pf3 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+24));
} else if ((threadIdx.y/4+k+16) < k_size) {
- lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8));
- lhs_pf2 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+16));
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf2 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+16));
} else if ((threadIdx.y/4+k+8) < k_size) {
- lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k));
- lhs_pf1 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k+8));
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf1 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k+8));
} else if ((threadIdx.y/4+k) < k_size) {
- lhs_pf0 =lhs.loadPacket(lhs_vert, (threadIdx.y/4+k));
+ lhs_pf0 =lhs.loadPacket<Unaligned>(lhs_vert, (threadIdx.y/4+k));
}
} else if (lhs_vert + 2 < m_size) {
if ((threadIdx.y/4+k+24) < k_size) {
@@ -909,8 +909,8 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
if (!CHECK_RHS_BOUNDARY) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
- rhs_pf0 = rhs.loadPacket(rhs_vert, rhs_horiz0);
- rhs_pf1 = rhs.loadPacket(rhs_vert, rhs_horiz1);
+ rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
+ rhs_pf1 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz1);
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
@@ -932,8 +932,8 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
if (rhs_horiz1 < n_size) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
- rhs_pf0 = rhs.loadPacket(rhs_vert, rhs_horiz0);
- rhs_pf1 = rhs.loadPacket(rhs_vert, rhs_horiz1);
+ rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
+ rhs_pf1 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz1);
} else if (rhs_vert + 2 < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
@@ -954,7 +954,7 @@ EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs,
} else if (rhs_horiz0 < n_size) {
if ((rhs_vert + 3) < k_size) {
// just CHECK_RHS_BOUNDARY
- rhs_pf0 = rhs.loadPacket(rhs_vert, rhs_horiz0);
+ rhs_pf0 = rhs.loadPacket<Unaligned>(rhs_vert, rhs_horiz0);
} else if ((rhs_vert + 2) < k_size) {
// just CHECK_RHS_BOUNDARY
rhs_pf0.x = rhs(rhs_vert, rhs_horiz0);
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
index 392cb6e3d..b27e1a1b4 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h
@@ -16,7 +16,7 @@ namespace internal {
enum {
Rhs = 0,
- Lhs = 1,
+ Lhs = 1
};
/*
@@ -233,7 +233,7 @@ class BaseTensorContractionMapper : public SimpleTensorContractionMapper<Scalar,
typedef typename Tensor::PacketReturnType Packet;
typedef typename unpacket_traits<Packet>::half HalfPacket;
- template <int AlignmentType = Alignment>
+ template <int AlignmentType>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Packet loadPacket(Index i, Index j) const {
// whole method makes column major assumption
@@ -276,7 +276,7 @@ class BaseTensorContractionMapper : public SimpleTensorContractionMapper<Scalar,
return pload<Packet>(data);
}
- template <int AlignmentType = Alignment>
+ template <int AlignmentType>
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE HalfPacket loadHalfPacket(Index i, Index j) const {
// whole method makes column major assumption
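
Dropping the "= Alignment" defaults from loadPacket/loadHalfPacket, and spelling out loadPacket<Unaligned>(...) at the CUDA call sites in TensorContractionCuda.h above, follows from default template arguments on function templates being a C++11 feature; C++03 only allows them on class templates. A rough standalone sketch of the same pattern (names are illustrative, not the actual Eigen mapper API):

  enum { Unaligned = 0, Aligned = 16 };

  struct Mapper {
    // No default template argument on the member function template,
    // so it stays valid C++03; callers name the alignment explicitly.
    template <int AlignmentType>
    float loadPacket(int i, int j) const {
      return static_cast<float>(i + j);  // stand-in for a real packet load
    }
  };

  int main() {
    Mapper m;
    return m.loadPacket<Unaligned>(2, 3) > 0.0f ? 0 : 1;
  }
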
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h b/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h
index ff3c5662d..091007ab7 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h
@@ -233,7 +233,7 @@ struct traits<TensorConvolutionOp<Dimensions, InputXprType, KernelXprType> >
static const int Layout = traits<InputXprType>::Layout;
enum {
- Flags = 0,
+ Flags = 0
};
};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h b/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
index 4e8f86674..0f6dcedaa 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h
@@ -34,25 +34,25 @@ class TensorOpCost {
template <typename ArgType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static int MulCost() {
return internal::functor_traits<
- internal::scalar_product_op<ArgType, ArgType>>::Cost;
+ internal::scalar_product_op<ArgType, ArgType> >::Cost;
}
template <typename ArgType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static int AddCost() {
- return internal::functor_traits<internal::scalar_sum_op<ArgType>>::Cost;
+ return internal::functor_traits<internal::scalar_sum_op<ArgType> >::Cost;
}
template <typename ArgType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static int DivCost() {
return internal::functor_traits<
- internal::scalar_quotient_op<ArgType, ArgType>>::Cost;
+ internal::scalar_quotient_op<ArgType, ArgType> >::Cost;
}
template <typename ArgType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static int ModCost() {
- return internal::functor_traits<internal::scalar_mod_op<ArgType>>::Cost;
+ return internal::functor_traits<internal::scalar_mod_op<ArgType> >::Cost;
}
template <typename SrcType, typename TargetType>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static int CastCost() {
return internal::functor_traits<
- internal::scalar_cast_op<SrcType, TargetType>>::Cost;
+ internal::scalar_cast_op<SrcType, TargetType> >::Cost;
}
TensorOpCost() : bytes_loaded_(0), bytes_stored_(0), compute_cycles_(0) {}
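
The ">>" to "> >" edits here are another C++03 portability fix: before C++11, the token ">>" closing two nested template argument lists is lexed as the right-shift operator, so a space between the closing angle brackets is required. A minimal standalone sketch:

  #include <vector>

  int main() {
    // Valid in both C++03 and C++11: note the space between '>' '>'.
    std::vector<std::vector<int> > grid(2, std::vector<int>(2, 0));
    // std::vector<std::vector<int>> grid2;  // only parses from C++11 on
    return grid[0][0];
  }
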
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
index 977dcafb0..f0b8ac958 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h
@@ -275,7 +275,7 @@ struct DSizes : array<DenseIndex, NumDims> {
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex TotalSize() const {
- return internal::array_prod(*static_cast<const Base*>(this));
+ return (NumDims == 0) ? 1 : internal::array_prod(*static_cast<const Base*>(this));
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DSizes() {
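
The TotalSize() change covers rank-0 tensors: the product over an empty list of dimensions is conventionally 1, since a rank-0 tensor still holds exactly one coefficient, and the explicit NumDims == 0 branch avoids depending on how array_prod handles an empty array. A rough sketch of that convention in plain C++ (not the Eigen helpers):

  #include <cstddef>

  // Product of n extents; the empty product is 1.
  long total_size(const long* dims, std::size_t n) {
    long prod = 1;
    for (std::size_t i = 0; i < n; ++i) prod *= dims[i];
    return prod;  // n == 0  =>  1
  }

  int main() {
    long d[] = {3, 4};
    return (total_size(d, 2) == 12 && total_size(d, 0) == 1) ? 0 : 1;
  }
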
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h b/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h
index 5c6748a43..c556fec0f 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h
@@ -34,7 +34,7 @@ struct traits<TensorEvalToOp<XprType> >
static const int Layout = XprTraits::Layout;
enum {
- Flags = 0,
+ Flags = 0
};
};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h b/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h
index 49d849e23..8491c4ca2 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h
@@ -40,7 +40,7 @@ struct traits<TensorCwiseNullaryOp<NullaryOp, XprType> >
static const int Layout = XprTraits::Layout;
enum {
- Flags = 0,
+ Flags = 0
};
};
@@ -163,7 +163,7 @@ struct traits<TensorCwiseBinaryOp<BinaryOp, LhsXprType, RhsXprType> >
static const int Layout = XprTraits::Layout;
enum {
- Flags = 0,
+ Flags = 0
};
};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h b/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h
index 9c0ed43b7..b27ee0084 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h
@@ -128,7 +128,6 @@ class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_,
return m_storage.data()[0];
}
-
#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename... IndexTypes>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) const
@@ -137,8 +136,54 @@ class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_,
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return this->operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
}
+#else
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
+ {
+ if (Options&RowMajor) {
+ const Index index = i1 + i0 * m_storage.dimensions()[1];
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + i1 * m_storage.dimensions()[0];
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
+ {
+ if (Options&RowMajor) {
+ const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
+ {
+ if (Options&RowMajor) {
+ const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
+ {
+ if (Options&RowMajor) {
+ const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
+ return m_storage.data()[index];
+ }
+ }
#endif
+
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
{
@@ -176,6 +221,51 @@ class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_,
EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
return operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
}
+#else
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
+ {
+ if (Options&RowMajor) {
+ const Index index = i1 + i0 * m_storage.dimensions()[1];
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + i1 * m_storage.dimensions()[0];
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
+ {
+ if (Options&RowMajor) {
+ const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
+ {
+ if (Options&RowMajor) {
+ const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
+ return m_storage.data()[index];
+ }
+ }
+ EIGEN_DEVICE_FUNC
+ EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
+ {
+ if (Options&RowMajor) {
+ const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
+ return m_storage.data()[index];
+ } else {
+ const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
+ return m_storage.data()[index];
+ }
+ }
#endif
EIGEN_DEVICE_FUNC
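
The #else branches added above give C++03 builds fixed-arity operator() overloads in place of the variadic template version, hand-rolling the usual row-major and column-major linear index. A small standalone sketch of the same 3-index arithmetic (illustrative helpers, not the Eigen storage class):

  #include <cassert>

  // Linear index of (i0, i1, i2) in a d0 x d1 x d2 box.
  long row_major_index(long i0, long i1, long i2, long d1, long d2) {
    return i2 + d2 * (i1 + d1 * i0);   // last index varies fastest
  }
  long col_major_index(long i0, long i1, long i2, long d0, long d1) {
    return i0 + d0 * (i1 + d1 * i2);   // first index varies fastest
  }

  int main() {
    // 2 x 3 x 4 tensor, element (1, 2, 3).
    assert(row_major_index(1, 2, 3, 3, 4) == 3 + 4 * (2 + 3 * 1));
    assert(col_major_index(1, 2, 3, 2, 3) == 1 + 2 * (2 + 3 * 3));
    return 0;
  }
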
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
index 1ce53ad69..7ec757519 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
@@ -34,7 +34,7 @@ struct traits<TensorForcedEvalOp<XprType> >
static const int Layout = XprTraits::Layout;
enum {
- Flags = 0,
+ Flags = 0
};
};
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h b/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h
index 63a8476ef..cd0109ef4 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h
@@ -181,7 +181,7 @@ template<typename ArgType, typename Device>
IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
Layout = (static_cast<int>(TensorEvaluator<ArgType, Device>::Layout) == static_cast<int>(ColMajor)) ? RowMajor : ColMajor,
- CoordAccess = false, // to be implemented
+ CoordAccess = false // to be implemented
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h b/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
index 2f06f8442..b7597b3a5 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h
@@ -40,7 +40,7 @@ class compute_tensor_flags
};
public:
- enum { ret = packet_access_bit};
+ enum { ret = packet_access_bit };
};
@@ -54,7 +54,7 @@ struct traits<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
static const int Layout = Options_ & RowMajor ? RowMajor : ColMajor;
enum {
Options = Options_,
- Flags = compute_tensor_flags<Scalar_, Options_>::ret | (is_const<Scalar_>::value ? 0 : LvalueBit),
+ Flags = compute_tensor_flags<Scalar_, Options_>::ret | (is_const<Scalar_>::value ? 0 : LvalueBit)
};
};
@@ -69,7 +69,7 @@ struct traits<TensorFixedSize<Scalar_, Dimensions, Options_, IndexType_> >
static const int Layout = Options_ & RowMajor ? RowMajor : ColMajor;
enum {
Options = Options_,
- Flags = compute_tensor_flags<Scalar_, Options_>::ret | (is_const<Scalar_>::value ? 0: LvalueBit),
+ Flags = compute_tensor_flags<Scalar_, Options_>::ret | (is_const<Scalar_>::value ? 0: LvalueBit)
};
};
@@ -86,7 +86,7 @@ struct traits<TensorMap<PlainObjectType, Options_> >
static const int Layout = BaseTraits::Layout;
enum {
Options = Options_,
- Flags = BaseTraits::Flags,
+ Flags = BaseTraits::Flags
};
};
@@ -102,7 +102,7 @@ struct traits<TensorRef<PlainObjectType> >
static const int Layout = BaseTraits::Layout;
enum {
Options = BaseTraits::Options,
- Flags = BaseTraits::Flags,
+ Flags = BaseTraits::Flags
};
};
@@ -253,7 +253,7 @@ struct nested<const TensorRef<PlainObjectType> >
// Pc=0.
typedef enum {
PADDING_VALID = 1,
- PADDING_SAME = 2,
+ PADDING_SAME = 2
} PaddingType;
} // end namespace Eigen
diff --git a/unsupported/Eigen/CXX11/src/util/CXX11Meta.h b/unsupported/Eigen/CXX11/src/util/CXX11Meta.h
index f479590b9..ec27eddb8 100644
--- a/unsupported/Eigen/CXX11/src/util/CXX11Meta.h
+++ b/unsupported/Eigen/CXX11/src/util/CXX11Meta.h
@@ -535,7 +535,7 @@ InstType instantiate_by_c_array(ArrType* arr)
#else // Non C++11, fallback to emulation mode
-#include "src/Core/util/EmulateCXX11Meta.h"
+#include "EmulateCXX11Meta.h"
#endif
diff --git a/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h b/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h
index 551124bae..961456f10 100644
--- a/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h
+++ b/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h
@@ -41,7 +41,7 @@ class MaxSizeVector {
// Construct a new MaxSizeVector, reserve and resize to n.
// Copy the init value to all elements.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
- explicit MaxSizeVector(size_t n, const T& init)
+ MaxSizeVector(size_t n, const T& init)
: reserve_(n), size_(n),
data_(static_cast<T*>(internal::aligned_malloc(n * sizeof(T)))) {
for (size_t i = 0; i < n; ++i) { new (&data_[i]) T(init); }